| code (stringlengths 5–1M) | repo_name (stringlengths 5–109) | path (stringlengths 6–208) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 5–1M) |
|---|---|---|---|---|---|
package bio4j.data.uniprot.flat
import bio4j.data.uniprot.seqOps._
/*
Example:
```
Q16653; O00713; O00714; O00715; Q13054; Q13055; Q14855; Q92891;
Q92892; Q92893; Q92894; Q92895; Q93053; Q96KU9; Q96KV0; Q96KV1;
Q99605;
```
*/
case class AC(val lines: Seq[String]) extends AnyVal {
private def joinedLines: String =
lines.mkString("")
def accessions: Seq[String] =
joinedLines
.splitSegments(_ == ';')
.map(_.trim)
}
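// A minimal, self-contained sketch (not part of the bio4j codebase) of what `accessions`
// is expected to yield for the example record above; plain String.split is used here in
// place of bio4j's splitSegments, whose semantics are assumed to be equivalent.
object ACExample extends App {
  val lines = Seq(
    "Q16653; O00713; O00714; O00715; Q13054; Q13055; Q14855; Q92891;",
    "Q92892; Q92893; Q92894; Q92895; Q93053; Q96KU9; Q96KV0; Q96KV1;",
    "Q99605;"
  )
  val accessions = lines.mkString("").split(';').map(_.trim).filter(_.nonEmpty)
  println(accessions.mkString(", ")) // Q16653, O00713, ..., Q99605
}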
| bio4j/data.uniprot | src/main/scala/flat/AC.scala | Scala | agpl-3.0 | 456 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.codegen.agg
import org.apache.flink.table.api.TableException
import org.apache.flink.table.codegen.CodeGenUtils.{BASE_ROW, _}
import org.apache.flink.table.codegen.Indenter.toISC
import org.apache.flink.table.codegen._
import org.apache.flink.table.codegen.agg.AggsHandlerCodeGenerator._
import org.apache.flink.table.dataformat.{BaseRow, GenericRow}
import org.apache.flink.table.dataview._
import org.apache.flink.table.expressions._
import org.apache.flink.table.functions.AggregateFunction
import org.apache.flink.table.functions.aggfunctions.DeclarativeAggregateFunction
import org.apache.flink.table.generated.{AggsHandleFunction, GeneratedAggsHandleFunction, GeneratedNamespaceAggsHandleFunction, NamespaceAggsHandleFunction}
import org.apache.flink.table.plan.util.AggregateInfoList
import org.apache.flink.table.types.LogicalTypeDataTypeConverter.fromDataTypeToLogicalType
import org.apache.flink.table.types.{DataType, LogicalTypeDataTypeConverter}
import org.apache.flink.table.types.TypeInfoLogicalTypeConverter.fromTypeInfoToLogicalType
import org.apache.flink.table.types.logical.{BooleanType, IntType, LogicalType, RowType}
import org.apache.flink.table.types.utils.TypeConversions
import org.apache.flink.table.types.utils.TypeConversions.fromLegacyInfoToDataType
import org.apache.calcite.rex.RexLiteral
import org.apache.calcite.tools.RelBuilder
/**
* A code generator for generating [[AggsHandleFunction]].
*
 * @param copyInputField copy the input field element if true (only mutable types are copied);
 *                       set to true if the field will be buffered (e.g. in a local aggregate)
*/
class AggsHandlerCodeGenerator(
ctx: CodeGeneratorContext,
relBuilder: RelBuilder,
inputFieldTypes: Seq[LogicalType],
copyInputField: Boolean) {
private val inputType = RowType.of(inputFieldTypes: _*)
/** constant expressions that act like a second input in the parameter indices. */
private var constantExprs: Seq[GeneratedExpression] = Seq()
/** window properties like window_start and window_end, only used in window aggregates */
private var namespaceClassName: String = _
private var windowProperties: Seq[PlannerWindowProperty] = Seq()
private var hasNamespace: Boolean = false
/** Aggregate information */
private var accTypeInfo: RowType = _
private var aggBufferSize: Int = _
private var mergedAccExternalTypes: Array[DataType] = _
private var mergedAccOffset: Int = 0
private var mergedAccOnHeap: Boolean = false
private var ignoreAggValues: Array[Int] = Array()
private var isAccumulateNeeded = false
private var isRetractNeeded = false
private var isMergeNeeded = false
var valueType: RowType = _
/**
 * The [[aggBufferCodeGens]] and [[aggActionCodeGens]] are both created when generating an
 * [[AggsHandleFunction]] or [[NamespaceAggsHandleFunction]]. They contain the same
 * AggCodeGens, but organize them differently: [[aggBufferCodeGens]] keeps all the AggCodeGens
 * in a flat format, while [[aggActionCodeGens]] organizes them in a tree format. If there is
 * no distinct aggregate, the two are identical.
 *
 * When different aggregates are distinct on the same field but use different filter
 * conditions, they share the same distinct state; see DistinctAggCodeGen.DistinctValueGenerator
 * for more information.
 */
/**
 * The aggBufferCodeGens is organized according to the agg buffer order, in a flat format,
 * and is only used to generate the methods related to accumulators, such as
 * [[genCreateAccumulators()]], [[genGetAccumulators()]], [[genSetAccumulators()]].
 *
 * For example, if we have:
 * count(*), count(distinct a), count(distinct a) filter d > 5, sum(a), sum(distinct a)
 *
 * then the members of aggBufferCodeGens are organized as follows:
* +----------+-----------+-----------+---------+---------+----------------+
* | count(*) | count(a') | count(a') | sum(a) | sum(a') | distinct(a) a' |
* +----------+-----------+-----------+---------+---------+----------------+
* */
private var aggBufferCodeGens: Array[AggCodeGen] = _
/**
 * The aggActionCodeGens is organized according to the aggregate calling order, in a tree
 * format: for example, aggregates that are distinct on the same fields are accumulated
 * together once the distinct condition is satisfied. It is only used to generate the methods
 * related to aggregate actions, such as [[genAccumulate()]], [[genRetract()]], [[genMerge()]].
 *
 * For example, if we have:
 * count(*), count(distinct a), count(distinct a) filter d > 5, sum(a), sum(distinct a)
 *
 * then the members of aggActionCodeGens are organized as follows:
*
* +----------------------------------------------------+
* | count(*) | sum(a) | distinct(a) a' |
* | | | |-- count(a') |
* | | | |-- count(a') (filter d > 5) |
* | | | |-- sum(a') |
* +----------------------------------------------------+
*/
private var aggActionCodeGens: Array[AggCodeGen] = _
/**
* Adds constant expressions that act like a second input in the parameter indices.
*/
def withConstants(literals: Seq[RexLiteral]): AggsHandlerCodeGenerator = {
// create constants
val exprGenerator = new ExprCodeGenerator(ctx, INPUT_NOT_NULL)
val exprs = literals.map(exprGenerator.generateExpression)
this.constantExprs = exprs.map(ctx.addReusableConstant(_, nullCheck = true))
this
}
/**
 * Tells the generator to generate the `accumulate(..)` method for the [[AggsHandleFunction]]
 * and [[NamespaceAggsHandleFunction]]. By default, the `accumulate(..)` method is not generated.
*/
def needAccumulate(): AggsHandlerCodeGenerator = {
this.isAccumulateNeeded = true
this
}
/**
 * Tells the generator to generate the `retract(..)` method for the [[AggsHandleFunction]]
 * and [[NamespaceAggsHandleFunction]]. By default, the `retract(..)` method is not generated.
*/
def needRetract(): AggsHandlerCodeGenerator = {
this.isRetractNeeded = true
this
}
/**
* Tells the generator to generate `merge(..)` method with the merged accumulator information
* for the [[AggsHandleFunction]] and [[NamespaceAggsHandleFunction]].
 * By default, the `merge(..)` method is not generated.
 *
 * @param mergedAccOffset the mergedAcc may come from a local aggregate;
 *                        this is the offset of the first merged buffer in the row
 * @param mergedAccOnHeap true if the mergedAcc is on heap, false otherwise
* @param mergedAccExternalTypes the merged acc types
*/
def needMerge(
mergedAccOffset: Int,
mergedAccOnHeap: Boolean,
mergedAccExternalTypes: Array[DataType] = null): AggsHandlerCodeGenerator = {
this.mergedAccOffset = mergedAccOffset
this.mergedAccOnHeap = mergedAccOnHeap
this.mergedAccExternalTypes = mergedAccExternalTypes
this.isMergeNeeded = true
this
}
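  // A hedged usage sketch (purely illustrative; `aggInfoList` and the offsets are assumed to
  // come from the caller): the generator is configured fluently and then asked for a handler.
  //
  //   val handler = new AggsHandlerCodeGenerator(ctx, relBuilder, inputFieldTypes, copyInputField = false)
  //     .needAccumulate()
  //     .needRetract()
  //     .needMerge(mergedAccOffset = 0, mergedAccOnHeap = true)
  //     .generateAggsHandler("GroupAggsHandler", aggInfoList)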
/**
* Adds window properties such as window_start, window_end
*/
private def initialWindowProperties(
windowProperties: Seq[PlannerWindowProperty],
windowClass: Class[_]): Unit = {
this.windowProperties = windowProperties
this.namespaceClassName = windowClass.getCanonicalName
this.hasNamespace = true
}
/**
* Adds aggregate infos into context
*/
private def initialAggregateInformation(aggInfoList: AggregateInfoList): Unit = {
this.accTypeInfo = RowType.of(
aggInfoList.getAccTypes.map(fromDataTypeToLogicalType): _*)
this.aggBufferSize = accTypeInfo.getFieldCount
var aggBufferOffset: Int = 0
if (mergedAccExternalTypes == null) {
mergedAccExternalTypes = aggInfoList.getAccTypes
}
val aggCodeGens = aggInfoList.aggInfos.map { aggInfo =>
val filterExpr = createFilterExpression(
aggInfo.agg.filterArg,
aggInfo.aggIndex,
aggInfo.agg.name)
val codegen = aggInfo.function match {
case _: DeclarativeAggregateFunction =>
new DeclarativeAggCodeGen(
ctx,
aggInfo,
filterExpr,
mergedAccOffset,
aggBufferOffset,
aggBufferSize,
inputFieldTypes,
constantExprs,
relBuilder)
case _: AggregateFunction[_, _] =>
new ImperativeAggCodeGen(
ctx,
aggInfo,
filterExpr,
mergedAccOffset,
aggBufferOffset,
aggBufferSize,
inputFieldTypes,
constantExprs,
relBuilder,
hasNamespace,
mergedAccOnHeap,
mergedAccExternalTypes(aggBufferOffset),
copyInputField)
}
aggBufferOffset = aggBufferOffset + aggInfo.externalAccTypes.length
codegen
}
val distinctCodeGens = aggInfoList.distinctInfos.zipWithIndex.map {
case (distinctInfo, index) =>
val innerCodeGens = distinctInfo.aggIndexes.map(aggCodeGens(_)).toArray
val distinctIndex = aggCodeGens.length + index
val filterExpr = distinctInfo.filterArgs.map(
createFilterExpression(_, distinctIndex, "distinct aggregate"))
val codegen = new DistinctAggCodeGen(
ctx,
distinctInfo,
index,
innerCodeGens,
filterExpr.toArray,
mergedAccOffset,
aggBufferOffset,
aggBufferSize,
hasNamespace,
isMergeNeeded,
mergedAccOnHeap,
distinctInfo.consumeRetraction,
copyInputField,
relBuilder)
// distinct agg buffer occupies only one field
aggBufferOffset += 1
codegen
}
val distinctAggIndexes = aggInfoList.distinctInfos.flatMap(_.aggIndexes)
val nonDistinctAggIndexes = aggCodeGens.indices.filter(!distinctAggIndexes.contains(_)).toArray
this.aggBufferCodeGens = aggCodeGens ++ distinctCodeGens
this.aggActionCodeGens = nonDistinctAggIndexes.map(aggCodeGens(_)) ++ distinctCodeGens
// when the input contains retractions, a count1 agg is inserted into the agg list;
// the count1 agg value shouldn't appear in the aggregate result
if (aggInfoList.indexOfCountStar.nonEmpty && aggInfoList.countStarInserted) {
ignoreAggValues ++= Array(aggInfoList.indexOfCountStar.get)
}
// the distinct value shouldn't be in the aggregate result
if (aggInfoList.distinctInfos.nonEmpty) {
ignoreAggValues ++= distinctCodeGens.indices.map(_ + aggCodeGens.length)
}
}
/**
* Creates filter argument access expression, none if no filter
*/
private def createFilterExpression(
filterArg: Int,
aggIndex: Int,
aggName: String): Option[Expression] = {
if (filterArg >= 0) {
val name = s"agg_${aggIndex}_filter"
val filterType = inputFieldTypes(filterArg)
if (!filterType.isInstanceOf[BooleanType]) {
throw new TableException(s"filter arg must be boolean, but is $filterType, " +
s"the aggregate is $aggName.")
}
Some(new ResolvedAggInputReference(name, filterArg, inputFieldTypes(filterArg)))
} else {
None
}
}
/**
* Generate [[GeneratedAggsHandleFunction]] with the given function name and aggregate infos.
*/
def generateAggsHandler(
name: String,
aggInfoList: AggregateInfoList): GeneratedAggsHandleFunction = {
initialAggregateInformation(aggInfoList)
// generates all methods body first to add necessary reuse code to context
val createAccumulatorsCode = genCreateAccumulators()
val getAccumulatorsCode = genGetAccumulators()
val setAccumulatorsCode = genSetAccumulators()
val resetAccumulatorsCode = genResetAccumulators()
val accumulateCode = genAccumulate()
val retractCode = genRetract()
val mergeCode = genMerge()
val getValueCode = genGetValue()
val functionName = newName(name)
val functionCode =
j"""
public final class $functionName implements $AGGS_HANDLER_FUNCTION {
${ctx.reuseMemberCode()}
public $functionName(java.lang.Object[] references) throws Exception {
${ctx.reuseInitCode()}
}
@Override
public void open($STATE_DATA_VIEW_STORE store) throws Exception {
${ctx.reuseOpenCode()}
}
@Override
public void accumulate($BASE_ROW $ACCUMULATE_INPUT_TERM) throws Exception {
$accumulateCode
}
@Override
public void retract($BASE_ROW $RETRACT_INPUT_TERM) throws Exception {
$retractCode
}
@Override
public void merge($BASE_ROW $MERGED_ACC_TERM) throws Exception {
$mergeCode
}
@Override
public void setAccumulators($BASE_ROW $ACC_TERM) throws Exception {
$setAccumulatorsCode
}
@Override
public void resetAccumulators() throws Exception {
$resetAccumulatorsCode
}
@Override
public $BASE_ROW getAccumulators() throws Exception {
$getAccumulatorsCode
}
@Override
public $BASE_ROW createAccumulators() throws Exception {
$createAccumulatorsCode
}
@Override
public $BASE_ROW getValue() throws Exception {
$getValueCode
}
@Override
public void cleanup() throws Exception {
${ctx.reuseCleanupCode()}
}
@Override
public void close() throws Exception {
${ctx.reuseCloseCode()}
}
}
""".stripMargin
new GeneratedAggsHandleFunction(functionName, functionCode, ctx.references.toArray)
}
/**
* Generate [[GeneratedAggsHandleFunction]] with the given function name and aggregate infos
* and window properties.
*/
def generateNamespaceAggsHandler[N](
name: String,
aggInfoList: AggregateInfoList,
windowProperties: Seq[PlannerWindowProperty],
windowClass: Class[N]): GeneratedNamespaceAggsHandleFunction[N] = {
initialWindowProperties(windowProperties, windowClass)
initialAggregateInformation(aggInfoList)
// generates all methods body first to add necessary reuse code to context
val createAccumulatorsCode = genCreateAccumulators()
val getAccumulatorsCode = genGetAccumulators()
val setAccumulatorsCode = genSetAccumulators()
val accumulateCode = genAccumulate()
val retractCode = genRetract()
val mergeCode = genMerge()
val getValueCode = genGetValue()
val functionName = newName(name)
val functionCode =
j"""
public final class $functionName
implements $NAMESPACE_AGGS_HANDLER_FUNCTION<$namespaceClassName> {
${ctx.reuseMemberCode()}
public $functionName(Object[] references) throws Exception {
${ctx.reuseInitCode()}
}
@Override
public void open($STATE_DATA_VIEW_STORE store) throws Exception {
${ctx.reuseOpenCode()}
}
@Override
public void accumulate($BASE_ROW $ACCUMULATE_INPUT_TERM) throws Exception {
$accumulateCode
}
@Override
public void retract($BASE_ROW $RETRACT_INPUT_TERM) throws Exception {
$retractCode
}
@Override
public void merge(Object ns, $BASE_ROW $MERGED_ACC_TERM) throws Exception {
$namespaceClassName $NAMESPACE_TERM = ($namespaceClassName) ns;
$mergeCode
}
@Override
public void setAccumulators(Object ns, $BASE_ROW $ACC_TERM)
throws Exception {
$namespaceClassName $NAMESPACE_TERM = ($namespaceClassName) ns;
$setAccumulatorsCode
}
@Override
public $BASE_ROW getAccumulators() throws Exception {
$getAccumulatorsCode
}
@Override
public $BASE_ROW createAccumulators() throws Exception {
$createAccumulatorsCode
}
@Override
public $BASE_ROW getValue(Object ns) throws Exception {
$namespaceClassName $NAMESPACE_TERM = ($namespaceClassName) ns;
$getValueCode
}
@Override
public void cleanup(Object ns) throws Exception {
$namespaceClassName $NAMESPACE_TERM = ($namespaceClassName) ns;
${ctx.reuseCleanupCode()}
}
@Override
public void close() throws Exception {
${ctx.reuseCloseCode()}
}
}
""".stripMargin
new GeneratedNamespaceAggsHandleFunction[N](functionName, functionCode, ctx.references.toArray)
}
private def genCreateAccumulators(): String = {
val methodName = "createAccumulators"
ctx.startNewLocalVariableStatement(methodName)
// no need to bind input for ExprCodeGenerator
val exprGenerator = new ExprCodeGenerator(ctx, INPUT_NOT_NULL)
val initAccExprs = aggBufferCodeGens.flatMap(_.createAccumulator(exprGenerator))
val accTerm = newName("acc")
val resultExpr = exprGenerator.generateResultExpression(
initAccExprs,
accTypeInfo,
classOf[GenericRow],
outRow = accTerm,
reusedOutRow = false)
s"""
|${ctx.reuseLocalVariableCode(methodName)}
|${resultExpr.code}
|return ${resultExpr.resultTerm};
""".stripMargin
}
private def genGetAccumulators(): String = {
val methodName = "getAccumulators"
ctx.startNewLocalVariableStatement(methodName)
// no need to bind input
val exprGenerator = new ExprCodeGenerator(ctx, INPUT_NOT_NULL)
val accExprs = aggBufferCodeGens.flatMap(_.getAccumulator(exprGenerator))
val accTerm = newName("acc")
// always create a new accumulator row
val resultExpr = exprGenerator.generateResultExpression(
accExprs,
accTypeInfo,
classOf[GenericRow],
outRow = accTerm,
reusedOutRow = false)
s"""
|${ctx.reuseLocalVariableCode(methodName)}
|${resultExpr.code}
|return ${resultExpr.resultTerm};
""".stripMargin
}
private def genSetAccumulators(): String = {
val methodName = "setAccumulators"
ctx.startNewLocalVariableStatement(methodName)
// bind input1 as accumulators
val exprGenerator = new ExprCodeGenerator(ctx, INPUT_NOT_NULL)
.bindInput(accTypeInfo, inputTerm = ACC_TERM)
val body = aggBufferCodeGens.map(_.setAccumulator(exprGenerator)).mkString("\n")
s"""
|${ctx.reuseLocalVariableCode(methodName)}
|${ctx.reuseInputUnboxingCode(ACC_TERM)}
|$body
""".stripMargin
}
private def genResetAccumulators(): String = {
val methodName = "resetAccumulators"
ctx.startNewLocalVariableStatement(methodName)
val exprGenerator = new ExprCodeGenerator(ctx, INPUT_NOT_NULL)
val body = aggBufferCodeGens.map(_.resetAccumulator(exprGenerator)).mkString("\n")
s"""
|${ctx.reuseLocalVariableCode(methodName)}
|$body
""".stripMargin
}
private def genAccumulate(): String = {
if (isAccumulateNeeded) {
// validation check
checkNeededMethods(needAccumulate = true)
val methodName = "accumulate"
ctx.startNewLocalVariableStatement(methodName)
// bind input1 as inputRow
val exprGenerator = new ExprCodeGenerator(ctx, INPUT_NOT_NULL)
.bindInput(inputType, inputTerm = ACCUMULATE_INPUT_TERM)
val body = aggActionCodeGens.map(_.accumulate(exprGenerator)).mkString("\n")
s"""
|${ctx.reuseLocalVariableCode(methodName)}
|${ctx.reuseInputUnboxingCode(ACCUMULATE_INPUT_TERM)}
|$body
|""".stripMargin
} else {
genThrowException(
"This function not require accumulate method, but the accumulate method is called.")
}
}
private def genRetract(): String = {
if (isRetractNeeded) {
// validation check
checkNeededMethods(needRetract = true)
val methodName = "retract"
ctx.startNewLocalVariableStatement(methodName)
// bind input1 as inputRow
val exprGenerator = new ExprCodeGenerator(ctx, INPUT_NOT_NULL)
.bindInput(inputType, inputTerm = RETRACT_INPUT_TERM)
val body = aggActionCodeGens.map(_.retract(exprGenerator)).mkString("\n")
s"""
|${ctx.reuseLocalVariableCode(methodName)}
|${ctx.reuseInputUnboxingCode(RETRACT_INPUT_TERM)}
|$body
""".stripMargin
} else {
genThrowException(
"This function not require retract method, but the retract method is called.")
}
}
private def genMerge(): String = {
if (isMergeNeeded) {
// validation check
checkNeededMethods(needMerge = true)
val methodName = "merge"
ctx.startNewLocalVariableStatement(methodName)
// the mergedAcc is part of the mergedInput, e.g. <key, acc> in local-global aggregation; the keys are ignored
val internalAccTypes = mergedAccExternalTypes.map(fromDataTypeToLogicalType)
val mergedAccType = if (mergedAccOffset > 0) {
// concat padding types and acc types, use int type as padding
// the padding types will be ignored
val padding = Array.range(0, mergedAccOffset).map(_ => new IntType())
RowType.of(padding ++ internalAccTypes: _*)
} else {
RowType.of(internalAccTypes: _*)
}
// bind input1 as otherAcc
val exprGenerator = new ExprCodeGenerator(ctx, INPUT_NOT_NULL)
.bindInput(mergedAccType, inputTerm = MERGED_ACC_TERM)
val body = aggActionCodeGens.map(_.merge(exprGenerator)).mkString("\n")
s"""
|${ctx.reuseLocalVariableCode(methodName)}
|${ctx.reuseInputUnboxingCode(MERGED_ACC_TERM)}
|$body
""".stripMargin
} else {
genThrowException(
"This function not require merge method, but the merge method is called.")
}
}
private def genGetValue(): String = {
val methodName = "getValue"
ctx.startNewLocalVariableStatement(methodName)
// no need to bind input
val exprGenerator = new ExprCodeGenerator(ctx, INPUT_NOT_NULL)
var valueExprs = aggBufferCodeGens.zipWithIndex.filter { case (_, index) =>
// ignore the count1 agg codegen and distinct agg codegen
ignoreAggValues.isEmpty || !ignoreAggValues.contains(index)
}.map { case (codegen, _) =>
codegen.getValue(exprGenerator)
}
if (hasNamespace) {
// append window property results
val windowExprs = windowProperties.map {
case w: PlannerWindowStart =>
// return a Timestamp(Internal is long)
GeneratedExpression(
s"$NAMESPACE_TERM.getStart()", "false", "", w.resultType)
case w: PlannerWindowEnd =>
// return a Timestamp(Internal is long)
GeneratedExpression(
s"$NAMESPACE_TERM.getEnd()", "false", "", w.resultType)
case r: PlannerRowtimeAttribute =>
// return a rowtime, use long as internal type
GeneratedExpression(
s"$NAMESPACE_TERM.getEnd() - 1", "false", "", r.resultType)
case p: PlannerProctimeAttribute =>
// ignore this property, it will be null at the position later
GeneratedExpression("-1L", "true", "", p.resultType)
}
valueExprs = valueExprs ++ windowExprs
}
val aggValueTerm = newName("aggValue")
valueType = RowType.of(valueExprs.map(_.resultType): _*)
// always create a new result row
val resultExpr = exprGenerator.generateResultExpression(
valueExprs,
valueType,
classOf[GenericRow],
outRow = aggValueTerm,
reusedOutRow = false)
s"""
|${ctx.reuseLocalVariableCode(methodName)}
|${resultExpr.code}
|return ${resultExpr.resultTerm};
""".stripMargin
}
private def checkNeededMethods(
needAccumulate: Boolean = false,
needRetract: Boolean = false,
needMerge: Boolean = false,
needReset: Boolean = false): Unit = {
// check and validate the needed methods
aggBufferCodeGens
.foreach(_.checkNeededMethods(needAccumulate, needRetract, needMerge, needReset))
}
private def genThrowException(msg: String): String = {
s"""
|throw new java.lang.RuntimeException("$msg");
""".stripMargin
}
}
object AggsHandlerCodeGenerator {
/** static terms **/
val ACC_TERM = "acc"
val MERGED_ACC_TERM = "otherAcc"
val ACCUMULATE_INPUT_TERM = "accInput"
val RETRACT_INPUT_TERM = "retractInput"
val DISTINCT_KEY_TERM = "distinctKey"
val NAMESPACE_TERM = "namespace"
val STORE_TERM = "store"
val INPUT_NOT_NULL = false
/**
* Create DataView term, for example, acc1_map_dataview.
*
* @return term to access MapView or ListView
*/
def createDataViewTerm(spec: DataViewSpec): String = {
s"${spec.stateId}_dataview"
}
/**
* Creates BinaryGeneric term which wraps the specific DataView term.
*/
def createDataViewBinaryGenericTerm(spec: DataViewSpec): String = {
s"${createDataViewTerm(spec)}_binary_generic"
}
/**
* Create DataView backup term, for example, acc1_map_dataview_backup.
* The backup dataview term is used for merging two statebackend
* dataviews, e.g. session window.
*
* @return term to access backup MapView or ListView
*/
def createDataViewBackupTerm(spec: DataViewSpec): String = {
s"${spec.stateId}_dataview_backup"
}
/**
* Creates BinaryGeneric term which wraps the specific DataView backup term.
*/
def createDataViewBackupBinaryGenericTerm(spec: DataViewSpec): String = {
s"${createDataViewBackupTerm(spec)}_binary_generic"
}
def addReusableStateDataViews(
ctx: CodeGeneratorContext,
viewSpecs: Array[DataViewSpec],
hasNamespace: Boolean,
enableBackupDataView: Boolean): Unit = {
// add reusable dataviews to context
viewSpecs.foreach { spec =>
val (viewTypeTerm, registerCall) = spec match {
case ListViewSpec(_, _, _) => (className[StateListView[_, _]], "getStateListView")
case MapViewSpec(_, _, _) => (className[StateMapView[_, _, _]], "getStateMapView")
}
val viewFieldTerm = createDataViewTerm(spec)
val viewFieldInternalTerm = createDataViewBinaryGenericTerm(spec)
val viewTypeInfo = ctx.addReusableObject(spec.dataViewTypeInfo, "viewTypeInfo")
val parameters = s""""${spec.stateId}", $viewTypeInfo"""
ctx.addReusableMember(s"private $viewTypeTerm $viewFieldTerm;")
ctx.addReusableMember(s"private $BINARY_GENERIC $viewFieldInternalTerm;")
val openCode =
s"""
|$viewFieldTerm = ($viewTypeTerm) $STORE_TERM.$registerCall($parameters);
|$viewFieldInternalTerm = ${genToInternal(
ctx, fromLegacyInfoToDataType(spec.dataViewTypeInfo), viewFieldTerm)};
""".stripMargin
ctx.addReusableOpenStatement(openCode)
// only cleanup dataview term, do not need to cleanup backup
val cleanupCode = if (hasNamespace) {
s"""
|$viewFieldTerm.setCurrentNamespace($NAMESPACE_TERM);
|$viewFieldTerm.clear();
""".stripMargin
} else {
s"""
|$viewFieldTerm.clear();
""".stripMargin
}
ctx.addReusableCleanupStatement(cleanupCode)
// generate backup dataview codes
if (enableBackupDataView) {
val backupViewTerm = createDataViewBackupTerm(spec)
val backupViewInternalTerm = createDataViewBackupBinaryGenericTerm(spec)
// create backup dataview
ctx.addReusableMember(s"private $viewTypeTerm $backupViewTerm;")
ctx.addReusableMember(s"private $BINARY_GENERIC $backupViewInternalTerm;")
val backupOpenCode =
s"""
|$backupViewTerm = ($viewTypeTerm) $STORE_TERM.$registerCall($parameters);
|$backupViewInternalTerm = ${genToInternal(
ctx, fromLegacyInfoToDataType(spec.dataViewTypeInfo), backupViewTerm)};
""".stripMargin
ctx.addReusableOpenStatement(backupOpenCode)
}
}
}
}
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/codegen/agg/AggsHandlerCodeGenerator.scala | Scala | apache-2.0 | 29,259 |
package edu.banda.coel.server.grid.callable
import com.banda.chemistry.business.ChemistryRunSetting
import com.banda.chemistry.domain.ArtificialChemistry
import com.banda.math.business.dynamics.JavaDoubleStatsSingleRunDynamicsAnalysis
import com.banda.math.domain.StatsSequence
import com.banda.math.domain.dynamics.SingleRunAnalysisResult
import com.banda.math.domain.dynamics.SingleRunAnalysisSpec
import com.banda.chemistry.business.ChemistryRunnableFactory
import com.banda.chemistry.domain.AcCompartment
import com.banda.chemistry.domain.AcSimulationConfig
import com.banda.serverbase.grid.ArgumentCallable
/**
 * @author © Peter Banda
* @since 2014
*/
class GridRerunDerridaAnalysisCall(
chemistryRunnableFactory : ChemistryRunnableFactory,
compartment : AcCompartment,
simulationConfig : AcSimulationConfig,
spec : SingleRunAnalysisSpec) extends ArgumentCallable[SingleRunAnalysisResult, StatsSequence] {
val analysis = new JavaDoubleStatsSingleRunDynamicsAnalysis(spec)
override def call(result : SingleRunAnalysisResult) : StatsSequence = {
val initialState = result.getInitialState
val chemistryRunnable = chemistryRunnableFactory.createNonInteractive(compartment, simulationConfig, None)
val statsSequence = analysis.runDerridaOnly(chemistryRunnable, initialState)
statsSequence.setId(result.getDerridaResults().getId())
statsSequence
}
}
| peterbanda/coel | source/Server/src/main/scala/edu/banda/coel/server/grid/callable/GridRerunDerridaAnalysisCall.scala | Scala | apache-2.0 | 1,376 |
/**
* Copyright (C) 2009-2017 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.actor
import scala.concurrent.duration._
import akka.pattern.ask
import scala.concurrent.Await
import akka.util.Helpers.ConfigOps
/**
* This object contains elements which make writing actors and related code
* more concise, e.g. when trying out actors in the REPL.
*
* For the communication of non-actor code with actors, you may use anonymous
* actors tailored to this job:
*
* {{{
* import ActorDSL._
* import scala.concurrent.util.duration._
*
* implicit val system: ActorSystem = ...
*
* implicit val i = inbox()
* someActor ! someMsg // replies will go to `i`
*
* val reply = i.receive()
* val transformedReply = i.select(5 seconds) {
* case x: Int => 2 * x
* }
* }}}
*
* The `receive` and `select` methods are synchronous, i.e. they block the
* calling thread until an answer from the actor is received or the timeout
* expires. The default timeout is taken from configuration item
* `akka.actor.dsl.default-timeout`.
*
* When defining actors in the REPL, say, you may want to have a look at the
* `Act` trait:
*
* {{{
* import ActorDSL._
*
* val system: ActorSystem = ...
*
* val a = actor(system, "fred")(new Act {
* val b = actor("barney")(new Act {
* ...
* })
*
* become {
* case msg => ...
* }
* })
* }}}
*
* Note that `actor` can be used with an implicit [[akka.actor.ActorRefFactory]]
* as shown with `"barney"` (where the [[akka.actor.ActorContext]] serves this
* purpose), but since nested declarations share the same
 * lexical context "fred"'s ActorContext would be ambiguous
* if the [[akka.actor.ActorSystem]] were declared `implicit` (this could also
* be circumvented by shadowing the name `system` within `"fred"`).
*
* <b>Note:</b> If you want to use an `Act with Stash`, you should use the
* `ActWithStash` trait in order to have the actor get the necessary deque-based
* mailbox setting.
*
* @deprecated Use the normal `actorOf` methods defined on `ActorSystem` and `ActorContext` to create Actors instead.
*/
@deprecated("deprecated Use the normal `actorOf` methods defined on `ActorSystem` and `ActorContext` to create Actors instead.", since = "2.5.0")
object ActorDSL extends dsl.Inbox with dsl.Creators {
protected object Extension extends ExtensionId[Extension] with ExtensionIdProvider {
override def lookup = Extension
override def createExtension(system: ExtendedActorSystem): Extension = new Extension(system)
/**
* Java API: retrieve the ActorDSL extension for the given system.
*/
override def get(system: ActorSystem): Extension = super.get(system)
}
protected class Extension(val system: ExtendedActorSystem) extends akka.actor.Extension with InboxExtension {
private case class MkChild(props: Props, name: String) extends NoSerializationVerificationNeeded
private val boss = system.systemActorOf(Props(
new Actor {
def receive = {
case MkChild(props, name) ⇒ sender() ! context.actorOf(props, name)
case any                  ⇒ sender() ! any
}
}), "dsl").asInstanceOf[RepointableActorRef]
lazy val config = system.settings.config.getConfig("akka.actor.dsl")
val DSLDefaultTimeout = config.getMillisDuration("default-timeout")
def mkChild(p: Props, name: String): ActorRef =
if (boss.isStarted)
boss.underlying.asInstanceOf[ActorCell].attachChild(p, name, systemService = true)
else {
implicit val timeout = system.settings.CreationTimeout
Await.result(boss ? MkChild(p, name), timeout.duration).asInstanceOf[ActorRef]
}
}
}
/**
* An Inbox is an actor-like object which is interrogated from the outside.
* It contains an actor whose reference can be passed to other actors as
 * usual and it can watch other actors' lifecycle.
*/
abstract class Inbox {
/**
* Receive the next message from this Inbox. This call will return immediately
* if the internal actor previously received a message, or it will block for
* up to the specified duration to await reception of a message. If no message
* is received a [[java.util.concurrent.TimeoutException]] will be raised.
*/
@throws(classOf[java.util.concurrent.TimeoutException])
def receive(max: FiniteDuration): Any
/**
* Have the internal actor watch the target actor. When the target actor
* terminates a [[Terminated]] message will be received.
*/
def watch(target: ActorRef): Unit
/**
* Obtain a reference to the internal actor, which can then for example be
* registered with the event stream or whatever else you may want to do with
* an [[ActorRef]].
*/
def getRef(): ActorRef
/**
* Have the internal actor act as the sender of the given message which will
* be sent to the given target. This means that should the target actor reply
* then those replies will be received by this Inbox.
*/
def send(target: ActorRef, msg: AnyRef): Unit
}
object Inbox {
/**
* Create a new Inbox within the given system.
*/
def create(system: ActorSystem): Inbox = ActorDSL.inbox()(system)
}
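// A minimal usage sketch of the Inbox API above (the actor, the message and the 5.seconds
// timeout are illustrative); receive blocks until a reply arrives or the timeout expires.
object InboxExample extends App {
  val system = ActorSystem("inbox-example")
  val echo = system.actorOf(Props(new Actor {
    def receive = { case msg ⇒ sender() ! msg }
  }), "echo")
  val inbox = Inbox.create(system)
  inbox.send(echo, "ping")          // replies from `echo` go back to the inbox
  println(inbox.receive(5.seconds)) // prints "ping", or throws TimeoutException
  system.terminate()
}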
| rorygraves/perf_tester | corpus/akka/akka-actor/src/main/scala/akka/actor/ActorDSL.scala | Scala | apache-2.0 | 5,183 |
package nz
import org.newdawn.slick.GameContainer
trait Agent {
def update(container: GameContainer): Agent
}
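// Minimal illustrative implementation of the trait above: an agent whose state never changes.
case class IdleAgent() extends Agent {
  def update(container: GameContainer): Agent = this
}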
| wohanley/nz | src/main/scala/Agent.scala | Scala | agpl-3.0 | 114 |
object I0{List(I1 I2=>}
| lampepfl/dotty | tests/fuzzy/AE-a0a760f3522486caf279b74367aaaa1ff0085b35.scala | Scala | apache-2.0 | 24 |
package com.shocktrade.server.trading
import java.util.Date
import com.shocktrade.util.DateUtil
/**
* Trading Clock
* @author lawrence.daniels@gmail.com
*/
object TradingClock {
/**
* The time in milliseconds until the next trading day
*/
def getDelayUntilTradingStartInMillis = DateUtil.getTradeStartTime.getTime - (new Date).getTime
def isTradingActive = DateUtil.isTradingActive
def isTradingActive(timeInMillis: Long) = DateUtil.isTradingActive(timeInMillis)
def isTradingActive(date: Date) = DateUtil.isTradingActive(date)
}
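// Hypothetical usage sketch: schedule work for the next trading session using the delay above
// (java.util.Timer is used purely for illustration).
object TradingClockExample extends App {
  val timer = new java.util.Timer()
  timer.schedule(new java.util.TimerTask {
    def run(): Unit = println("Trading day started")
  }, math.max(0L, TradingClock.getDelayUntilTradingStartInMillis))
}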
| ldaniels528/shocktrade-server | app-server/app/com/shocktrade/server/trading/TradingClock.scala | Scala | apache-2.0 | 557 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.physical.stream
import org.apache.flink.table.planner.plan.nodes.FlinkConventions
import org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalTableFunctionScan
import org.apache.flink.table.planner.plan.nodes.physical.stream.StreamPhysicalWindowTableFunction
import org.apache.flink.table.planner.plan.utils.WindowUtil
import org.apache.flink.table.planner.plan.utils.WindowUtil.convertToWindowingStrategy
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall, RelTraitSet}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.calcite.rex.RexCall
/**
* Rule to convert a [[FlinkLogicalTableFunctionScan]] with window table function call
* into a [[StreamPhysicalWindowTableFunction]].
*/
class StreamPhysicalWindowTableFunctionRule extends ConverterRule(
classOf[FlinkLogicalTableFunctionScan],
FlinkConventions.LOGICAL,
FlinkConventions.STREAM_PHYSICAL,
"StreamPhysicalWindowTableFunctionRule") {
override def matches(call: RelOptRuleCall): Boolean = {
val scan: FlinkLogicalTableFunctionScan = call.rel(0)
WindowUtil.isWindowTableFunctionCall(scan.getCall)
}
def convert(rel: RelNode): RelNode = {
val scan: FlinkLogicalTableFunctionScan = rel.asInstanceOf[FlinkLogicalTableFunctionScan]
val traitSet: RelTraitSet = rel.getTraitSet.replace(FlinkConventions.STREAM_PHYSICAL)
val newInput = RelOptRule.convert(scan.getInput(0), FlinkConventions.STREAM_PHYSICAL)
new StreamPhysicalWindowTableFunction(
scan.getCluster,
traitSet,
newInput,
scan.getRowType,
convertToWindowingStrategy(scan.getCall.asInstanceOf[RexCall], newInput.getRowType),
false
)
}
}
object StreamPhysicalWindowTableFunctionRule {
val INSTANCE = new StreamPhysicalWindowTableFunctionRule
}
| StephanEwen/incubator-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/stream/StreamPhysicalWindowTableFunctionRule.scala | Scala | apache-2.0 | 2,677 |
/*
* Copyright 2014 Xored Software, Inc.
* Copyright 2015 Gleb Kanterov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.kanterov.scalajs.react.event
import org.scalajs.dom
import scala.scalajs.js
trait SyntheticEvent extends js.Object {
val bubbles: Boolean = js.native
val cancelable: Boolean = js.native
val currentTarget: dom.EventTarget = js.native
val target: dom.EventTarget = js.native
val nativeEvent: dom.Event = js.native
def preventDefault(): Unit = js.native
def stopPropagation(): Unit = js.native
val defaultPrevented: Boolean = js.native
val eventPhase: Int = js.native
val isTrusted: Boolean = js.native
val timeStamp: js.Date = js.native
val `type`: String = js.native
}
trait ClipboardEvent extends SyntheticEvent {
val clipboardData: dom.DataTransfer = js.native
}
trait KeyboardEvent extends SyntheticEvent {
val altKey: Boolean = js.native
val ctrlKey: Boolean = js.native
val metaKey: Boolean = js.native
val shiftKey: Boolean = js.native
val charCode: Int = js.native
val key: String = js.native
val keyCode: Int = js.native
val locale: String = js.native
val location: Int = js.native
val repeat: Boolean = js.native
val which: Int = js.native
def getModifierState(keyArg: String): Boolean = js.native
}
trait FocusEvent extends SyntheticEvent {
val relatedTarget: dom.EventTarget = js.native
}
trait FormEvent extends SyntheticEvent
trait MouseEvent extends SyntheticEvent {
val altKey: Boolean = js.native
val ctrlKey: Boolean = js.native
val metaKey: Boolean = js.native
val shiftKey: Boolean = js.native
val button: Int = js.native
val buttons: Int = js.native
val clientX: Int = js.native
val clientY: Int = js.native
val pageX: Int = js.native
val pageY: Int = js.native
val screenX: Int = js.native
val screenY: Int = js.native
val relatedTarget: dom.EventTarget = js.native
}
trait TouchEvent extends SyntheticEvent {
val altKey: Boolean = js.native
val ctrlKey: Boolean = js.native
val metaKey: Boolean = js.native
val shiftKey: Boolean = js.native
val changedTouches: dom.TouchList = js.native
val targetTouches: dom.TouchList = js.native
val touches: dom.TouchList = js.native
def getModifierState(keyArg: String): Boolean = js.native
}
trait UIEvent extends SyntheticEvent {
val detail: Int = js.native
val view: dom.Window = js.native
}
trait WheelEvent extends SyntheticEvent {
val deltaMode: Int = js.native
val deltaX: Double = js.native
val deltaY: Double = js.native
val deltaZ: Double = js.native
}
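// Illustrative handler using the KeyboardEvent facade above: only the field access is shown;
// the wiring of the callback into a React component is omitted.
object SyntheticEventExample {
  def onKeyDown(e: KeyboardEvent): Unit = {
    if (e.keyCode == 13 && !e.shiftKey) e.preventDefault() // Enter without Shift
  }
}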
| naderghanbari/scala-js-react | scalajs-react/src/main/scala/com/kanterov/scalajs/react/event/SyntheticEvent.scala | Scala | apache-2.0 | 3,097 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.dstream
import org.apache.spark.streaming.{Duration, DStream, Time}
import org.apache.spark.rdd.RDD
import scala.reflect.ClassTag
private[streaming]
class FilteredDStream[T: ClassTag](
parent: DStream[T],
filterFunc: T => Boolean
) extends DStream[T](parent.ssc) {
override def dependencies = List(parent)
override def slideDuration: Duration = parent.slideDuration
override def compute(validTime: Time): Option[RDD[T]] = {
parent.getOrCompute(validTime).map(_.filter(filterFunc))
}
}
| mkolod/incubator-spark | streaming/src/main/scala/org/apache/spark/streaming/dstream/FilteredDStream.scala | Scala | apache-2.0 | 1,351 |
package domain
import akka.actor.ActorRef
import scala.concurrent.duration._
import com.softwaremill.tagging._
import akka.persistence.fsm.PersistentFSM
import java.time.Instant
import scala.reflect.{ClassTag, classTag}
import java.util.Base64
import services.UserSharder
import domain.identity.DomainEvent
import IdentityAggregate.{State, Data}
object IdentityAggregate {
type Key = String
sealed trait State extends PersistentFSM.FSMState {
override def identifier = this.getClass.getSimpleName.stripSuffix("$")
}
case object Unassociated extends State
case object Associated extends State
sealed trait Data
case class IdentityData(associatedUserId: Option[UserAggregate.Id] = None) extends Data
sealed trait Command extends BaseCommand
case object GetUser extends Command
sealed trait Response extends BaseResponse
sealed trait GetUserResponse extends Response
case class UserFound(userId: UserAggregate.Id) extends GetUserResponse
}
class IdentityAggregate(
userSharder: ActorRef @@ UserSharder.type)
extends PersistentFSM[State, Data, DomainEvent] {
import BaseDomainEvent.now
import domain.identity._
import IdentityAggregate._
val key: IdentityAggregate.Key = new String(Base64.getUrlDecoder().decode(self.path.name), "utf8")
override lazy val persistenceId: String = "Identity-" + self.path.name
startWith(Unassociated, IdentityData())
var notifyWhenAssociated: Seq[ActorRef] = Seq.empty[ActorRef]
when(Unassociated) {
case Event(GetUser, _) =>
notifyWhenAssociated +:= sender
userSharder ! UserSharder.CreateNewUser
stay
case Event(UserAggregate.CreateResponse(userId), _) =>
goto(Associated) applying(AssociatedUser(userId, now))
}
onTransition {
case Unassociated -> Associated =>
// Re-send command now that a reply can be sent
notifyWhenAssociated.foreach { self.tell(GetUser, _) }
// Clear queue because we no longer need it
notifyWhenAssociated = Seq.empty
}
when(Associated) {
case Event(GetUser, IdentityData(Some(userId))) =>
stay replying UserFound(userId)
}
override def applyEvent(domainEvent: DomainEvent, currentData: Data) = (domainEvent, currentData) match {
case (AssociatedUser(userId, _), data: IdentityData) =>
data.copy(associatedUserId = Some(userId))
}
def domainEventClassTag: ClassTag[DomainEvent] = classTag[DomainEvent]
}
| dit4c/dit4c | dit4c-portal/app/domain/IdentityAggregate.scala | Scala | mit | 2,425 |
package org.jetbrains.plugins.scala
package lang
package typeConformance
import com.intellij.openapi.util.io.FileUtil
import com.intellij.openapi.util.text.StringUtil
import com.intellij.openapi.vfs.{CharsetToolkit, LocalFileSystem}
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.psi.{PsiComment, PsiElement}
import org.jetbrains.plugins.scala.base.{FailableTest, ScalaLightCodeInsightFixtureTestAdapter, SharedTestProjectToken}
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.ScalaElementType
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScMethodCall
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScPatternDefinition
import org.jetbrains.plugins.scala.lang.psi.types.result._
import org.jetbrains.plugins.scala.lang.psi.types.{ScType, ScTypeExt, TypePresentationContext}
import org.jetbrains.plugins.scala.util.TestUtils
import org.junit.Assert.fail
import org.junit.experimental.categories.Category
import java.io.File
/**
* User: Alexander Podkhalyuzin
* Date: 10.03.2009
*/
@Category(Array(classOf[TypecheckerTests]))
abstract class TypeConformanceTestBase extends ScalaLightCodeInsightFixtureTestAdapter with FailableTest {
protected val caretMarker = "/*caret*/"
def folderPath: String = TestUtils.getTestDataPath + "/typeConformance/"
override protected def sharedProjectToken = SharedTestProjectToken(this.getClass)
protected def doTest(fileText: String, fileName: String = getTestName(false) + ".scala", checkEquivalence: Boolean = false): Unit = {
configureFromFileText(ScalaFileType.INSTANCE, fileText.trim)
doTestInner(checkEquivalence)
}
private def errorMessage(title: String, expected: Boolean, declaredType: ScType, rhsType: ScType)(implicit tpc: TypePresentationContext) = {
s"""$title
|Expected result: $expected
|declared type: ${declaredType.presentableText}
|rhs type: ${rhsType.presentableText}""".stripMargin
}
private def doTestInner(checkEquivalence: Boolean): Unit = {
implicit val tpc: TypePresentationContext = TypePresentationContext.emptyContext
val (declaredType, rhsType) = declaredAndExpressionTypes()
val expected = expectedResult
if (checkEquivalence) {
val equiv1 = rhsType.equiv(declaredType)
val equiv2 = declaredType.equiv(rhsType)
if (equiv1 != expected || equiv2 != expected) {
if (!shouldPass) return
fail(errorMessage("Equivalence failure", expected, declaredType, rhsType))
}
if (expected) {
val conforms = rhsType.conforms(declaredType)
if (!conforms) {
if (!shouldPass) return
fail(errorMessage("Conformance failure", expected, declaredType, rhsType))
}
}
}
else {
val res: Boolean = rhsType.conforms(declaredType)
if (expected != res) {
if (!shouldPass) return
fail(errorMessage("Conformance failure", expected, declaredType, rhsType))
}
}
if (!shouldPass) fail(failingPassed)
}
protected def doTest(): Unit = {
doTest(false)
}
protected def doTest(checkEquivalence: Boolean): Unit = {
configureFromFile()
doTestInner(checkEquivalence)
}
protected def configureFromFile(fileName: String = getTestName(false) + ".scala"): Unit = {
val filePath = folderPath + fileName
val file = LocalFileSystem.getInstance.findFileByPath(filePath.replace(File.separatorChar, '/'))
assert(file != null, "file " + filePath + " not found")
val fileText = StringUtil.convertLineSeparators(FileUtil.loadFile(new File(file.getCanonicalPath), CharsetToolkit.UTF8))
configureFromFileText(ScalaFileType.INSTANCE, fileText.trim)
}
protected def declaredAndExpressionTypes(): (ScType, ScType) = {
val scalaFile = getFile.asInstanceOf[ScalaFile]
val caretIndex = scalaFile.getText.indexOf(caretMarker)
val patternDef =
if (caretIndex > 0) {
PsiTreeUtil.findElementOfClassAtOffset(scalaFile, caretIndex, classOf[ScPatternDefinition], false)
}
else scalaFile.findLastChildByTypeScala[PsiElement](ScalaElementType.PATTERN_DEFINITION).orNull
assert(patternDef != null, "Not specified expression in range to check conformance.")
val valueDecl = patternDef.asInstanceOf[ScPatternDefinition]
val declaredType = valueDecl.declaredType.getOrElse(sys.error("Must provide type annotation for LHS"))
val expr = valueDecl.expr.getOrElse(sys.error("Expression not found"))
expr.`type`() match {
case Right(rhsType) => (declaredType, rhsType)
case Failure(msg) => sys.error(s"Couldn't compute type of ${expr.getText}: $msg")
}
}
private def expectedResult: Boolean = {
val scalaFile = getFile.asInstanceOf[ScalaFile]
val lastPsi = scalaFile.findElementAt(scalaFile.getText.length - 1)
val text = lastPsi.getText
val output = lastPsi.getNode.getElementType match {
case ScalaTokenTypes.tLINE_COMMENT => text.substring(2).trim
case ScalaTokenTypes.tBLOCK_COMMENT | ScalaTokenTypes.tDOC_COMMENT =>
text.substring(2, text.length - 2).trim
case _ => fail("Test result must be in last comment statement")
}
val expectedResult = java.lang.Boolean.parseBoolean(output.asInstanceOf[String])
expectedResult
}
def doApplicationConformanceTest(fileText: String, fileName: String = "dummy.scala"): Unit = {
import org.junit.Assert._
configureFromFileText(ScalaFileType.INSTANCE, fileText.trim)
val scalaFile = getFile.asInstanceOf[ScalaFile]
val caretIndex = scalaFile.getText.indexOf(caretMarker)
val element = if (caretIndex > 0) {
PsiTreeUtil.findElementOfClassAtOffset(scalaFile, caretIndex, classOf[ScMethodCall], false)
}
else scalaFile.findLastChildByTypeScala[PsiElement](ScalaElementType.METHOD_CALL).orNull
assertNotNull("Failed to locate application",element)
val application = element.asInstanceOf[ScMethodCall]
val errors = scala.collection.mutable.ArrayBuffer[String]()
val expectedResult = scalaFile.findElementAt(scalaFile.getText.length - 1) match {
case c: PsiComment => c.getText
case _ => "True"
}
for ((expr, param) <- application.matchedParameters) {
val exprTp = expr.`type`().getOrElse(throw new RuntimeException(s"Failed to get type of expression(${expr.getText})"))
val res = exprTp.conforms(param.paramType)
if (res != expectedResult.toBoolean)
errors +=
s"""
|Expected: $expectedResult
|Param tp: ${param.paramType.presentableText(TypePresentationContext.emptyContext)}
|Arg tp: ${exprTp.presentableText(TypePresentationContext.emptyContext)}
""".stripMargin
}
assertTrue(if (shouldPass) "Conformance failure:\n" + errors.mkString("\n\n").trim else failingPassed, !shouldPass ^ errors.isEmpty)
}
}
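// A hedged sketch of a concrete test built on the base class above (class and test names are
// illustrative): the declared type, the right-hand side and the expected boolean in the final
// comment follow the conventions read by declaredAndExpressionTypes() and expectedResult.
class SeqConformanceExampleTest extends TypeConformanceTestBase {
  def testListConformsToSeq(): Unit = doTest(
    """val x: Seq[Int] = List(1, 2, 3)
      |//True
      |""".stripMargin
  )
}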
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/typeConformance/TypeConformanceTestBase.scala | Scala | apache-2.0 | 6,961 |
package net.tomasherman.specus.server.grid
import org.specs2.mutable.Specification
import org.specs2.specification.Scope
import org.specs2.mock.Mockito
import org.specs2.matcher.ThrownExpectations
import net.tomasherman.specus.server.api.grid.{NoNodeRegisteredException, NodeID, NodeWithNameAlreadyRegisteredException, Node}
/**
* This file is part of Specus.
*
* Specus is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Specus is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with Specus. If not, see <http://www.gnu.org/licenses/>.
*
*/
class testableSNM extends SpecusNodeManager {
def getCache = this.keysCache
def getIdx = this.balIndex
def setIdx(i:Int) { balIndex = i }
def nameM = this.nameMap
def nodeM = this.nodeMap
def setNameM(map:Map[String,NodeID]) {this.nameMap = map}
def setNodeM(map:Map[NodeID,(Node,String)]) {this.nodeMap = map}
def updateC = this.updateCache
}
trait SNMSScope extends Scope with Mockito with ThrownExpectations{
val mgr = new testableSNM
val n1 = mock[Node]
val n2 = mock[Node]
val n3 = mock[Node]
val name1 = "1"
val name2 = "2"
val name3 = "2" //conflict
val id1 = mock[NodeID]
val id2 = mock[NodeID]
def prepareMaps() {
mgr.setNodeM(Map((id1,(n1,name1)),(id2,(n2,name2))))
mgr.setNameM(Map((name1,id1),(name2,id2)))
mgr.updateC
}
}
class SpecusNodeManagerSpec extends Specification {
"SpecusNodeManager" should {
"register node properly" in new SNMSScope {
val idd1 = mgr.registerNode(n1,name1)
mgr.nameM must_== Map((name1,idd1))
mgr.nodeM must_== Map((idd1,(n1,name1)))
val idd2 = mgr.registerNode(n2,name2)
mgr.nameM must_== Map((name2,idd2),(name1,idd1))
mgr.nodeM must_== Map((idd2,(n2,name2)),(idd1,(n1,name1)))
mgr.registerNode(n3,name3) must throwA[NodeWithNameAlreadyRegisteredException]
}
"remove node properly" in new SNMSScope {
prepareMaps()
mgr.setIdx(1)
mgr.removeNode(id1) //should force balIndex to be 0
mgr.getIdx must_== 0
mgr.nodeM must_== Map((id2,(n2,name2)))
mgr.nameM must_== Map((name2,id2))
mgr.removeNode(id2)
mgr.getIdx must_== 0
mgr.nodeM must_== Map()
mgr.nameM must_== Map()
}
"return names properly" in new SNMSScope{
prepareMaps()
mgr.names must_== Set(name1,name2)
}
"return proper node id" in new SNMSScope{
prepareMaps()
mgr.nodeId(name1) must_== Some(id1)
mgr.nodeId(name2) must_== Some(id2)
mgr.nodeId("rofl ain't exist") must_== None
}
"write to proper node" in new SNMSScope{
prepareMaps()
mgr.writeToNode(id1,null)
there was one(n1).write(null) then no(n2).write(null)
mgr.writeToNode(id2,null)
there was one(n1).write(null) then one(n2).write(null)
}
"balanced write properly" in new SNMSScope {
mgr.balancedWrite(null) must throwA[NoNodeRegisteredException]
prepareMaps()
mgr.balancedWrite(null)
mgr.balancedWrite(null)
mgr.balancedWrite(null)
mgr.balancedWrite(null)
there were two(n1).write(null) then two(n2).write(null)
}
}
}
| tomasherman/specus | server/src/test/scala/net/tomasherman/specus/server/grid/SpecusNodeManagerSpec.scala | Scala | gpl-3.0 | 3,603 |
package org.jetbrains.plugins.scala
package lang.psi.stubs
import com.intellij.psi.stubs.{DefaultStubBuilder, StubElement}
import com.intellij.psi.{PsiElement, PsiFile}
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.stubs.impl.ScFileStubImpl
/**
* User: Alexander Podkhalyuzin
* Date: 12.02.2010
*/
class ScalaFileStubBuilder extends DefaultStubBuilder {
protected override def createStubForFile(file: PsiFile): StubElement[_ <: PsiElement] =
new ScFileStubImpl(file.getViewProvider.getPsi(ScalaLanguage.INSTANCE).asInstanceOf[ScalaFile])
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/stubs/ScalaFileStubBuilder.scala | Scala | apache-2.0 | 606 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.examples.log
import wvlet.airframe.examples.log.Log_01_LogSupport.name
import wvlet.log.LogSupport
/**
*/
object Log_01_LogSupport extends App {
import wvlet.log.LogSupport
val name = "airframe-log"
MyApp
}
object MyApp extends LogSupport {
info(s"Hello ${name}!")
warn("This is a warning message")
debug("Debug messages will not be shown by default")
}
| wvlet/airframe | examples/src/main/scala/wvlet/airframe/examples/log/Log_01_LogSupport.scala | Scala | apache-2.0 | 957 |
package nl.dekkr.feedfrenzy.backend.extractor
import com.typesafe.scalalogging.Logger
import nl.dekkr.feedfrenzy.backend.model.{Action, Article, RawVariables}
import org.slf4j.LoggerFactory
class ArticleListExtractor extends ActionExecutor {
override protected val logger = Logger(LoggerFactory.getLogger("[ArticleListExtractor]"))
val AL = new ArticleLinksExtractor
val AR = new ArticleExtractor
def getList(url: String, input: String, blockActions: List[Action], articleActions: List[Action]): Seq[Article] =
AL.getList(input,blockActions).urls.map{ case e => AR.getArticle(url,e,articleActions)}
def getRawList(url: String, input: String, blockActions: List[Action], articleActions: List[Action]): RawVariables =
AL.getRaw(input,blockActions)
def getRawArticles(url: String, input: String, blockActions: List[Action], articleActions: List[Action]): Seq[RawVariables] =
AL.getList(input,blockActions).urls.map{ case e => RawVariables(variables= AR.getRaw(url,e,articleActions).variables,input=e)}
}
| dekkr/feedfrenzy-backend | src/main/scala/nl/dekkr/feedfrenzy/backend/extractor/ArticleListExtractor.scala | Scala | mit | 1,036 |
package edu.berkeley.veloxms.storage
import java.nio.ByteBuffer
import edu.berkeley.veloxms.util.KryoThreadLocal
object StorageUtils {
def toByteArr[T](id: T): Array[Byte] = {
// val key = ByteBuffer.allocate(8)
// key.putLong(id).array()
//val buffer = ByteBuffer.allocate(12)
val kryo = KryoThreadLocal.kryoTL.get
val result = kryo.serialize(id).array
result
}
// could make this a z-curve key instead
def twoDimensionKey(key1: Long, key2: Long): Array[Byte] = {
val key = ByteBuffer.allocate(16)
key.putLong(key1)
key.putLong(key2)
key.array()
}
}
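// Illustrative: the composite key produced by twoDimensionKey above is simply
// [key1: 8 bytes][key2: 8 bytes], e.g. for a (userId, itemId) pair.
object StorageUtilsExample extends App {
  val key: Array[Byte] = StorageUtils.twoDimensionKey(42L, 7L)
  println(key.length) // 16
}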
| samklr/velox-modelserver | veloxms-core/src/main/scala/edu/berkeley/veloxms/storage/StorageUtils.scala | Scala | apache-2.0 | 607 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.tools
import joptsimple._
import org.I0Itec.zkclient.ZkClient
import kafka.utils._
import kafka.consumer.SimpleConsumer
import kafka.api.{OffsetFetchResponse, OffsetFetchRequest, OffsetRequest}
import kafka.common.{OffsetMetadataAndError, ErrorMapping, BrokerNotAvailableException, TopicAndPartition}
import org.apache.kafka.common.protocol.SecurityProtocol
import org.apache.kafka.common.security.JaasUtils
import scala.collection._
import kafka.client.ClientUtils
import kafka.network.BlockingChannel
import kafka.api.PartitionOffsetRequestInfo
import org.I0Itec.zkclient.exception.ZkNoNodeException
object ConsumerOffsetChecker extends Logging {
private val consumerMap: mutable.Map[Int, Option[SimpleConsumer]] = mutable.Map()
private val offsetMap: mutable.Map[TopicAndPartition, Long] = mutable.Map()
private var topicPidMap: immutable.Map[String, Seq[Int]] = immutable.Map()
private def getConsumer(zkUtils: ZkUtils, bid: Int): Option[SimpleConsumer] = {
try {
zkUtils.readDataMaybeNull(ZkUtils.BrokerIdsPath + "/" + bid)._1 match {
case Some(brokerInfoString) =>
Json.parseFull(brokerInfoString) match {
case Some(m) =>
val brokerInfo = m.asInstanceOf[Map[String, Any]]
val host = brokerInfo.get("host").get.asInstanceOf[String]
val port = brokerInfo.get("port").get.asInstanceOf[Int]
Some(new SimpleConsumer(host, port, 10000, 100000, "ConsumerOffsetChecker"))
case None =>
throw new BrokerNotAvailableException("Broker id %d does not exist".format(bid))
}
case None =>
throw new BrokerNotAvailableException("Broker id %d does not exist".format(bid))
}
} catch {
case t: Throwable =>
println("Could not parse broker info due to " + t.getCause)
None
}
}
private def processPartition(zkUtils: ZkUtils,
group: String, topic: String, pid: Int) {
val topicPartition = TopicAndPartition(topic, pid)
val offsetOpt = offsetMap.get(topicPartition)
val groupDirs = new ZKGroupTopicDirs(group, topic)
val owner = zkUtils.readDataMaybeNull(groupDirs.consumerOwnerDir + "/%s".format(pid))._1
zkUtils.getLeaderForPartition(topic, pid) match {
case Some(bid) =>
val consumerOpt = consumerMap.getOrElseUpdate(bid, getConsumer(zkUtils, bid))
consumerOpt match {
case Some(consumer) =>
val topicAndPartition = TopicAndPartition(topic, pid)
val request =
OffsetRequest(immutable.Map(topicAndPartition -> PartitionOffsetRequestInfo(OffsetRequest.LatestTime, 1)))
val logSize = consumer.getOffsetsBefore(request).partitionErrorAndOffsets(topicAndPartition).offsets.head
val lagString = offsetOpt.map(o => if (o == -1) "unknown" else (logSize - o).toString)
println("%-15s %-30s %-3s %-15s %-15s %-15s %s".format(group, topic, pid, offsetOpt.getOrElse("unknown"), logSize, lagString.getOrElse("unknown"),
owner match {case Some(ownerStr) => ownerStr case None => "none"}))
case None => // ignore
}
case None =>
println("No broker for partition %s - %s".format(topic, pid))
}
}
private def processTopic(zkUtils: ZkUtils, group: String, topic: String) {
topicPidMap.get(topic) match {
case Some(pids) =>
pids.sorted.foreach {
pid => processPartition(zkUtils, group, topic, pid)
}
case None => // ignore
}
}
private def printBrokerInfo() {
println("BROKER INFO")
for ((bid, consumerOpt) <- consumerMap)
consumerOpt match {
case Some(consumer) =>
println("%s -> %s:%d".format(bid, consumer.host, consumer.port))
case None => // ignore
}
}
def main(args: Array[String]) {
warn("WARNING: ConsumerOffsetChecker is deprecated and will be dropped in releases following 0.9.0. Use ConsumerGroupCommand instead.")
val parser = new OptionParser()
val zkConnectOpt = parser.accepts("zookeeper", "ZooKeeper connect string.").
withRequiredArg().defaultsTo("localhost:2181").ofType(classOf[String])
val topicsOpt = parser.accepts("topic",
"Comma-separated list of consumer topics (all topics if absent).").
withRequiredArg().ofType(classOf[String])
val groupOpt = parser.accepts("group", "Consumer group.").
withRequiredArg().ofType(classOf[String])
val channelSocketTimeoutMsOpt = parser.accepts("socket.timeout.ms", "Socket timeout to use when querying for offsets.").
withRequiredArg().ofType(classOf[java.lang.Integer]).defaultsTo(6000)
val channelRetryBackoffMsOpt = parser.accepts("retry.backoff.ms", "Retry back-off to use for failed offset queries.").
withRequiredArg().ofType(classOf[java.lang.Integer]).defaultsTo(3000)
parser.accepts("broker-info", "Print broker info")
parser.accepts("help", "Print this message.")
if(args.length == 0)
CommandLineUtils.printUsageAndDie(parser, "Check the offset of your consumers.")
val options = parser.parse(args : _*)
if (options.has("help")) {
parser.printHelpOn(System.out)
System.exit(0)
}
CommandLineUtils.checkRequiredArgs(parser, options, groupOpt, zkConnectOpt)
val zkConnect = options.valueOf(zkConnectOpt)
val group = options.valueOf(groupOpt)
val groupDirs = new ZKGroupDirs(group)
val channelSocketTimeoutMs = options.valueOf(channelSocketTimeoutMsOpt).intValue()
val channelRetryBackoffMs = options.valueOf(channelRetryBackoffMsOpt).intValue()
val topics = if (options.has(topicsOpt)) Some(options.valueOf(topicsOpt)) else None
var zkUtils: ZkUtils = null
var channel: BlockingChannel = null
try {
zkUtils = ZkUtils(zkConnect,
30000,
30000,
JaasUtils.isZkSecurityEnabled())
val topicList = topics match {
case Some(x) => x.split(",").view.toList
case None => zkUtils.getChildren(groupDirs.consumerGroupDir + "/owners").toList
}
topicPidMap = immutable.Map(zkUtils.getPartitionsForTopics(topicList).toSeq:_*)
val topicPartitions = topicPidMap.flatMap { case(topic, partitionSeq) => partitionSeq.map(TopicAndPartition(topic, _)) }.toSeq
      // assign the outer var (instead of shadowing it with a val) so the finally block can disconnect it
      channel = ClientUtils.channelToOffsetManager(group, zkUtils, channelSocketTimeoutMs, channelRetryBackoffMs)
debug("Sending offset fetch request to coordinator %s:%d.".format(channel.host, channel.port))
channel.send(OffsetFetchRequest(group, topicPartitions))
val offsetFetchResponse = OffsetFetchResponse.readFrom(channel.receive().payload())
debug("Received offset fetch response %s.".format(offsetFetchResponse))
offsetFetchResponse.requestInfo.foreach { case (topicAndPartition, offsetAndMetadata) =>
if (offsetAndMetadata == OffsetMetadataAndError.NoOffset) {
val topicDirs = new ZKGroupTopicDirs(group, topicAndPartition.topic)
          // this group may not have migrated off zookeeper for offsets storage (we don't expose the dual-commit option in this tool),
          // meaning the lag may be off until all the consumers in the group have the same setting for offsets storage
try {
val offset = zkUtils.readData(topicDirs.consumerOffsetDir + "/%d".format(topicAndPartition.partition))._1.toLong
offsetMap.put(topicAndPartition, offset)
} catch {
case z: ZkNoNodeException =>
if(zkUtils.pathExists(topicDirs.consumerOffsetDir))
offsetMap.put(topicAndPartition,-1)
else
throw z
}
}
else if (offsetAndMetadata.error == ErrorMapping.NoError)
offsetMap.put(topicAndPartition, offsetAndMetadata.offset)
else {
println("Could not fetch offset for %s due to %s.".format(topicAndPartition, ErrorMapping.exceptionFor(offsetAndMetadata.error)))
}
}
channel.disconnect()
println("%-15s %-30s %-3s %-15s %-15s %-15s %s".format("Group", "Topic", "Pid", "Offset", "logSize", "Lag", "Owner"))
topicList.sorted.foreach {
topic => processTopic(zkUtils, group, topic)
}
if (options.has("broker-info"))
printBrokerInfo()
for ((_, consumerOpt) <- consumerMap)
consumerOpt match {
case Some(consumer) => consumer.close()
case None => // ignore
}
}
catch {
case t: Throwable =>
println("Exiting due to: %s.".format(t.getMessage))
}
finally {
for (consumerOpt <- consumerMap.values) {
consumerOpt match {
case Some(consumer) => consumer.close()
case None => // ignore
}
}
if (zkUtils != null)
zkUtils.close()
if (channel != null)
channel.disconnect()
}
}
}
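// --- Hedged usage sketch (not part of the original file). Drives the tool with the
// options defined above; the ZooKeeper address, group, and topic are illustrative
// assumptions and require a running cluster to produce output.
object ConsumerOffsetCheckerExample {
  def main(args: Array[String]): Unit =
    ConsumerOffsetChecker.main(Array(
      "--zookeeper", "localhost:2181",
      "--group", "my-consumer-group",
      "--topic", "my-topic",
      "--broker-info"))
}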
|
junrao/kafka
|
core/src/main/scala/kafka/tools/ConsumerOffsetChecker.scala
|
Scala
|
apache-2.0
| 9,870
|
package org.elasticmq
case class NewMessageData(
id: Option[MessageId],
content: String,
messageAttributes: Map[String, MessageAttribute],
nextDelivery: NextDelivery,
messageGroupId: Option[String],
messageDeduplicationId: Option[DeduplicationId],
orderIndex: Int,
tracingId: Option[TracingId],
sequenceNumber: Option[String]
)
|
adamw/elasticmq
|
core/src/main/scala/org/elasticmq/NewMessageData.scala
|
Scala
|
apache-2.0
| 365
|
package chapter.five
object ExerciseEight extends App {
// todo: preconditions checks
// todo: make defaults static fields?
class Car(val manufacturer: String,
val modelName: String,
val modelYear: Int = -1,
var licensePlate: String = "") {
}
}
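// --- Hedged usage sketch (not part of the original exercise; the object name and the
// sample cars are illustrative assumptions). Exercises the default arguments and the
// mutable licensePlate field of the Car class above.
object ExerciseEightDemo extends App {
  import ExerciseEight.Car
  val bare = new Car("Toyota", "Corolla")                 // modelYear = -1, licensePlate = ""
  val full = new Car("Honda", "Civic", 2015, "AB-123-CD")
  full.licensePlate = "ZZ-999-ZZ"                         // licensePlate is a var, so it may be reassigned
  println(s"${bare.manufacturer} ${bare.modelName} ${bare.modelYear} '${bare.licensePlate}'")
  println(s"${full.manufacturer} ${full.modelName} ${full.modelYear} '${full.licensePlate}'")
}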
|
deekim/impatient-scala
|
src/main/scala/chapter/five/ExerciseEight.scala
|
Scala
|
apache-2.0
| 298
|
/**
* Licensed to the Minutemen Group under one or more contributor license
* agreements. See the COPYRIGHT file distributed with this work for
* additional information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package silhouette
/**
* Represents a linked login for an identity (i.e. a local username/password or a Facebook/Google account).
*
* The login info contains the data about the provider that authenticated that identity.
*
* @param providerID The ID of the provider.
* @param providerKey A unique key which identifies a user on this provider (userID, email, ...).
*/
case class LoginInfo(providerID: String, providerKey: String)
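// --- Hedged usage sketch (not part of the original file; the provider IDs and keys are
// illustrative assumptions, not values mandated by silhouette). A credentials-based and
// a social login for the same notional identity.
object LoginInfoExample extends App {
  val local  = LoginInfo(providerID = "credentials", providerKey = "jane.doe@example.com")
  val google = LoginInfo(providerID = "google", providerKey = "108437552817325")
  println(local)                 // LoginInfo(credentials,jane.doe@example.com)
  println(local == local.copy()) // true: case class equality is structural
}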
|
datalek/silhouette
|
silhouette/src/main/scala/silhouette/LoginInfo.scala
|
Scala
|
apache-2.0
| 1,202
|
package play.boilerplate.parser.backend
case class ParserException(msg: String) extends Exception(msg)
|
Romastyi/sbt-play-boilerplate
|
sbt-plugin/lib/src/main/scala/play/boilerplate/parser/backend/ParserException.scala
|
Scala
|
apache-2.0
| 104
|
package au.gov.dva.sopapi.sopref.parsing.implementations.parsers.paragraphReferenceSplitters
import au.gov.dva.sopapi.ConfigurationRuntimeException
import au.gov.dva.sopapi.sopref.parsing.traits.ParaReferenceSplitter
class OldSoPStyleParaReferenceSplitter extends ParaReferenceSplitter {
  val hasSubsRefsRegex = """([0-9]+\([a-z]+\))(\([a-z]+\))""".r
override def hasSubParas(fullReference: String): Boolean = hasSubsRefsRegex.findFirstIn(fullReference).isDefined
override def trySplitParagraphReferenceToMainParagraphAndFirstLevelSubParagraph(fullReference: String): (String, String) = {
if (!hasSubParas(fullReference)) throw new ConfigurationRuntimeException("SoP factor reference does not have sub paragraphs: " + fullReference)
def splitNewStyleSopSubParaReference(wholeRef: String) = {
// take to end of first )
assert(hasSubParas(wholeRef))
val m = hasSubsRefsRegex.findFirstMatchIn(wholeRef).get
(m.group(1), m.group(2))
}
splitNewStyleSopSubParaReference(fullReference)
}
}
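// --- Hedged usage sketch (not part of the original file; the sample reference is an
// illustrative assumption, and it presumes the regex above with both capture groups
// closed). Splits an old-style SoP factor reference into main and sub paragraph.
object OldSoPStyleParaReferenceSplitterExample extends App {
  val splitter = new OldSoPStyleParaReferenceSplitter
  println(splitter.hasSubParas("6(a)(i)")) // true
  println(splitter.trySplitParagraphReferenceToMainParagraphAndFirstLevelSubParagraph("6(a)(i)"))
  // expected: (6(a),(i))
}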
|
govlawtech/dva-sop-api
|
app/src/main/scala/au/gov/dva/sopapi/sopref/parsing/implementations/parsers/paragraphReferenceSplitters/OldStyleParaReferenceSplitter.scala
|
Scala
|
apache-2.0
| 1,041
|
package org.hammerlab.guacamole.reads
import debox.Buffer
import htsjdk.samtools.{ Cigar, CigarOperator }
import org.bdgenomics.adam.util.MdTag
import org.hammerlab.guacamole.Bases
import scala.collection.JavaConversions
object MDTagUtils {
/**
   * Adapted from ADAM's mdTag.getReference to operate on Seq[Byte] instead of strings.
   * We also use a debox Buffer here to avoid an additional array allocation and to ensure
   * we return an Array[Byte].
   *
   * Given an MD tag, read sequence, cigar, and a reference start position, returns the reference.
*
* @param readSequence The base sequence of the read.
* @param cigar The cigar for the read.
* @return A sequence of bytes corresponding to the reference overlapping this read.
*/
def getReference(mdTag: MdTag,
readSequence: Seq[Byte],
cigar: Cigar,
allowNBase: Boolean): Seq[Byte] = {
var referencePos = mdTag.start
var readPos = 0
var reference: Buffer[Byte] = Buffer.empty
// loop over all cigar elements
JavaConversions.asScalaBuffer(cigar.getCigarElements).foreach(cigarElement => {
cigarElement.getOperator match {
case CigarOperator.M | CigarOperator.EQ | CigarOperator.X => {
// if we are a match, loop over bases in element
for (i <- 0 until cigarElement.getLength) {
// if a mismatch, get from the mismatch set, else pull from read
mdTag.mismatches.get(referencePos) match {
case Some(base) => reference += base.toByte
case _ => reference += readSequence(readPos).toByte
}
readPos += 1
referencePos += 1
}
}
case CigarOperator.N if allowNBase => {
referencePos += cigarElement.getLength
reference ++= Buffer.fill(cigarElement.getLength)(Bases.N)
}
case CigarOperator.D => {
// if a delete, get from the delete pool
for (i <- 0 until cigarElement.getLength) {
reference += {
mdTag.deletions.get(referencePos) match {
case Some(base) => base.toByte
case _ => throw new IllegalStateException("Could not find deleted base at cigar offset " + i)
}
}
referencePos += 1
}
}
case _ => {
// ignore inserts
if (cigarElement.getOperator.consumesReadBases) {
readPos += cigarElement.getLength
}
if (cigarElement.getOperator.consumesReferenceBases) {
throw new IllegalArgumentException("Cannot handle operator: " + cigarElement.getOperator)
}
}
}
})
reference.toArray
}
def getReference(read: MappedRead, allowNBase: Boolean): (Long, Seq[Byte]) = {
read.mdTagOpt match {
case None => throw ReferenceWithoutMDTagException(read)
case Some(mdTag) => {
val readReferenceStart = mdTag.start
(readReferenceStart, getReference(mdTag, read.sequence, read.cigar, allowNBase))
}
}
}
/**
* Rebuilds the reference from a set of overlapping and sorted reads
* Fill in N if there is a gap in the reads
*
* @param sortedReads Set of overlapping and sorted reads that are mapped to the reference
* @param referenceStart locus at which to start reference sequence (inclusive)
* @param referenceEnd locus at which to end reference sequence (exclusive)
   * @return A sequence of bytes corresponding to the reference overlapping these reads.
*/
def getReference(sortedReads: Seq[MappedRead],
referenceStart: Long,
referenceEnd: Long): Array[Byte] = {
assert(referenceStart < referenceEnd, s"Reference sequence coordinates invalid, $referenceEnd <= $referenceStart")
def padWithNBases(seq: Array[Byte], startOffset: Int, endOffset: Int): Unit = {
var i = startOffset
while (i < endOffset) {
seq(i) = Bases.N
i += 1
}
}
val referenceSeq: Array[Byte] = Array.ofDim((referenceEnd - referenceStart).toInt)
val readsBuffer = sortedReads.iterator.buffered
var currentLocus = referenceStart
var lastReadStart = 0L
while (readsBuffer.hasNext && currentLocus < referenceEnd) {
val read = readsBuffer.next()
// assume reads are sorted
assume(read.start >= lastReadStart)
// Pad with N's if currentLocus is earlier than the next read
if (currentLocus < read.start) {
val readReferenceStart = math.min(referenceEnd, read.start)
padWithNBases(referenceSeq, (currentLocus - referenceStart).toInt, (readReferenceStart - referenceStart).toInt)
currentLocus = readReferenceStart
}
// Find last read that overlaps the current locus
if (!readsBuffer.hasNext || currentLocus < readsBuffer.head.start) {
val (readReferenceStart, readReferenceSequence) = MDTagUtils.getReference(read, allowNBase = true)
val readReferenceEnd = math.min(readReferenceStart + readReferenceSequence.length, referenceEnd)
(currentLocus until readReferenceEnd).foreach(
locus => {
referenceSeq((locus - referenceStart).toInt) = readReferenceSequence((locus - readReferenceStart).toInt)
}
)
currentLocus = readReferenceEnd
}
lastReadStart = read.start
}
// Pad with N's if referenceEnd is after the last read
if (referenceEnd > currentLocus) {
padWithNBases(referenceSeq, (currentLocus - referenceStart).toInt, (referenceEnd - referenceStart).toInt)
}
referenceSeq
}
}
|
bikash/guacamole
|
src/main/scala/org/hammerlab/guacamole/reads/MDTagUtils.scala
|
Scala
|
apache-2.0
| 5,683
|
package com.ajjpj.adiagram_.ui.mouse
import com.ajjpj.adiagram_.ui.fw.{Command, Digest}
import com.ajjpj.adiagram_.ui.{ADiagramController, ResizeDirection, AScreenPos}
import com.ajjpj.adiagram_.model.diagram.AShapeSpec
import com.ajjpj.adiagram_.geometry.APoint
/**
* @author arno
*/
private[mouse] class ResizingBoxMouseTrackerState(ctrl: ADiagramController, stateMachine: MouseTrackerSM, handles: BoxHandles, dir: ResizeDirection, initialPos: AScreenPos)(implicit digest: Digest)
extends NullMouseTrackerState(ctrl, stateMachine) {
implicit def zoom = ctrl.zoom
private var prevPos = initialPos
override def onDragged(p: AScreenPos) {
val delta = p - prevPos
ctrl.selections.selectedShapes.foreach(doResize(_, delta.toModel))
prevPos = p
}
override def onReleased(p: AScreenPos) {
digest.undoRedo.push(new ResizeCommand(ctrl.selections.selectedShapes, dir, (p - initialPos).toModel))
stateMachine.changeState(new DefaultMouseTrackerState(ctrl, stateMachine))
}
override def cleanup() {
handles.unbind()
}
private def doResize(shape: AShapeSpec, delta: APoint) = {
//TODO limit resizing - maintain a minimum size
val deltaPosX = if(dir.left) delta.x else 0.0
val deltaPosY = if(dir.top) delta.y else 0.0
val deltaWidth = if(dir.left) -delta.x else if(dir.right) delta.x else 0.0
val deltaHeight = if(dir.top) -delta.y else if(dir.bottom) delta.y else 0.0
shape.moveBy ((deltaPosX, deltaPosY))
shape.atomicUpdate {
shape.resizeBy ((deltaWidth, deltaHeight))
}
}
class ResizeCommand(selSnapshot: Traversable[AShapeSpec], dirSnapshot: ResizeDirection, deltaSnapshot: APoint) extends Command {
def name = "Resize" //TODO add type of shape
def isNop = deltaSnapshot == APoint.ZERO
def undo() {selSnapshot.foreach(doResize(_, deltaSnapshot.inverse))}
def redo() {selSnapshot.foreach(doResize(_, deltaSnapshot))}
}
}
|
arnohaase/a-diagram
|
src/main/scala-old/com/ajjpj/adiagram_/ui/mouse/ResizingBoxMouseTrackerState.scala
|
Scala
|
apache-2.0
| 1,940
|
/*
* Copyright Β© 2014 TU Berlin (emma@dima.tu-berlin.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.emmalanguage
package lib.ml.classification
import api._
import lib.linalg._
class EvalSparkSpec extends EvalSpec with SparkAware {
override def actPrecision(h: DVector => Boolean, ps: Seq[Point]) =
withDefaultSparkSession(implicit spark => emma.onSpark {
eval.precision(h)(DataBag(ps))
})
override def actRecall(h: DVector => Boolean, ps: Seq[Point]) =
withDefaultSparkSession(implicit spark => emma.onSpark {
eval.recall(h)(DataBag(ps))
})
override def actF1Score(h: DVector => Boolean, ps: Seq[Point]) =
withDefaultSparkSession(implicit spark => emma.onSpark {
eval.f1score(h)(DataBag(ps))
})
}
|
emmalanguage/emma
|
emma-lib-spark/src/test/scala/org/emmalanguage/lib/ml/classification/EvalSparkSpec.scala
|
Scala
|
apache-2.0
| 1,282
|
package com.programmaticallyspeaking.ncd.nashorn
import com.programmaticallyspeaking.ncd.infra.CompiledScript
object NameConvert {
def sourceNameToUrl(typeName: => String, sourceName: String): String = sourceName match {
case x if x.contains("<eval>") =>
// For evaluated scripts, convert the type name into something that resembles a file URI.
"eval:/" + typeNameToUrl(typeName)
case CompiledScript(cs) =>
cs.url
case _ =>
sourceName // keep it simple
}
private def typeNameToUrl(typeName: String): String = {
typeName
.replace("jdk.nashorn.internal.scripts.", "")
.replace('.', '/')
      .replace('\\', '/')
.replaceAll("[$^_]", "")
.replaceFirst("/eval/?$", "")
}
}
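// --- Hedged usage sketch (not part of the original file; the Nashorn-style type name is
// an illustrative assumption). Shows the URL derived for an evaluated script: the
// package prefix is stripped and '$', '^', '_' are removed.
object NameConvertExample extends App {
  val url = NameConvert.sourceNameToUrl("jdk.nashorn.internal.scripts.Script$1", "<eval>")
  println(url) // eval:/Script1
}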
|
provegard/ncdbg
|
src/main/scala/com/programmaticallyspeaking/ncd/nashorn/NameConvert.scala
|
Scala
|
bsd-3-clause
| 745
|
package org.moe.runtime
import scala.util.matching.Regex
object MoeUtil {
// perlish modulo
def perlModuloOp(a: Int, b: Int): Int = {
val res = a % b
if (a < 0)
if (b > 0 && res < 0) res + b else res
else
if (b < 0) res + b else res
}
// strings matching the alphanumeric pattern
  // /^[a-zA-Z]*[0-9]*\z/ are incrementable
def magicalStringIncrement(str: String): String = {
def incr_numeric_part(n: String) = {
val len = n.length
("%0" + len + "d").format(n.toInt + 1).toString
}
def incr_alpha_part(s: String) = {
def succ(c: Char, carry: Boolean): (Char, Boolean) = {
if (carry)
c match {
case 'z' => ('a', true)
case 'Z' => ('A', true)
case _ => ((c.toInt + 1).toChar, false)
}
else
(c, false)
}
var carry = true
val incremented = (for (c <- s.reverse) yield {
val succ_c = succ(c, carry)
carry = succ_c._2
succ_c._1
}).reverse
(if (carry) (incremented.head.toString ++ incremented) else incremented).mkString
}
def increment_in_parts(alpha: String, numeric: String) = {
if (alpha.isEmpty) {
// no alpha prefix; increment as numeric
incr_numeric_part(numeric)
} else if (numeric.isEmpty) {
// only alpha part
incr_alpha_part(alpha)
} else {
// both alpha and numeric parts exist. increment numeric
// part first; if it carries over then increment alpha part
val next_n = incr_numeric_part(numeric)
if (next_n.length == numeric.length)
alpha + next_n
else
incr_alpha_part(alpha) + next_n.tail
}
}
    val alpha_numeric = """^([a-zA-Z]*)([0-9]*)\z""".r
str match {
case alpha_numeric(alpha, numeric) => increment_in_parts(alpha, numeric)
case _ => throw new MoeErrors.MoeException("string is not incrementable")
}
}
}
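// --- Hedged usage sketch (not part of the original file; sample inputs are illustrative
// assumptions). Demonstrates the Perl-style modulo (the result takes the sign of the
// divisor) and the alphanumeric string increment described in the comments above.
object MoeUtilExample extends App {
  println(MoeUtil.perlModuloOp(-7, 3))          // 2, as in Perl (Scala's -7 % 3 is -1)
  println(MoeUtil.perlModuloOp(7, -3))          // -2
  println(MoeUtil.magicalStringIncrement("az")) // ba
  println(MoeUtil.magicalStringIncrement("Zz")) // AAa
  println(MoeUtil.magicalStringIncrement("a9")) // b0
}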
|
MoeOrganization/moe
|
src/main/scala/org/moe/runtime/MoeUtil.scala
|
Scala
|
mit
| 1,983
|
package $package$.drawerNavigation
import sri.universal.styles.InlineStyleSheetUniversal
import scala.scalajs.js
object GlobalStyles extends InlineStyleSheetUniversal {
import dsl._
val navScreenContainer = style(flex := 1)
val wholeContainer = style(flex := 1)
val drawerHeader = style(height := 200,
justifyContent.center,
alignItems.center,
backgroundColor := Colors.red500)
val drawerHeaderLogo =
style(fontWeight.bold, fontSize := 20, color := "white")
val drawerItems = style(marginTop := 0)
val sampleText =
style(margin := 14, shadowOffset := js.Dynamic.literal(height = 10))
}
|
scalajs-react-interface/mobile.g8
|
src/main/scaffolds/drawerNavigation/src/main/scala/$package$/drawerNavigation/GlobalStyles.scala
|
Scala
|
apache-2.0
| 699
|
package at.ac.tuwien.ifs.ir.evaluation.pool
import at.ac.tuwien.ifs.ir.model.{Document, QRels, Runs}
/**
* Created by aldo on 10/09/15.
*/
class Pool(val lRuns: List[Runs], gT: QRels) {
def getName : String = ???
lazy val pool: Map[Int, Set[Document]] = ???
lazy val qRels:QRels = gT
def getNewInstance(lRuns: List[Runs]): Pool = Pool(lRuns, gT)
}
object Pool {
def apply(lRuns: List[Runs], gT: QRels) = new Pool(lRuns, gT)
}
|
aldolipani/PoolBiasEstimators
|
src/main/scala/at/ac/tuwien/ifs/ir/evaluation/pool/Pool.scala
|
Scala
|
apache-2.0
| 446
|
package toguru.api
/**
* The toguru client containing providers of the data needed for making toggling decisions.
*
* For usage examples, see [[toguru.play.PlaySupport]]
*
* @param clientProvider the client provider that extracts client information relevant for toggling.
* @param activationsProvider the activations provider that returns the current activation state.
 * @tparam T the input type of the client info provider.
*/
class ToguruClient[T](val clientProvider: ClientInfo.Provider[T], val activationsProvider: Activations.Provider) {
/**
* Create a new toggling information instance from the given input
*
* @param input the input for creating the client information
   * @return the toggling information built from the given input
*/
def apply(input: T): Toggling = TogglingInfo(clientProvider(input), activationsProvider())
/**
* Check whether the toguru system is healthy
*
   * @return true if the toguru system reports itself healthy, false otherwise
*/
def healthy(): Boolean = activationsProvider.healthy()
}
|
andreas-schroeder/toguru-scala-client
|
src/main/scala/toguru/api/ToguruClient.scala
|
Scala
|
mit
| 966
|
/*                     __                                               *\
**     ________ ___   / /  ___     Scala API                            **
**    / __/ __// _ | / /  / _ |    (c) 2007-2013, LAMP/EPFL             **
**  __\ \/ /__/ __ |/ /__/ __ |    http://scala-lang.org/               **
** /____/\___/_/ |_/____/_/ | |                                         **
**                          |/                                          **
\*                                                                      */
package scala
package math
import scala.language.implicitConversions
import java.math.{ MathContext, BigDecimal => BigDec }
import scala.collection.immutable.NumericRange
/**
* @author Stephane Micheloud
* @author Rex Kerr
* @version 1.1
* @since 2.7
*/
object BigDecimal {
private final val maximumHashScale = 4934 // Quit maintaining hash identity with BigInt beyond this scale
private final val hashCodeNotComputed = 0x5D50690F // Magic value (happens to be "BigDecimal" old MurmurHash3 value)
private final val deci2binary = 3.3219280948873626 // Ratio of log(10) to log(2)
private val minCached = -512
private val maxCached = 512
val defaultMathContext = MathContext.DECIMAL128
/** Cache only for defaultMathContext using BigDecimals in a small range. */
private lazy val cache = new Array[BigDecimal](maxCached - minCached + 1)
object RoundingMode extends Enumeration {
// Annoying boilerplate to ensure consistency with java.math.RoundingMode
import java.math.{RoundingMode => RM}
type RoundingMode = Value
val UP = Value(RM.UP.ordinal)
val DOWN = Value(RM.DOWN.ordinal)
val CEILING = Value(RM.CEILING.ordinal)
val FLOOR = Value(RM.FLOOR.ordinal)
val HALF_UP = Value(RM.HALF_UP.ordinal)
val HALF_DOWN = Value(RM.HALF_DOWN.ordinal)
val HALF_EVEN = Value(RM.HALF_EVEN.ordinal)
val UNNECESSARY = Value(RM.UNNECESSARY.ordinal)
}
/** Constructs a `BigDecimal` using the decimal text representation of `Double` value `d`, rounding if necessary. */
def decimal(d: Double, mc: MathContext): BigDecimal =
new BigDecimal(new BigDec(java.lang.Double.toString(d), mc), mc)
/** Constructs a `BigDecimal` using the decimal text representation of `Double` value `d`. */
def decimal(d: Double): BigDecimal = decimal(d, defaultMathContext)
/** Constructs a `BigDecimal` using the decimal text representation of `Float` value `f`, rounding if necessary.
* Note that `BigDecimal.decimal(0.1f) != 0.1f` since equality agrees with the `Double` representation, and
* `0.1 != 0.1f`.
*/
def decimal(f: Float, mc: MathContext): BigDecimal =
new BigDecimal(new BigDec(java.lang.Float.toString(f), mc), mc)
/** Constructs a `BigDecimal` using the decimal text representation of `Float` value `f`.
* Note that `BigDecimal.decimal(0.1f) != 0.1f` since equality agrees with the `Double` representation, and
* `0.1 != 0.1f`.
*/
def decimal(f: Float): BigDecimal = decimal(f, defaultMathContext)
// This exists solely to avoid conversion from Int/Long to Float, screwing everything up.
/** Constructs a `BigDecimal` from a `Long`, rounding if necessary. This is identical to `BigDecimal(l, mc)`. */
def decimal(l: Long, mc: MathContext): BigDecimal = apply(l, mc)
// This exists solely to avoid conversion from Int/Long to Float, screwing everything up.
/** Constructs a `BigDecimal` from a `Long`. This is identical to `BigDecimal(l)`. */
def decimal(l: Long): BigDecimal = apply(l)
/** Constructs a `BigDecimal` using a `java.math.BigDecimal`, rounding if necessary. */
def decimal(bd: BigDec, mc: MathContext): BigDecimal = new BigDecimal(bd.round(mc), mc)
/** Constructs a `BigDecimal` by expanding the binary fraction
* contained by `Double` value `d` into a decimal representation,
* rounding if necessary. When a `Float` is converted to a
* `Double`, the binary fraction is preserved, so this method
* also works for converted `Float`s.
*/
def binary(d: Double, mc: MathContext): BigDecimal = new BigDecimal(new BigDec(d, mc), mc)
/** Constructs a `BigDecimal` by expanding the binary fraction
* contained by `Double` value `d` into a decimal representation.
* Note: this also works correctly on converted `Float`s.
*/
def binary(d: Double): BigDecimal = binary(d, defaultMathContext)
/** Constructs a `BigDecimal` from a `java.math.BigDecimal`. The
* precision is the default for `BigDecimal` or enough to represent
* the `java.math.BigDecimal` exactly, whichever is greater.
*/
def exact(repr: BigDec): BigDecimal = {
val mc =
if (repr.precision <= defaultMathContext.getPrecision) defaultMathContext
else new MathContext(repr.precision, java.math.RoundingMode.HALF_EVEN)
new BigDecimal(repr, mc)
}
/** Constructs a `BigDecimal` by fully expanding the binary fraction
* contained by `Double` value `d`, adjusting the precision as
* necessary. Note: this works correctly on converted `Float`s also.
*/
def exact(d: Double): BigDecimal = exact(new BigDec(d))
/** Constructs a `BigDecimal` that exactly represents a `BigInt`.
*/
def exact(bi: BigInt): BigDecimal = exact(new BigDec(bi.bigInteger))
/** Constructs a `BigDecimal` that exactly represents a `Long`. Note that
* all creation methods for `BigDecimal` that do not take a `MathContext`
* represent a `Long`; this is equivalent to `apply`, `valueOf`, etc..
*/
def exact(l: Long): BigDecimal = apply(l)
/** Constructs a `BigDecimal` that exactly represents the number
* specified in a `String`.
*/
def exact(s: String): BigDecimal = exact(new BigDec(s))
/** Constructs a `BigDecimal` that exactly represents the number
* specified in base 10 in a character array.
*/
def exact(cs: Array[Char]): BigDecimal = exact(new BigDec(cs))
/** Constructs a `BigDecimal` using the java BigDecimal static
* valueOf constructor. Equivalent to `BigDecimal.decimal`.
*
* @param d the specified double value
* @return the constructed `BigDecimal`
*/
def valueOf(d: Double): BigDecimal = apply(BigDec valueOf d)
/** Constructs a `BigDecimal` using the java BigDecimal static
* valueOf constructor, specifying a `MathContext` that is
* used for computations but isn't used for rounding. Use
* `BigDecimal.decimal` to use `MathContext` for rounding,
* or `BigDecimal(java.math.BigDecimal.valueOf(d), mc)` for
* no rounding.
*
* @param d the specified double value
* @param mc the `MathContext` used for future computations
* @return the constructed `BigDecimal`
*/
@deprecated("MathContext is not applied to Doubles in valueOf. Use BigDecimal.decimal to use rounding, or java.math.BigDecimal.valueOf to avoid it.", "2.11.0")
def valueOf(d: Double, mc: MathContext): BigDecimal = apply(BigDec valueOf d, mc)
/** Constructs a `BigDecimal` using the java BigDecimal static
* valueOf constructor.
*
* @param x the specified `Long` value
* @return the constructed `BigDecimal`
*/
def valueOf(x: Long): BigDecimal = apply(x)
/** Constructs a `BigDecimal` using the java BigDecimal static
* valueOf constructor. This is unlikely to do what you want;
* use `valueOf(f.toDouble)` or `decimal(f)` instead.
*/
@deprecated("Float arguments to valueOf may not do what you wish. Use decimal or valueOf(f.toDouble).", "2.11.0")
def valueOf(f: Float): BigDecimal = valueOf(f.toDouble)
/** Constructs a `BigDecimal` using the java BigDecimal static
* valueOf constructor. This is unlikely to do what you want;
* use `valueOf(f.toDouble)` or `decimal(f)` instead.
*/
@deprecated("Float arguments to valueOf may not do what you wish. Use decimal or valueOf(f.toDouble).", "2.11.0")
def valueOf(f: Float, mc: MathContext): BigDecimal = valueOf(f.toDouble, mc)
/** Constructs a `BigDecimal` whose value is equal to that of the
* specified `Integer` value.
*
* @param i the specified integer value
* @return the constructed `BigDecimal`
*/
def apply(i: Int): BigDecimal = apply(i, defaultMathContext)
/** Constructs a `BigDecimal` whose value is equal to that of the
* specified `Integer` value, rounding if necessary.
*
* @param i the specified integer value
* @param mc the precision and rounding mode for creation of this value and future operations on it
* @return the constructed `BigDecimal`
*/
def apply(i: Int, mc: MathContext): BigDecimal =
if (mc == defaultMathContext && minCached <= i && i <= maxCached) {
val offset = i - minCached
var n = cache(offset)
if (n eq null) { n = new BigDecimal(BigDec.valueOf(i.toLong), mc); cache(offset) = n }
n
}
else apply(i.toLong, mc)
/** Constructs a `BigDecimal` whose value is equal to that of the
* specified long value.
*
* @param l the specified long value
* @return the constructed `BigDecimal`
*/
def apply(l: Long): BigDecimal =
if (minCached <= l && l <= maxCached) apply(l.toInt)
else new BigDecimal(BigDec.valueOf(l), defaultMathContext)
/** Constructs a `BigDecimal` whose value is equal to that of the
* specified long value, but rounded if necessary.
*
* @param l the specified long value
* @param mc the precision and rounding mode for creation of this value and future operations on it
* @return the constructed `BigDecimal`
*/
def apply(l: Long, mc: MathContext): BigDecimal =
new BigDecimal(new BigDec(l, mc), mc)
/** Constructs a `BigDecimal` whose unscaled value is equal to that
* of the specified long value.
*
* @param unscaledVal the value
* @param scale the scale
* @return the constructed `BigDecimal`
*/
def apply(unscaledVal: Long, scale: Int): BigDecimal =
apply(BigInt(unscaledVal), scale)
/** Constructs a `BigDecimal` whose unscaled value is equal to that
* of the specified long value, but rounded if necessary.
*
* @param unscaledVal the value
* @param scale the scale
* @param mc the precision and rounding mode for creation of this value and future operations on it
* @return the constructed `BigDecimal`
*/
def apply(unscaledVal: Long, scale: Int, mc: MathContext): BigDecimal =
apply(BigInt(unscaledVal), scale, mc)
/** Constructs a `BigDecimal` whose value is equal to that of the
* specified double value. Equivalent to `BigDecimal.decimal`.
*
* @param d the specified `Double` value
* @return the constructed `BigDecimal`
*/
def apply(d: Double): BigDecimal = decimal(d, defaultMathContext)
// note we don't use the static valueOf because it doesn't let us supply
// a MathContext, but we should be duplicating its logic, modulo caching.
/** Constructs a `BigDecimal` whose value is equal to that of the
* specified double value, but rounded if necessary. Equivalent to
* `BigDecimal.decimal`.
*
* @param d the specified `Double` value
* @param mc the precision and rounding mode for creation of this value and future operations on it
* @return the constructed `BigDecimal`
*/
def apply(d: Double, mc: MathContext): BigDecimal = decimal(d, mc)
@deprecated("The default conversion from Float may not do what you want. Use BigDecimal.decimal for a String representation, or explicitly convert the Float with .toDouble.", "2.11.0")
def apply(x: Float): BigDecimal = apply(x.toDouble)
@deprecated("The default conversion from Float may not do what you want. Use BigDecimal.decimal for a String representation, or explicitly convert the Float with .toDouble.", "2.11.0")
def apply(x: Float, mc: MathContext): BigDecimal = apply(x.toDouble, mc)
/** Translates a character array representation of a `BigDecimal`
* into a `BigDecimal`.
*/
def apply(x: Array[Char]): BigDecimal = exact(x)
/** Translates a character array representation of a `BigDecimal`
* into a `BigDecimal`, rounding if necessary.
*/
def apply(x: Array[Char], mc: MathContext): BigDecimal =
new BigDecimal(new BigDec(x, mc), mc)
/** Translates the decimal String representation of a `BigDecimal`
* into a `BigDecimal`.
*/
def apply(x: String): BigDecimal = exact(x)
/** Translates the decimal String representation of a `BigDecimal`
* into a `BigDecimal`, rounding if necessary.
*/
def apply(x: String, mc: MathContext): BigDecimal =
new BigDecimal(new BigDec(x, mc), mc)
/** Constructs a `BigDecimal` whose value is equal to that of the
* specified `BigInt` value.
*
* @param x the specified `BigInt` value
* @return the constructed `BigDecimal`
*/
def apply(x: BigInt): BigDecimal = exact(x)
/** Constructs a `BigDecimal` whose value is equal to that of the
* specified `BigInt` value, rounding if necessary.
*
* @param x the specified `BigInt` value
* @param mc the precision and rounding mode for creation of this value and future operations on it
* @return the constructed `BigDecimal`
*/
def apply(x: BigInt, mc: MathContext): BigDecimal =
new BigDecimal(new BigDec(x.bigInteger, mc), mc)
/** Constructs a `BigDecimal` whose unscaled value is equal to that
* of the specified `BigInt` value.
*
* @param unscaledVal the specified `BigInt` value
* @param scale the scale
* @return the constructed `BigDecimal`
*/
def apply(unscaledVal: BigInt, scale: Int): BigDecimal =
exact(new BigDec(unscaledVal.bigInteger, scale))
/** Constructs a `BigDecimal` whose unscaled value is equal to that
* of the specified `BigInt` value.
*
* @param unscaledVal the specified `BigInt` value
* @param scale the scale
* @param mc the precision and rounding mode for creation of this value and future operations on it
* @return the constructed `BigDecimal`
*/
def apply(unscaledVal: BigInt, scale: Int, mc: MathContext): BigDecimal =
new BigDecimal(new BigDec(unscaledVal.bigInteger, scale, mc), mc)
/** Constructs a `BigDecimal` from a `java.math.BigDecimal`. */
def apply(bd: BigDec): BigDecimal = apply(bd, defaultMathContext)
@deprecated("This method appears to round a java.math.BigDecimal but actually doesn't. Use new BigDecimal(bd, mc) instead for no rounding, or BigDecimal.decimal(bd, mc) for rounding.", "2.11.0")
def apply(bd: BigDec, mc: MathContext): BigDecimal = new BigDecimal(bd, mc)
/** Implicit conversion from `Int` to `BigDecimal`. */
implicit def int2bigDecimal(i: Int): BigDecimal = apply(i)
/** Implicit conversion from `Long` to `BigDecimal`. */
implicit def long2bigDecimal(l: Long): BigDecimal = apply(l)
/** Implicit conversion from `Double` to `BigDecimal`. */
implicit def double2bigDecimal(d: Double): BigDecimal = decimal(d)
/** Implicit conversion from `java.math.BigDecimal` to `scala.BigDecimal`. */
implicit def javaBigDecimal2bigDecimal(x: BigDec): BigDecimal = apply(x)
}
/**
* `BigDecimal` represents decimal floating-point numbers of arbitrary precision.
* By default, the precision approximately matches that of IEEE 128-bit floating
* point numbers (34 decimal digits, `HALF_EVEN` rounding mode). Within the range
* of IEEE binary128 numbers, `BigDecimal` will agree with `BigInt` for both
* equality and hash codes (and will agree with primitive types as well). Beyond
* that range--numbers with more than 4934 digits when written out in full--the
* `hashCode` of `BigInt` and `BigDecimal` is allowed to diverge due to difficulty
* in efficiently computing both the decimal representation in `BigDecimal` and the
* binary representation in `BigInt`.
*
* When creating a `BigDecimal` from a `Double` or `Float`, care must be taken as
* the binary fraction representation of `Double` and `Float` does not easily
* convert into a decimal representation. Three explicit schemes are available
* for conversion. `BigDecimal.decimal` will convert the floating-point number
* to a decimal text representation, and build a `BigDecimal` based on that.
* `BigDecimal.binary` will expand the binary fraction to the requested or default
* precision. `BigDecimal.exact` will expand the binary fraction to the
* full number of digits, thus producing the exact decimal value corresponding to
* the binary fraction of that floating-point number. `BigDecimal` equality
* matches the decimal expansion of `Double`: `BigDecimal.decimal(0.1) == 0.1`.
* Note that since `0.1f != 0.1`, the same is not true for `Float`. Instead,
* `0.1f == BigDecimal.decimal((0.1f).toDouble)`.
*
* To test whether a `BigDecimal` number can be converted to a `Double` or
* `Float` and then back without loss of information by using one of these
* methods, test with `isDecimalDouble`, `isBinaryDouble`, or `isExactDouble`
* or the corresponding `Float` versions. Note that `BigInt`'s `isValidDouble`
* will agree with `isExactDouble`, not the `isDecimalDouble` used by default.
*
* `BigDecimal` uses the decimal representation of binary floating-point numbers
* to determine equality and hash codes. This yields different answers than
* conversion between `Long` and `Double` values, where the exact form is used.
* As always, since floating-point is a lossy representation, it is advisable to
* take care when assuming identity will be maintained across multiple conversions.
*
* `BigDecimal` maintains a `MathContext` that determines the rounding that
* is applied to certain calculations. In most cases, the value of the
* `BigDecimal` is also rounded to the precision specified by the `MathContext`.
* To create a `BigDecimal` with a different precision than its `MathContext`,
* use `new BigDecimal(new java.math.BigDecimal(...), mc)`. Rounding will
* be applied on those mathematical operations that can dramatically change the
* number of digits in a full representation, namely multiplication, division,
* and powers. The left-hand argument's `MathContext` always determines the
* degree of rounding, if any, and is the one propagated through arithmetic
* operations that do not apply rounding themselves.
*
* @author Stephane Micheloud
* @author Rex Kerr
* @version 1.1
*/
final class BigDecimal(val bigDecimal: BigDec, val mc: MathContext)
extends ScalaNumber with ScalaNumericConversions with Serializable with Ordered[BigDecimal] {
def this(bigDecimal: BigDec) = this(bigDecimal, BigDecimal.defaultMathContext)
import BigDecimal.RoundingMode._
import BigDecimal.{decimal, binary, exact}
if (bigDecimal eq null) throw new IllegalArgumentException("null value for BigDecimal")
if (mc eq null) throw new IllegalArgumentException("null MathContext for BigDecimal")
// There was an implicit to cut down on the wrapper noise for BigDec -> BigDecimal.
// However, this may mask introduction of surprising behavior (e.g. lack of rounding
// where one might expect it). Wrappers should be applied explicitly with an
// eye to correctness.
// Sane hash code computation (which is surprisingly hard).
// Note--not lazy val because we can't afford the extra space.
private final var computedHashCode: Int = BigDecimal.hashCodeNotComputed
private final def computeHashCode(): Unit = {
computedHashCode =
if (isWhole && (precision - scale) < BigDecimal.maximumHashScale) toBigInt.hashCode
else if (isDecimalDouble) doubleValue.##
else {
val temp = bigDecimal.stripTrailingZeros
scala.util.hashing.MurmurHash3.mixLast( temp.scaleByPowerOfTen(temp.scale).toBigInteger.hashCode, temp.scale )
}
}
/** Returns the hash code for this BigDecimal.
* Note that this does not merely use the underlying java object's
* `hashCode` because we compare `BigDecimal`s with `compareTo`
* which deems 2 == 2.00, whereas in java these are unequal
* with unequal `hashCode`s. These hash codes agree with `BigInt`
* for whole numbers up ~4934 digits (the range of IEEE 128 bit floating
* point). Beyond this, hash codes will disagree; this prevents the
* explicit representation of the `BigInt` form for `BigDecimal` values
* with large exponents.
*/
override def hashCode(): Int = {
if (computedHashCode == BigDecimal.hashCodeNotComputed) computeHashCode
computedHashCode
}
/** Compares this BigDecimal with the specified value for equality. Where `Float` and `Double`
* disagree, `BigDecimal` will agree with the `Double` value
*/
override def equals (that: Any): Boolean = that match {
case that: BigDecimal => this equals that
case that: BigInt =>
that.bitLength > (precision-scale-2)*BigDecimal.deci2binary &&
this.toBigIntExact.exists(that equals _)
case that: Double =>
!that.isInfinity && {
val d = toDouble
!d.isInfinity && d == that && equals(decimal(d))
}
case that: Float =>
!that.isInfinity && {
val f = toFloat
!f.isInfinity && f == that && equals(decimal(f.toDouble))
}
case _ => isValidLong && unifiedPrimitiveEquals(that)
}
override def isValidByte = noArithmeticException(toByteExact)
override def isValidShort = noArithmeticException(toShortExact)
override def isValidChar = isValidInt && toIntExact >= Char.MinValue && toIntExact <= Char.MaxValue
override def isValidInt = noArithmeticException(toIntExact)
def isValidLong = noArithmeticException(toLongExact)
/** Tests whether the value is a valid Float. "Valid" has several distinct meanings, however. Use
* `isExactFloat`, `isBinaryFloat`, or `isDecimalFloat`, depending on the intended meaning.
* By default, `decimal` creation is used, so `isDecimalFloat` is probably what you want.
*/
@deprecated("What constitutes validity is unclear. Use `isExactFloat`, `isBinaryFloat`, or `isDecimalFloat` instead.", "2.11.0")
def isValidFloat = {
val f = toFloat
!f.isInfinity && bigDecimal.compareTo(new BigDec(f.toDouble)) == 0
}
/** Tests whether the value is a valid Double. "Valid" has several distinct meanings, however. Use
* `isExactDouble`, `isBinaryDouble`, or `isDecimalDouble`, depending on the intended meaning.
* By default, `decimal` creation is used, so `isDecimalDouble` is probably what you want.
*/
@deprecated("Validity has distinct meanings. Use `isExactDouble`, `isBinaryDouble`, or `isDecimalDouble` instead.", "2.11.0")
def isValidDouble = {
val d = toDouble
!d.isInfinity && bigDecimal.compareTo(new BigDec(d)) == 0
}
/** Tests whether this `BigDecimal` holds the decimal representation of a `Double`. */
def isDecimalDouble = {
val d = toDouble
!d.isInfinity && equals(decimal(d))
}
/** Tests whether this `BigDecimal` holds the decimal representation of a `Float`. */
def isDecimalFloat = {
val f = toFloat
!f.isInfinity && equals(decimal(f))
}
/** Tests whether this `BigDecimal` holds, to within precision, the binary representation of a `Double`. */
def isBinaryDouble = {
val d = toDouble
!d.isInfinity && equals(binary(d,mc))
}
/** Tests whether this `BigDecimal` holds, to within precision, the binary representation of a `Float`. */
def isBinaryFloat = {
val f = toFloat
!f.isInfinity && equals(binary(f,mc))
}
/** Tests whether this `BigDecimal` holds the exact expansion of a `Double`'s binary fractional form into base 10. */
def isExactDouble = {
val d = toDouble
!d.isInfinity && equals(exact(d))
}
/** Tests whether this `BigDecimal` holds the exact expansion of a `Float`'s binary fractional form into base 10. */
def isExactFloat = {
val f = toFloat
!f.isInfinity && equals(exact(f.toDouble))
}
private def noArithmeticException(body: => Unit): Boolean = {
try { body ; true }
catch { case _: ArithmeticException => false }
}
def isWhole() = scale <= 0 || bigDecimal.stripTrailingZeros.scale <= 0
def underlying = bigDecimal
/** Compares this BigDecimal with the specified BigDecimal for equality.
*/
def equals (that: BigDecimal): Boolean = compare(that) == 0
/** Compares this BigDecimal with the specified BigDecimal
*/
def compare (that: BigDecimal): Int = this.bigDecimal compareTo that.bigDecimal
/** Addition of BigDecimals
*/
def + (that: BigDecimal): BigDecimal = new BigDecimal(this.bigDecimal add that.bigDecimal, mc)
/** Subtraction of BigDecimals
*/
def - (that: BigDecimal): BigDecimal = new BigDecimal(this.bigDecimal subtract that.bigDecimal, mc)
/** Multiplication of BigDecimals
*/
def * (that: BigDecimal): BigDecimal = new BigDecimal(this.bigDecimal.multiply(that.bigDecimal, mc), mc)
/** Division of BigDecimals
*/
def / (that: BigDecimal): BigDecimal = new BigDecimal(this.bigDecimal.divide(that.bigDecimal, mc), mc)
/** Division and Remainder - returns tuple containing the result of
* divideToIntegralValue and the remainder. The computation is exact: no rounding is applied.
*/
def /% (that: BigDecimal): (BigDecimal, BigDecimal) =
this.bigDecimal.divideAndRemainder(that.bigDecimal) match {
case Array(q, r) => (new BigDecimal(q, mc), new BigDecimal(r, mc))
}
/** Divide to Integral value.
*/
def quot (that: BigDecimal): BigDecimal =
new BigDecimal(this.bigDecimal divideToIntegralValue that.bigDecimal, mc)
/** Returns the minimum of this and that, or this if the two are equal
*/
def min (that: BigDecimal): BigDecimal = (this compare that) match {
case x if x <= 0 => this
case _ => that
}
/** Returns the maximum of this and that, or this if the two are equal
*/
def max (that: BigDecimal): BigDecimal = (this compare that) match {
case x if x >= 0 => this
case _ => that
}
/** Remainder after dividing this by that.
*/
def remainder (that: BigDecimal): BigDecimal = new BigDecimal(this.bigDecimal remainder that.bigDecimal, mc)
/** Remainder after dividing this by that.
*/
def % (that: BigDecimal): BigDecimal = this remainder that
/** Returns a BigDecimal whose value is this ** n.
*/
def pow (n: Int): BigDecimal = new BigDecimal(this.bigDecimal.pow(n, mc), mc)
/** Returns a BigDecimal whose value is the negation of this BigDecimal
*/
def unary_- : BigDecimal = new BigDecimal(this.bigDecimal.negate(), mc)
/** Returns the absolute value of this BigDecimal
*/
def abs: BigDecimal = if (signum < 0) unary_- else this
/** Returns the sign of this BigDecimal;
* -1 if it is less than 0,
* +1 if it is greater than 0,
* 0 if it is equal to 0.
*/
def signum: Int = this.bigDecimal.signum()
/** Returns the precision of this `BigDecimal`.
*/
def precision: Int = this.bigDecimal.precision()
/** Returns a BigDecimal rounded according to the supplied MathContext settings, but
* preserving its own MathContext for future operations.
*/
def round(mc: MathContext): BigDecimal = {
val r = this.bigDecimal round mc
if (r eq bigDecimal) this else new BigDecimal(r, this.mc)
}
/** Returns a `BigDecimal` rounded according to its own `MathContext` */
def rounded: BigDecimal = {
val r = bigDecimal round mc
if (r eq bigDecimal) this else new BigDecimal(r, mc)
}
/** Returns the scale of this `BigDecimal`.
*/
def scale: Int = this.bigDecimal.scale()
/** Returns the size of an ulp, a unit in the last place, of this BigDecimal.
*/
def ulp: BigDecimal = new BigDecimal(this.bigDecimal.ulp, mc)
/** Returns a new BigDecimal based on the supplied MathContext, rounded as needed.
*/
def apply(mc: MathContext): BigDecimal = new BigDecimal(this.bigDecimal round mc, mc)
/** Returns a `BigDecimal` whose scale is the specified value, and whose value is
* numerically equal to this BigDecimal's.
*/
def setScale(scale: Int): BigDecimal =
if (this.scale == scale) this
else new BigDecimal(this.bigDecimal setScale scale, mc)
def setScale(scale: Int, mode: RoundingMode): BigDecimal =
if (this.scale == scale) this
else new BigDecimal(this.bigDecimal.setScale(scale, mode.id), mc)
/** Converts this BigDecimal to a Byte.
* If the BigDecimal is too big to fit in a Byte, only the low-order 8 bits are returned.
* Note that this conversion can lose information about the overall magnitude of the
* BigDecimal value as well as return a result with the opposite sign.
*/
override def byteValue = intValue.toByte
/** Converts this BigDecimal to a Short.
* If the BigDecimal is too big to fit in a Short, only the low-order 16 bits are returned.
* Note that this conversion can lose information about the overall magnitude of the
* BigDecimal value as well as return a result with the opposite sign.
*/
override def shortValue = intValue.toShort
/** Converts this BigDecimal to a Char.
* If the BigDecimal is too big to fit in a Char, only the low-order 16 bits are returned.
* Note that this conversion can lose information about the overall magnitude of the
* BigDecimal value and that it always returns a positive result.
*/
def charValue = intValue.toChar
/** Converts this BigDecimal to an Int.
* If the BigDecimal is too big to fit in an Int, only the low-order 32 bits
* are returned. Note that this conversion can lose information about the
* overall magnitude of the BigDecimal value as well as return a result with
* the opposite sign.
*/
def intValue = this.bigDecimal.intValue
/** Converts this BigDecimal to a Long.
* If the BigDecimal is too big to fit in a Long, only the low-order 64 bits
* are returned. Note that this conversion can lose information about the
* overall magnitude of the BigDecimal value as well as return a result with
* the opposite sign.
*/
def longValue = this.bigDecimal.longValue
/** Converts this BigDecimal to a Float.
* if this BigDecimal has too great a magnitude to represent as a float,
* it will be converted to `Float.NEGATIVE_INFINITY` or
* `Float.POSITIVE_INFINITY` as appropriate.
*/
def floatValue = this.bigDecimal.floatValue
/** Converts this BigDecimal to a Double.
* if this BigDecimal has too great a magnitude to represent as a double,
* it will be converted to `Double.NEGATIVE_INFINITY` or
* `Double.POSITIVE_INFINITY` as appropriate.
*/
def doubleValue = this.bigDecimal.doubleValue
/** Converts this `BigDecimal` to a [[scala.Byte]], checking for lost information.
* If this `BigDecimal` has a nonzero fractional part, or is out of the possible
* range for a [[scala.Byte]] result, then a `java.lang.ArithmeticException` is
* thrown.
*/
def toByteExact = bigDecimal.byteValueExact
/** Converts this `BigDecimal` to a [[scala.Short]], checking for lost information.
* If this `BigDecimal` has a nonzero fractional part, or is out of the possible
* range for a [[scala.Short]] result, then a `java.lang.ArithmeticException` is
* thrown.
*/
def toShortExact = bigDecimal.shortValueExact
/** Converts this `BigDecimal` to a [[scala.Int]], checking for lost information.
* If this `BigDecimal` has a nonzero fractional part, or is out of the possible
* range for an [[scala.Int]] result, then a `java.lang.ArithmeticException` is
* thrown.
*/
def toIntExact = bigDecimal.intValueExact
/** Converts this `BigDecimal` to a [[scala.Long]], checking for lost information.
* If this `BigDecimal` has a nonzero fractional part, or is out of the possible
* range for a [[scala.Long]] result, then a `java.lang.ArithmeticException` is
* thrown.
*/
def toLongExact = bigDecimal.longValueExact
/** Creates a partially constructed NumericRange[BigDecimal] in range
* `[start;end)`, where start is the target BigDecimal. The step
* must be supplied via the "by" method of the returned object in order
* to receive the fully constructed range. For example:
* {{{
* val partial = BigDecimal(1.0) to 2.0 // not usable yet
* val range = partial by 0.01 // now a NumericRange
* val range2 = BigDecimal(0) to 1.0 by 0.01 // all at once of course is fine too
* }}}
*
* @param end the end value of the range (exclusive)
* @return the partially constructed NumericRange
*/
def until(end: BigDecimal): Range.Partial[BigDecimal, NumericRange.Exclusive[BigDecimal]] =
new Range.Partial(until(end, _))
/** Same as the one-argument `until`, but creates the range immediately. */
def until(end: BigDecimal, step: BigDecimal) = Range.BigDecimal(this, end, step)
/** Like `until`, but inclusive of the end value. */
def to(end: BigDecimal): Range.Partial[BigDecimal, NumericRange.Inclusive[BigDecimal]] =
new Range.Partial(to(end, _))
/** Like `until`, but inclusive of the end value. */
def to(end: BigDecimal, step: BigDecimal) = Range.BigDecimal.inclusive(this, end, step)
/** Converts this `BigDecimal` to a scala.BigInt.
*/
def toBigInt(): BigInt = new BigInt(this.bigDecimal.toBigInteger())
/** Converts this `BigDecimal` to a scala.BigInt if it
* can be done losslessly, returning Some(BigInt) or None.
*/
def toBigIntExact(): Option[BigInt] =
if (isWhole()) {
try Some(new BigInt(this.bigDecimal.toBigIntegerExact()))
catch { case _: ArithmeticException => None }
}
else None
/** Returns the decimal String representation of this BigDecimal.
*/
override def toString(): String = this.bigDecimal.toString()
}
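// --- Hedged illustration (not part of the original file; the object name is an
// illustrative assumption). Exercises the three construction schemes and the equality
// behaviour documented in the scaladoc above.
object BigDecimalConversionExample extends App {
  println(BigDecimal.decimal(0.1) == 0.1)              // true: equality follows the Double's decimal expansion
  println(BigDecimal.decimal(0.1f) == 0.1f)            // false: 0.1f != 0.1, as noted above
  println(BigDecimal.exact(0.1))                       // the full binary-fraction expansion of Double 0.1
  println(BigDecimal.binary(0.1, new MathContext(5)))  // the same fraction rounded to 5 significant digits
}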
|
felixmulder/scala
|
src/library/scala/math/BigDecimal.scala
|
Scala
|
bsd-3-clause
| 33,802
|
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, ObjectInputStream, ObjectOutputStream}
import geotrellis.proj4.CRS
import geotrellis.raster.io.geotiff.writer.GeoTiffWriter
import geotrellis.raster.io.geotiff.{SinglebandGeoTiff, _}
import geotrellis.raster.{CellType, DoubleArrayTile, MultibandTile, Tile, UByteCellType}
import geotrellis.spark.io.hadoop._
import geotrellis.vector.{Extent, ProjectedExtent}
import org.apache.hadoop.io.SequenceFile.Writer
import org.apache.hadoop.io.{SequenceFile, _}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.mllib.clustering.{KMeans, KMeansModel}
import org.apache.spark.mllib.linalg.distributed._
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import scala.sys.process._
//Spire is a numeric library for Scala which is intended to be generic, fast, and precise.
import spire.syntax.cfor._
object kmeans_satellite extends App {
override def main(args: Array[String]): Unit = {
val appName = this.getClass.getName
val masterURL = "spark://emma0.emma.nlesc.nl:7077"
val sc = new SparkContext(new SparkConf().setAppName(appName).setMaster(masterURL))
//MODE OF OPERATION
var rdd_offline_mode = true
var matrix_offline_mode = true
var kmeans_offline_mode = true
//GeoTiffs to be read from "hdfs:///user/hadoop/avhrr/"
var dir_path = "hdfs:///user/hadoop/avhrr/"
var offline_dir_path = "hdfs:///user/emma/avhrr/"
var geoTiff_dir = "SOST"
var band_num = 0
//Years between (inclusive) 1989 - 2014
val satellite_timeseries = (1989, 2014)
var satellite_first_year = 1989
var satellite_last_year = 2014
//Mask
val toBeMasked = true
val mask_path = "hdfs:///user/hadoop/usa_mask.tif"
//Kmeans number of iterations and clusters
var numIterations = 75
var minClusters = 100
var maxClusters = 100
var stepClusters = 10
var save_rdds = false
var save_matrix = false
var save_kmeans_model = false
//MODE OF OPERATION VALIDATION
if (minClusters > maxClusters) {
maxClusters = minClusters
stepClusters = 1
}
if (stepClusters < 1) {
stepClusters = 1
}
//Paths to store data structures for Offline runs
var mask_str = ""
if (toBeMasked)
mask_str = "_mask"
var grid0_path = offline_dir_path + geoTiff_dir + "/grid0" + "_" + band_num + mask_str
var grid0_index_path = offline_dir_path + geoTiff_dir + "/grid0_index" + "_" + band_num + mask_str
var grids_noNaN_path = offline_dir_path + geoTiff_dir + "/grids_noNaN" + "_" + band_num + mask_str
var metadata_path = offline_dir_path + geoTiff_dir + "/metadata" + "_" + band_num + mask_str
var grids_matrix_path = offline_dir_path + geoTiff_dir + "/grids_matrix" + "_" + band_num + mask_str
var grids_matrix_index_path = offline_dir_path + geoTiff_dir + "/grids_matrix_index" + "_" + band_num + mask_str
//Check offline modes
var conf = sc.hadoopConfiguration
var fs = org.apache.hadoop.fs.FileSystem.get(conf)
val rdd_offline_exists = fs.exists(new org.apache.hadoop.fs.Path(grid0_path))
val matrix_offline_exists = fs.exists(new org.apache.hadoop.fs.Path(grids_matrix_path))
if (rdd_offline_mode != rdd_offline_exists) {
println("\\"Load GeoTiffs\\" offline mode is not set properly, i.e., either it was set to false and the required file does not exist or vice-versa. We will reset it to " + rdd_offline_exists.toString())
rdd_offline_mode = rdd_offline_exists
}
if (matrix_offline_mode != matrix_offline_exists) {
println("\\"Matrix\\" offline mode is not set properly, i.e., either it was set to false and the required file does not exist or vice-versa. We will reset it to " + matrix_offline_exists.toString())
matrix_offline_mode = matrix_offline_exists
}
if (!fs.exists(new org.apache.hadoop.fs.Path(mask_path))) {
println("The mask path: " + mask_path + " is invalid!!!")
}
//Years
val satellite_years = satellite_timeseries._1 to satellite_timeseries._2
if (!satellite_years.contains(satellite_first_year) || !(satellite_years.contains(satellite_last_year))) {
println("Invalid range of years for " + geoTiff_dir + ". I should be between " + satellite_first_year + " and " + satellite_last_year)
System.exit(0)
}
var satellite_years_range = (satellite_years.indexOf(satellite_first_year), satellite_years.indexOf(satellite_last_year))
var num_kmeans: Int = 1
if (minClusters != maxClusters) {
num_kmeans = ((maxClusters - minClusters) / stepClusters) + 1
}
var kmeans_model_paths: Array[String] = Array.fill[String](num_kmeans)("")
var wssse_path: String = offline_dir_path + geoTiff_dir + "/" + numIterations + "_wssse"
var geotiff_hdfs_paths: Array[String] = Array.fill[String](num_kmeans)("")
var geotiff_tmp_paths: Array[String] = Array.fill[String](num_kmeans)("")
var numClusters_id = 0
if (num_kmeans > 1) {
numClusters_id = 0
cfor(minClusters)(_ <= maxClusters, _ + stepClusters) { numClusters =>
kmeans_model_paths(numClusters_id) = offline_dir_path + geoTiff_dir + "/kmeans_model_" + band_num + "_" + numClusters + "_" + numIterations
//Check if the file exists
val kmeans_exist = fs.exists(new org.apache.hadoop.fs.Path(kmeans_model_paths(numClusters_id)))
if (kmeans_exist && !kmeans_offline_mode) {
println("The kmeans model path " + kmeans_model_paths(numClusters_id) + " exists, please remove it.")
} else if (!kmeans_exist && kmeans_offline_mode) {
kmeans_offline_mode = false
}
geotiff_hdfs_paths(numClusters_id) = offline_dir_path + geoTiff_dir + "/clusters_" + band_num + "_" + numClusters + "_" + numIterations + ".tif"
geotiff_tmp_paths(numClusters_id) = "/tmp/clusters_" + band_num + "_" + geoTiff_dir + "_" + numClusters + "_" + numIterations + ".tif"
if (fs.exists(new org.apache.hadoop.fs.Path(geotiff_hdfs_paths(numClusters_id)))) {
println("There is already a GeoTiff with the path: " + geotiff_hdfs_paths(numClusters_id) + ". Please make either a copy or move it to another location, otherwise, it will be over-written.")
}
numClusters_id += 1
}
kmeans_offline_mode = false
} else {
kmeans_model_paths(0) = offline_dir_path + geoTiff_dir + "/kmeans_model_" + band_num + "_" + minClusters + "_" + numIterations
val kmeans_offline_exists = fs.exists(new org.apache.hadoop.fs.Path(kmeans_model_paths(0)))
if (kmeans_offline_mode != kmeans_offline_exists) {
println("\\"Kmeans\\" offline mode is not set properly, i.e., either it was set to false and the required file does not exist or vice-versa. We will reset it to " + kmeans_offline_exists.toString())
kmeans_offline_mode = kmeans_offline_exists
}
geotiff_hdfs_paths(0) = offline_dir_path + geoTiff_dir + "/clusters_" + band_num + "_" + minClusters + "_" + numIterations + ".tif"
geotiff_tmp_paths(0) = "/tmp/clusters_" + band_num + "_" + geoTiff_dir + "_" + minClusters + "_" + numIterations + ".tif"
if (fs.exists(new org.apache.hadoop.fs.Path(geotiff_hdfs_paths(0)))) {
println("There is already a GeoTiff with the path: " + geotiff_hdfs_paths(0) + ". Please make either a copy or move it to another location, otherwise, it will be over-written.")
}
}
//FUNCTIONS TO (DE)SERIALIZE ANY STRUCTURE
def serialize(value: Any): Array[Byte] = {
val out_stream: ByteArrayOutputStream = new ByteArrayOutputStream()
val obj_out_stream = new ObjectOutputStream(out_stream)
obj_out_stream.writeObject(value)
obj_out_stream.close
out_stream.toByteArray
}
def deserialize(bytes: Array[Byte]): Any = {
val obj_in_stream = new ObjectInputStream(new ByteArrayInputStream(bytes))
val value = obj_in_stream.readObject
obj_in_stream.close
value
}
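//Illustrative round trip (added comment, not part of the original pipeline); it mirrors how the
//metadata sequence file below is written with serialize(...) and read back with deserialize(...):
//  val bytes    = serialize(projected_extent)
//  val restored = deserialize(bytes).asInstanceOf[ProjectedExtent]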
//LOAD GEOTIFFS
//----//
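//(Added descriptive note:) reads every GeoTiff matching `pattern` under `satellite_filepath`,
//tags each tile with its position k in the lexicographically sorted file listing, and returns
//an RDD keyed by that acquisition index. The Multiband variant below does the same for
//multiband files.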
def hadoopGeoTiffRDD(satellite_filepath :String, pattern :String): RDD[(Int, (ProjectedExtent, Tile))] = {
val listFiles = sc.binaryFiles(satellite_filepath + "/" + pattern).sortBy(_._1).keys.collect()
var prevRDD :RDD[(Int,(ProjectedExtent, Tile))] = sc.emptyRDD
cfor(0)(_ < listFiles.length, _ + 1) { k =>
val filePath :String = listFiles(k)
val kB = sc.broadcast(k)
val currRDD = sc.hadoopGeoTiffRDD(filePath).map(m => (kB.value, m))
prevRDD = currRDD.union(prevRDD)
//kB.destroy()
}
prevRDD.sortBy(_._1)
}
//----//
def hadoopMultibandGeoTiffRDD(satellite_filepath :String, pattern :String): RDD[(Int, (ProjectedExtent, MultibandTile))] = {
val listFiles = sc.binaryFiles(satellite_filepath + "/" + pattern).sortBy(_._1).keys.collect()
var prevRDD :RDD[(Int,(ProjectedExtent, MultibandTile))] = sc.emptyRDD
if (listFiles.nonEmpty) {
val k = 0
val filePath :String = listFiles(k)
val kB = sc.broadcast(k)
//assign the outer var (the original shadowed it with a local val, so the first file was dropped)
prevRDD = sc.hadoopMultibandGeoTiffRDD(filePath).map(m => (kB.value,m))
}
cfor(1)(_ < listFiles.length, _ + 1) { k =>
val filePath :String = listFiles(k)
val kB = sc.broadcast(k)
val currRDD = sc.hadoopMultibandGeoTiffRDD(filePath).map(m => (kB.value,m))
prevRDD = currRDD.union(prevRDD)
//kB.destroy()
}
prevRDD.sortBy(_._1)
}
var t0 = System.nanoTime()
//Global variables
var projected_extent = new ProjectedExtent(new Extent(0,0,0,0), CRS.fromName("EPSG:3857"))
var grid0: RDD[(Long, Double)] = sc.emptyRDD
var grid0_index: RDD[Long] = sc.emptyRDD
var grids_noNaN_RDD: RDD[(Int, Array[Double])] = sc.emptyRDD
var num_cols_rows :(Int, Int) = (0, 0)
var cellT :CellType = UByteCellType
var grids_RDD :RDD[(Int, Array[Double])] = sc.emptyRDD
var mask_tile0 :Tile = new SinglebandGeoTiff(geotrellis.raster.ArrayTile.empty(cellT, num_cols_rows._1, num_cols_rows._2), projected_extent.extent, projected_extent.crs, Tags.empty, GeoTiffOptions.DEFAULT).tile
var grid_cells_size :Long = 0
//Load Mask
if (toBeMasked) {
val mask_tiles_RDD = sc.hadoopGeoTiffRDD(mask_path).values
val mask_tiles_withIndex = mask_tiles_RDD.zipWithIndex().map{case (e,v) => (v,e)}
mask_tile0 = (mask_tiles_withIndex.filter(m => m._1==0).values.collect())(0)
}
//Local variables
val pattern: String = "*.tif"
val filepath: String = dir_path + geoTiff_dir
if (rdd_offline_mode) {
grids_noNaN_RDD = sc.objectFile(grids_noNaN_path)
grid0 = sc.objectFile(grid0_path)
grid0_index = sc.objectFile(grid0_index_path)
val metadata = sc.sequenceFile(metadata_path, classOf[IntWritable], classOf[BytesWritable]).map(_._2.copyBytes()).collect()
projected_extent = deserialize(metadata(0)).asInstanceOf[ProjectedExtent]
num_cols_rows = (deserialize(metadata(1)).asInstanceOf[Int], deserialize(metadata(2)).asInstanceOf[Int])
cellT = deserialize(metadata(3)).asInstanceOf[CellType]
} else {
if (band_num == 0) {
//Let's load Singleband GeoTiffs and return an RDD with just the tiles.
var geos_RDD = hadoopGeoTiffRDD(filepath, pattern)
geos_RDD.cache()
var tiles_RDD = geos_RDD.map{ case (i,(p,t)) => (i,t)}
//Retrieve the number of cols and rows of the Tile's grid
val tiles_withIndex = tiles_RDD//.zipWithIndex().map{case (e,v) => (v,e)}
val tile0 = (tiles_withIndex.filter(m => m._1==0).values.collect())(0)
num_cols_rows = (tile0.cols,tile0.rows)
cellT = tile0.cellType
//Retrieve the ProjectedExtent, which contains metadata such as CRS and bounding box
val projected_extents_withIndex = geos_RDD.map{ case (i,(p,t)) => (i,p)}//.keys.zipWithIndex().map { case (e, v) => (v, e) }
projected_extent = (projected_extents_withIndex.filter(m => m._1 == 0).values.collect()) (0)
if (toBeMasked) {
val mask_tile_broad :Broadcast[Tile] = sc.broadcast(mask_tile0)
grids_RDD = tiles_RDD.map{ case (i,m) => (i,m.localInverseMask(mask_tile_broad.value, 1, -1000).toArrayDouble())}
} else {
grids_RDD = tiles_RDD.map{ case (i,m) => (i, m.toArrayDouble())}
}
} else {
//Let's load Multiband GeoTiffs and return an RDD with just the tiles.
val geos_RDD = hadoopMultibandGeoTiffRDD(filepath, pattern)
geos_RDD.cache()
val tiles_RDD = geos_RDD.map{ case (i,(p,t)) => (i,t)}
//Retrieve the number of cols and rows of the Tile's grid
val tiles_withIndex = tiles_RDD//.zipWithIndex().map{case (e,v) => (v,e)}
val tile0 = (tiles_withIndex.filter(m => m._1==0).values.collect())(0)
num_cols_rows = (tile0.cols,tile0.rows)
cellT = tile0.cellType
//Retrieve the ProjectedExtent, which contains metadata such as CRS and bounding box
val projected_extents_withIndex = geos_RDD.map{ case (i,(p,t)) => (i,p)}//.keys.zipWithIndex().map { case (e, v) => (v, e) }
projected_extent = (projected_extents_withIndex.filter(m => m._1 == 0).values.collect()) (0)
//Let's read the average of the Spring-Index, which is stored in the 4th band
val band_numB :Broadcast[Int] = sc.broadcast(band_num)
if (toBeMasked) {
val mask_tile_broad :Broadcast[Tile] = sc.broadcast(mask_tile0)
grids_RDD = tiles_RDD.map{ case (i,m) => (i, m.band(band_numB.value).localInverseMask(mask_tile_broad.value, 1, -1000).toArrayDouble())}
} else {
grids_RDD = tiles_RDD.map{ case (i,m) => (i, m.band(band_numB.value).toArrayDouble())}
}
}
//Get Index for each Cell
val grids_withIndex = grids_RDD//.zipWithIndex().map { case (e, v) => (v, e) }
if (toBeMasked) {
grid0_index = grids_withIndex.filter(m => m._1 == 0).values.flatMap(m => m).zipWithIndex.filter(m => m._1 != -1000.0).map { case (v, i) => (i) }
} else {
grid0_index = grids_withIndex.filter(m => m._1 == 0).values.flatMap(m => m).zipWithIndex.map { case (v, i) => (i) }
}
//Get the Tile's grid
grid0 = grids_withIndex.filter(m => m._1 == 0).values.flatMap( m => m).zipWithIndex.map{case (v,i) => (i,v)}
//Let's filter out the masked (-1000.0) cells
if (toBeMasked) {
grids_noNaN_RDD = grids_RDD.map{ case (i,m) => (i,m.filter(m => m != -1000.0))}
} else {
grids_noNaN_RDD = grids_RDD
}
//Store data in HDFS
if (save_rdds) {
grid0.saveAsObjectFile(grid0_path)
grid0_index.saveAsObjectFile(grid0_index_path)
grids_noNaN_RDD.saveAsObjectFile(grids_noNaN_path)
}
val grids_noNaN_RDD_withIndex = grids_noNaN_RDD//.zipWithIndex().map { case (e, v) => (v, e) }
val sat_year_diff = satellite_first_year-satellite_timeseries._1
val sat_year_diffB = sc.broadcast(sat_year_diff)
grids_noNaN_RDD = grids_noNaN_RDD_withIndex.filterByRange(satellite_years_range._1, satellite_years_range._2).map{ case(i,a) => (i-(sat_year_diffB.value),a)}
if (save_rdds) {
val writer: SequenceFile.Writer = SequenceFile.createWriter(conf,
Writer.file(metadata_path),
Writer.keyClass(classOf[IntWritable]),
Writer.valueClass(classOf[BytesWritable])
)
writer.append(new IntWritable(1), new BytesWritable(serialize(projected_extent)))
writer.append(new IntWritable(2), new BytesWritable(serialize(num_cols_rows._1)))
writer.append(new IntWritable(3), new BytesWritable(serialize(num_cols_rows._2)))
writer.append(new IntWritable(4), new BytesWritable(serialize(cellT)))
writer.hflush()
writer.close()
}
}
grid_cells_size = grid0_index.count().toInt
var t1 = System.nanoTime()
println("Elapsed time: " + (t1 - t0) + "ns")
//MATRIX
t0 = System.nanoTime()
//Global variables
var grids_matrix: RDD[Vector] = sc.emptyRDD
var grids_matrix_index :RDD[(Long, Long)] = sc.emptyRDD
val grid_cells_sizeB = sc.broadcast(grid_cells_size)
if (matrix_offline_mode) {
grids_matrix = sc.objectFile(grids_matrix_path)
grids_matrix_index = sc.objectFile(grids_matrix_index_path)
} else {
val mat :IndexedRowMatrix = new IndexedRowMatrix(grids_noNaN_RDD.map{ case (i, m) => (i,m.zipWithIndex)}.map{ case (i,m) => (i,m.filter(!_._1.isNaN))}.map{ case (i,m) => new IndexedRow(i.toLong, Vectors.sparse(grid_cells_sizeB.value.toInt, m.map(v => v._2), m.map(v => v._1)))})
val mat_T = mat.toCoordinateMatrix().transpose().toIndexedRowMatrix().rows.sortBy(_.index)
grids_matrix = mat_T.map(_.vector)
grids_matrix_index = mat_T.map(_.index).zipWithIndex().map{ case (v,i) => (i,v)}
if (save_matrix) {
grids_matrix.saveAsObjectFile(grids_matrix_path)
grids_matrix_index.saveAsObjectFile(grids_matrix_index_path)
}
}
t1 = System.nanoTime()
println("Elapsed time: " + (t1 - t0) + "ns")
//KMEANS TRAINING
t0 = System.nanoTime()
//Global variables
var kmeans_models :Array[KMeansModel] = new Array[KMeansModel](num_kmeans)
var wssse_data :List[(Int, Int, Double)] = List.empty
if (kmeans_offline_mode) {
numClusters_id = 0
cfor(minClusters)(_ <= maxClusters, _ + stepClusters) { numClusters =>
if (!fs.exists(new org.apache.hadoop.fs.Path(kmeans_model_paths(numClusters_id)))) {
println("One of the files does not exist, we will abort!!!")
System.exit(0)
} else {
kmeans_models(numClusters_id) = KMeansModel.load(sc, kmeans_model_paths(numClusters_id))
}
numClusters_id += 1
}
val wssse_data_RDD :RDD[(Int, Int, Double)] = sc.objectFile(wssse_path)
wssse_data = wssse_data_RDD.collect().toList
} else {
numClusters_id = 0
if (fs.exists(new org.apache.hadoop.fs.Path(wssse_path))) {
val wssse_data_RDD :RDD[(Int, Int, Double)] = sc.objectFile(wssse_path)
wssse_data = wssse_data_RDD.collect().toList
}
grids_matrix.cache()
cfor(minClusters)(_ <= maxClusters, _ + stepClusters) { numClusters =>
println(numClusters)
kmeans_models(numClusters_id) = {
KMeans.train(grids_matrix, numClusters, numIterations)
}
// Evaluate clustering by computing Within Set Sum of Squared Errors
val WSSSE = kmeans_models(numClusters_id).computeCost(grids_matrix)
println("Within Set Sum of Squared Errors = " + WSSSE)
wssse_data = wssse_data :+ (numClusters, numIterations, WSSSE)
//Save kmeans model
if (save_kmeans_model) {
if (!fs.exists(new org.apache.hadoop.fs.Path(kmeans_model_paths(numClusters_id)))) {
kmeans_models(numClusters_id).save(sc, kmeans_model_paths(numClusters_id))
}
}
numClusters_id += 1
if (fs.exists(new org.apache.hadoop.fs.Path(wssse_path))) {
println("We will delete the wssse file")
try { fs.delete(new org.apache.hadoop.fs.Path(wssse_path), true) } catch { case _ : Throwable => { } }
}
println("Lets create it with the new data")
sc.parallelize(wssse_data, 1).saveAsObjectFile(wssse_path)
}
//Un-persist it to save memory
grids_matrix.unpersist()
}
t1 = System.nanoTime()
println("Elapsed time: " + (t1 - t0) + "ns")
//INSPECT WSSE
t0 = System.nanoTime()
//from disk
if (fs.exists(new org.apache.hadoop.fs.Path(wssse_path))) {
var wssse_data_tmp :RDD[(Int, Int, Double)] = sc.objectFile(wssse_path)//.collect()//.toList
println(wssse_data_tmp.collect().toList)
}
t1 = System.nanoTime()
println("Elapsed time: " + (t1 - t0) + "ns")
//RUN KMEANS CLUSTERING
t0 = System.nanoTime()
//Cache it so kmeans is more efficient
grids_matrix.cache()
var kmeans_res: Array[RDD[Int]] = Array.fill(num_kmeans)(sc.emptyRDD)
var kmeans_centroids: Array[Array[Double]] = Array.fill(num_kmeans)(Array.emptyDoubleArray)
numClusters_id = 0
cfor(minClusters)(_ <= maxClusters, _ + stepClusters) { numClusters =>
kmeans_res(numClusters_id) = kmeans_models(numClusters_id).predict(grids_matrix)
kmeans_centroids(numClusters_id) = kmeans_models(numClusters_id).clusterCenters.map(m => m(0))
numClusters_id += 1
}
//Un-persist it to save memory
grids_matrix.unpersist()
t1 = System.nanoTime()
println("Elapsed time: " + (t1 - t0) + "ns")
//SANITY TEST
t0 = System.nanoTime()
val kmeans_res_out = kmeans_res(0).filter(_ != 0).filter(_ != 1).take(150)
println(kmeans_res_out.size)
t1 = System.nanoTime()
println("Elapsed time: " + (t1 - t0) + "ns")
//BUILD GEOTIFFS
t0 = System.nanoTime()
numClusters_id = 0
val grid0_index_I = grid0_index.zipWithIndex().map{ case (v,i) => (i,v)}
grid0_index_I.cache()
grid0.cache()
grids_matrix_index.cache()
cfor(minClusters)(_ <= maxClusters, _ + stepClusters) { numClusters =>
val kmeans_out = (kmeans_res(numClusters_id).zipWithIndex().map{ case (v,i) => (i,v)}).join(grids_matrix_index).map{ case (z,(k,i)) => (i,k)}
val cluster_cell_pos = kmeans_out.join(grid0_index_I).map{ case (k,(v,i)) => (v,i)}
//Associate a Cluster_ID with its respective Grid_cell
val grid_clusters = grid0.map{ case (i, v) => if (v == 0.0) (i,Double.NaN) else (i,v)}.leftOuterJoin(cluster_cell_pos.map{ case (c,i) => (i.toLong, c)})
//Convert all None to Int.MaxValue; those cells are left as NaN when the tile is built below
val grid_clusters_res = grid_clusters.sortByKey(true).map{case (k, (v, c)) => if (c == None) (k, Int.MaxValue) else (k, c.get)}
//Define a Tile
val cluster_cellsID :Array[Int] = grid_clusters_res.values.collect()
var cluster_cells :Array[Double] = Array.fill(cluster_cellsID.length)(Double.NaN)
cfor(0)(_ < cluster_cellsID.size, _ + 1) { cellID =>
if (cluster_cellsID(cellID) != Int.MaxValue) {
cluster_cells(cellID) = kmeans_centroids(numClusters_id)(cluster_cellsID(cellID))
}
}
val cluster_cellsD = DoubleArrayTile(cluster_cells, num_cols_rows._1, num_cols_rows._2)
val geoTif = new SinglebandGeoTiff(cluster_cellsD, projected_extent.extent, projected_extent.crs, Tags.empty, GeoTiffOptions(compression.DeflateCompression))
//Save to /tmp/
GeoTiffWriter.write(geoTif, geotiff_tmp_paths(numClusters_id))
//Upload to HDFS
var cmd = "hadoop dfs -copyFromLocal -f " + geotiff_tmp_paths(numClusters_id) + " " + geotiff_hdfs_paths(numClusters_id)
println(cmd)
Process(cmd)!
//Remove from /tmp/
cmd = "rm -fr " + geotiff_tmp_paths(numClusters_id)
println(cmd)
Process(cmd)!
numClusters_id += 1
}
grid0_index_I.unpersist()
grid0.unpersist()
t1 = System.nanoTime()
println("Elapsed time: " + (t1 - t0) + "ns")
}
}
|
phenology/infrastructure
|
applications/ides/scala/src/main/scala/kmeans_satellite.scala
|
Scala
|
apache-2.0
| 23,107
|
package fe.CCC
abstract class VFactory[_VImpl[A]] {
type VImpl[A] = _VImpl[A]
def One[A](one: A): VImpl[A]
def Chc[A](s: Formula, y: VImpl[A], n: VImpl[A]): VImpl[A]
def flatMap[A,B](va: VImpl[A], f: (A)=>VImpl[B]): VImpl[B]
def map[A,B](va: VImpl[A], f: (A)=>B): VImpl[B] = flatMap(va, (a: A) => One(f(a)))
def isOne[A](t: VImpl[A]): Option[A]
def evaluate[A](t: VImpl[A], s: Set[String]): A
def vToString[A](t: VImpl[A]): String = t.toString
}
abstract class V[VImpl[C] <: V[VImpl]] {
type C
def vf: VFactory[VImpl]
def va: VImpl[C]
def map[B](f: (C)=>B): VImpl[B] = vf.map(va, f)
def flatMap[B](f: (C)=>VImpl[B]): VImpl[B] = vf.flatMap(va, f)
def isOne(): Option[C] = vf.isOne(va)
def evaluate(s: Set[String]): C = vf.evaluate(va, s)
def apply(s: Set[String]): C = evaluate(s)
override def toString: String = vf.vToString(va)
}
|
FiveEye/CCC
|
scala/src/main/scala/absV.scala
|
Scala
|
mit
| 894
|
package playground.models
trait Identifiable {
def id: Id
}
|
ybr/playground
|
src/main/scala/playground/models/Identifiable.scala
|
Scala
|
mit
| 63
|
package calculator
import scala.scalajs.js
import org.scalajs.dom
import org.scalajs.dom.html
import dom.document
object CalculatorUI extends js.JSApp {
def main(): Unit = {
try {
setupTweetMeasurer()
setup2ndOrderPolynomial()
setupCalculator()
} catch {
case th: Throwable =>
th.printStackTrace()
}
}
// Helpers
def elementById[A <: js.Any](id: String): A =
document.getElementById(id).asInstanceOf[A]
def elementValueSignal(element: html.Element,
getValue: () => String): Signal[String] = {
var prevVal = getValue()
val value = new Var(prevVal)
val onChange = { (event: dom.Event) =>
// Manually reconstruct the optimization at the root of the graph
val newVal = getValue()
if (newVal != prevVal) {
prevVal = newVal
value() = newVal
}
}
element.addEventListener("change", onChange)
element.addEventListener("keypress", onChange)
element.addEventListener("keyup", onChange)
value
}
def inputValueSignal(input: html.Input): Signal[String] =
elementValueSignal(input, () => input.value)
def textAreaValueSignal(textAreaID: String): Signal[String] = {
val textArea = elementById[html.TextArea](textAreaID)
elementValueSignal(textArea, () => textArea.value)
}
private lazy val ClearCssClassRegExp =
new js.RegExp(raw"""(?:^|\\s)has-error(?!\\S)""", "g")
def doubleValueOfInput(input: html.Input): Signal[Double] = {
val text = inputValueSignal(input)
val parent = input.parentElement
Signal {
import js.JSStringOps._
parent.className = parent.className.jsReplace(ClearCssClassRegExp, "")
try {
text().toDouble
} catch {
case e: NumberFormatException =>
parent.className += " has-error"
Double.NaN
}
}
}
// TWEET LENGTH
def setupTweetMeasurer(): Unit = {
val tweetText = textAreaValueSignal("tweettext")
val remainingCharsArea =
document.getElementById("tweetremainingchars").asInstanceOf[html.Span]
val remainingCount = TweetLength.tweetRemainingCharsCount(tweetText)
Signal {
remainingCharsArea.textContent = remainingCount().toString
}
val color = TweetLength.colorForRemainingCharsCount(remainingCount)
Signal {
remainingCharsArea.style.color = color()
}
}
// 2ND ORDER POLYNOMIAL
def setup2ndOrderPolynomial(): Unit = {
val ids = List("polyroota", "polyrootb", "polyrootc")
val inputs = ids.map(id => elementById[html.Input](id))
val doubleValues = inputs.map(doubleValueOfInput)
val List(a, b, c) = doubleValues
val delta = Polynomial.computeDelta(a, b, c)
val deltaArea = elementById[html.Span]("polyrootdelta")
Signal {
deltaArea.textContent = delta().toString
}
val solutions = Polynomial.computeSolutions(a, b, c, delta)
val solutionsArea = elementById[html.Span]("polyrootsolutions")
Signal {
solutionsArea.textContent = solutions().toString
}
}
// CALCULATOR
def setupCalculator(): Unit = {
val names = (0 until 10).map(i => ('a' + i).toChar.toString)
val inputs = names.map(name => elementById[html.Input]("calculatorexpr" + name))
val exprs = inputs.map(exprOfInput)
val namedExpressions = names.zip(exprs).toMap
val namedValues = Calculator.computeValues(namedExpressions)
assert(namedValues.keySet == namedExpressions.keySet)
for ((name, valueSignal) <- namedValues) {
val span = elementById[html.Span]("calculatorval" + name)
var dehighlightTimeout: Option[js.timers.SetTimeoutHandle] = None
Signal {
span.textContent = valueSignal().toString
span.style.backgroundColor = "#ffff99"
dehighlightTimeout.foreach(js.timers.clearTimeout)
dehighlightTimeout = Some(js.timers.setTimeout(1500) {
dehighlightTimeout = None
span.style.backgroundColor = "white"
})
}
}
}
def exprOfInput(input: html.Input): Signal[Expr] = {
val text = inputValueSignal(input)
val parent = input.parentElement
Signal {
import js.JSStringOps._
parent.className = parent.className.jsReplace(ClearCssClassRegExp, "")
try {
parseExpr(text())
} catch {
case e: IllegalArgumentException =>
parent.className += " has-error"
Literal(Double.NaN)
}
}
}
def parseExpr(text: String): Expr = {
def parseSimple(text: String): Expr = {
if (text.forall(l => l >= 'a' && l <= 'z')) {
Ref(text)
} else {
try {
Literal(text.toDouble)
} catch {
case e: NumberFormatException =>
throw new IllegalArgumentException(s"$text is neither a variable name nor a number")
}
}
}
text.split(" ").map(_.trim).filter(_ != "") match {
case Array(x) => parseSimple(x)
case Array(aText, op, bText) =>
val a = parseSimple(aText)
val b = parseSimple(bText)
op match {
case "+" => Plus(a, b)
case "-" => Minus(a, b)
case "*" => Times(a, b)
case "/" => Divide(a, b)
case _ =>
throw new IllegalArgumentException(s"$op is not a valid operator")
}
case _ =>
throw new IllegalArgumentException(s"$text is not a valid simple expression")
}
}
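// Illustrative examples of the grammar accepted above (added comments only, not executed):
//   parseExpr("3.5")    == Literal(3.5)
//   parseExpr("price")  == Ref("price")
//   parseExpr("a + 2")  == Plus(Ref("a"), Literal(2.0))
//   parseExpr("a % 2")  // throws IllegalArgumentException ("% is not a valid operator")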
}
|
amathewk/func_design_scala
|
week4/calculator/web-ui/src/main/scala/calculator/CalculatorUI.scala
|
Scala
|
lgpl-3.0
| 5,410
|
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2021 Lightbend Inc. <https://www.lightbend.com>
*/
package akka.persistence.jdbc.query
import akka.persistence.query.{ EventEnvelope, NoOffset, Sequence }
import akka.pattern.ask
import com.typesafe.config.{ ConfigValue, ConfigValueFactory }
import scala.concurrent.duration._
import akka.Done
import akka.persistence.jdbc.query.EventAdapterTest.{ Event, TaggedAsyncEvent }
import scala.concurrent.Future
import CurrentEventsByTagTest._
object CurrentEventsByTagTest {
val maxBufferSize = 20
val refreshInterval = 500.milliseconds
val configOverrides: Map[String, ConfigValue] = Map(
"jdbc-read-journal.max-buffer-size" -> ConfigValueFactory.fromAnyRef(maxBufferSize.toString),
"jdbc-read-journal.refresh-interval" -> ConfigValueFactory.fromAnyRef(refreshInterval.toString()))
}
abstract class CurrentEventsByTagTest(config: String) extends QueryTestSpec(config, configOverrides) {
it should "not find an event by tag for unknown tag" in withActorSystem { implicit system =>
val journalOps = new ScalaJdbcReadJournalOperations(system)
withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>
(actor1 ? withTags(1, "one")).futureValue
(actor2 ? withTags(2, "two")).futureValue
(actor3 ? withTags(3, "three")).futureValue
eventually {
journalOps.countJournal.futureValue shouldBe 3
}
journalOps.withCurrentEventsByTag()("unknown", NoOffset) { tp =>
tp.request(Int.MaxValue)
tp.expectComplete()
}
}
}
it should "find all events by tag" in withActorSystem { implicit system =>
val journalOps = new ScalaJdbcReadJournalOperations(system)
withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>
(actor1 ? withTags(1, "number")).futureValue
(actor2 ? withTags(2, "number")).futureValue
(actor3 ? withTags(3, "number")).futureValue
eventually {
journalOps.countJournal.futureValue shouldBe 3
}
journalOps.withCurrentEventsByTag()("number", NoOffset) { tp =>
tp.request(Int.MaxValue)
tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => }
tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, _) => }
tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => }
tp.expectComplete()
}
journalOps.withCurrentEventsByTag()("number", Sequence(0)) { tp =>
tp.request(Int.MaxValue)
tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => }
tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, _) => }
tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => }
tp.expectComplete()
}
journalOps.withCurrentEventsByTag()("number", Sequence(1)) { tp =>
tp.request(Int.MaxValue)
tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, _) => }
tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => }
tp.expectComplete()
}
journalOps.withCurrentEventsByTag()("number", Sequence(2)) { tp =>
tp.request(Int.MaxValue)
tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => }
tp.expectComplete()
}
journalOps.withCurrentEventsByTag()("number", Sequence(3)) { tp =>
tp.request(Int.MaxValue)
tp.expectComplete()
}
}
}
it should "persist and find a tagged event with multiple tags" in withActorSystem { implicit system =>
val journalOps = new ScalaJdbcReadJournalOperations(system)
withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>
withClue("Persisting multiple tagged events") {
(actor1 ? withTags(1, "one", "1", "prime")).futureValue
(actor1 ? withTags(2, "two", "2", "prime")).futureValue
(actor1 ? withTags(3, "three", "3", "prime")).futureValue
(actor1 ? withTags(4, "four", "4")).futureValue
(actor1 ? withTags(5, "five", "5", "prime")).futureValue
(actor2 ? withTags(3, "three", "3", "prime")).futureValue
(actor3 ? withTags(3, "three", "3", "prime")).futureValue
(actor1 ? 1).futureValue
(actor1 ? 1).futureValue
eventually {
journalOps.countJournal.futureValue shouldBe 9
}
}
journalOps.withCurrentEventsByTag()("one", NoOffset) { tp =>
tp.request(Int.MaxValue)
tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => }
tp.expectComplete()
}
journalOps.withCurrentEventsByTag()("prime", NoOffset) { tp =>
tp.request(Int.MaxValue)
tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => }
tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, _) => }
tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => }
tp.expectNextPF { case EventEnvelope(Sequence(5), _, _, _) => }
tp.expectNextPF { case EventEnvelope(Sequence(6), _, _, _) => }
tp.expectNextPF { case EventEnvelope(Sequence(7), _, _, _) => }
tp.expectComplete()
}
journalOps.withCurrentEventsByTag()("3", NoOffset) { tp =>
tp.request(Int.MaxValue)
tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => }
tp.expectNextPF { case EventEnvelope(Sequence(6), _, _, _) => }
tp.expectNextPF { case EventEnvelope(Sequence(7), _, _, _) => }
tp.expectComplete()
}
journalOps.withCurrentEventsByTag()("4", NoOffset) { tp =>
tp.request(Int.MaxValue)
tp.expectNextPF { case EventEnvelope(Sequence(4), _, _, _) => }
tp.expectComplete()
}
journalOps.withCurrentEventsByTag()("four", NoOffset) { tp =>
tp.request(Int.MaxValue)
tp.expectNextPF { case EventEnvelope(Sequence(4), _, _, _) => }
tp.expectComplete()
}
journalOps.withCurrentEventsByTag()("5", NoOffset) { tp =>
tp.request(Int.MaxValue)
tp.expectNextPF { case EventEnvelope(Sequence(5), _, _, _) => }
tp.expectComplete()
}
journalOps.withCurrentEventsByTag()("five", NoOffset) { tp =>
tp.request(Int.MaxValue)
tp.expectNextPF { case EventEnvelope(Sequence(5), _, _, _) => }
tp.expectComplete()
}
}
}
it should "complete without any gaps in case events are being persisted when the query is executed" in withActorSystem {
implicit system =>
val journalOps = new JavaDslJdbcReadJournalOperations(system)
import system.dispatcher
withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>
def sendMessagesWithTag(tag: String, numberOfMessagesPerActor: Int): Future[Done] = {
val futures = for (actor <- Seq(actor1, actor2, actor3); i <- 1 to numberOfMessagesPerActor) yield {
actor ? TaggedAsyncEvent(Event(i.toString), tag)
}
Future.sequence(futures).map(_ => Done)
}
val tag = "someTag"
// send a batch of 3 * 200
val batch1 = sendMessagesWithTag(tag, 200)
// Try to persist a large batch of events per actor. Some of these may be returned, but not all!
// Reduced for 5.0.0 as we can no longer do a batch insert due to the insert returning the ordering
// so trying to persist 1000s in a batch is slower
val batch2 = sendMessagesWithTag(tag, 2000)
// wait for acknowledgement of the first batch only
batch1.futureValue
// Sanity check, all events in the first batch must be in the journal
journalOps.countJournal.futureValue should be >= 600L
// start the query before the last batch completes
journalOps.withCurrentEventsByTag()(tag, NoOffset) { tp =>
// The stream must complete within the given amount of time
// This may take a while in case the journal sequence actor detects gaps
val allEvents = tp.toStrict(atMost = 20.seconds)
allEvents.size should be >= 600
val expectedOffsets = 1L.to(allEvents.size).map(Sequence.apply)
allEvents.map(_.offset) shouldBe expectedOffsets
}
batch2.futureValue
}
}
}
// Note: these tests use the shared-db configs; the tests for all (not only current) events use the regular db config
class H2ScalaCurrentEventsByTagTest extends CurrentEventsByTagTest("h2-shared-db-application.conf") with H2Cleaner
|
dnvriend/akka-persistence-jdbc
|
core/src/test/scala/akka/persistence/jdbc/query/CurrentEventsByTagTest.scala
|
Scala
|
apache-2.0
| 8,450
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.plans.logical.{HintErrorHandler, HintInfo}
/**
* The hint error handler that logs warnings for each hint error.
*/
object HintErrorLogger extends HintErrorHandler with Logging {
import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
override def hintNotRecognized(name: String, parameters: Seq[Any]): Unit = {
logWarning(s"Unrecognized hint: ${hintToPrettyString(name, parameters)}")
}
override def hintRelationsNotFound(
name: String, parameters: Seq[Any], invalidRelations: Set[Seq[String]]): Unit = {
invalidRelations.foreach { ident =>
logWarning(s"Count not find relation '${ident.quoted}' specified in hint " +
s"'${hintToPrettyString(name, parameters)}'.")
}
}
override def joinNotFoundForJoinHint(hint: HintInfo): Unit = {
logWarning(s"A join hint $hint is specified but it is not part of a join relation.")
}
override def joinHintNotSupported(hint: HintInfo, reason: String): Unit = {
logWarning(s"Hint $hint is not supported in the query: $reason.")
}
override def hintOverridden(hint: HintInfo): Unit = {
logWarning(s"Hint $hint is overridden by another hint and will not take effect.")
}
private def hintToPrettyString(name: String, parameters: Seq[Any]): String = {
val prettyParameters = parameters.map {
case a: UnresolvedAttribute => a.nameParts.mkString(".")
case e: Any => e.toString
}
s"$name${prettyParameters.mkString("(", ", ", ")")}"
}
}
|
mahak/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HintErrorLogger.scala
|
Scala
|
apache-2.0
| 2,404
|
package org.jetbrains.plugins.scala
package codeInsight
package template
package impl
import org.jetbrains.plugins.scala.codeInsight.ScalaCodeInsightBundle
import org.jetbrains.plugins.scala.lang.psi.api._
/**
* @author Alefas
* @since 18/12/14.
*/
final class ScalaStringContextType extends ScalaFileTemplateContextType.ElementContextType("STRING", ScalaCodeInsightBundle.message("element.context.type.string")) {
override protected def isInContext(offset: Int)
(implicit file: ScalaFile): Boolean =
ScalaStringContextType.isInContext(offset)
}
object ScalaStringContextType {
private[impl] def isInContext(offset: Int)
(implicit file: ScalaFile): Boolean =
ScalaFileTemplateContextType.isInContext(offset, classOf[base.ScLiteral])(_.isString)
}
|
JetBrains/intellij-scala
|
scala/codeInsight/src/org/jetbrains/plugins/scala/codeInsight/template/impl/ScalaStringContextType.scala
|
Scala
|
apache-2.0
| 836
|
/* date: Jun 22, 2012
EDIT NODE
Evaluates the user's input before the input is stored.
If the evaluation fails, then the user is given the
opportunity to reenter the correct response.
For example, the Edit command (beginning with the 'e' tag):
d Enter the type of animal shown (# $animal)
e ($animal)=(cat)
requires the input to be "cat", otherwise the input field
is cleared, awaiting a new entry. The Edit command has
the capability of displaying a user prompt on failure. The
prompt message is displayed on the status line. In the command,
the message follows the 'status=' tag, for example:
e ($animal)=(cat) status=The animal picture is a cat
The expression '($animal)=(cat)' is a logic expression that is
similar in function to the logic of the 'c', 'f', 'a', 'g'
commands. For example:
e ($ans)=(y) or ($ans)=(n) status=Enter y or n
supports a yes/no like response (note: the answer box
(# /limit 1/ $ans) restricts input to one character).
The literals 'number' and 'letter' may replace the logic expression,
for example
e number status=A number is required
The literal 'letter' forces the input to consist of a non-numeric
value.
The Edit command has an optional identifier associating it with
a particular input field:
e <identifier> <logic> <status>
The identifier is required when a Display command has more than
one input field, e.g.,
d Enter quantity=(# $quantity) and price=(# $price)
e number
The Edit command is only applied to the input of $price and not
$quantity. The $<variable> will associate the Edit command with
a particular input field, e.g.,
e $price number
e $quantity number
e $quantity ($quantity)> (1) and ($quantity) < (5)
More than one Edit command can be associated with an input field
as explained next.
The EditNode is a child of the AnswerBox parent, allowing an
input field to have more than one edit evaluation. If anyone
evaluation fails, then the user is required to reenter a response.
The EditNode action occurs in the KeyListenerObject--it is here
that the input field is captured and can be subject to evaluation.
KeyListenerObject invokes BoxField (parent) to process its
EditNode children.
*/
package com.client
import scala.collection.mutable.Map
case class EditNode(var symbolTable:Map[String,String]) extends Node {
/*
Node
symbolTable holds $<variables>
def setId
def convertToSibling
def convertToChild
*/
//------------------------------swizzle routines---------------------
def convertToReference(swizzleTable:Map[String, Node])={
convertToSibling(swizzleTable) //child of AnswerBox
}
//-------------------------------------------------------------------
var xtype=""
var statusMessage=""
var variable="" // not utilized ???
var conditionStruct=""
// -----------------------------------------------
//Invoked by BoxField in an iteration loop
def evaluateTheEditNode(response:String):Boolean= {
// "" responses unallowed
if(response=="") false
else {
if(isNumberOrLetter(xtype)) // edit cmd such as 'e number'
evaluateNumberOrLetter(xtype, response)
else { // edit cmd has logic such as 'e ($age) > (0)'
val xxx=isConditionTrue // invokes LogicTest.logicTest(..)
xxx
}
}
}
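// Illustrative sketch only (added comment, not from the original source): a parent such as
// BoxField, holding several EditNode children, could validate a response roughly like this;
// the names editNodes, showStatus and acceptInput are hypothetical.
//   editNodes.find(node => !node.evaluateTheEditNode(response)) match {
//     case Some(failed) => showStatus(failed.getFailureMessage)   // reject and re-prompt
//     case None         => acceptInput(response)                  // all edits passed
//   }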
def isNumberOrLetter(xtype:String):Boolean= { xtype=="number" || xtype=="letter"}
def evaluateNumberOrLetter(xtype:String, response:String)= {
xtype match {
case "number" =>
areAllDigits(response)
case "letter" =>
areAllLetters(response)
case _=> println("EditNode unknown xtype="+xtype)
false
}
}
def areAllDigits(s:String):Boolean={ s.forall(x=> x.isDigit || x=='.') }
def areAllLetters(s:String):Boolean={ s.forall(x=> x.isLetter || x==' ' ) }
def isConditionPresent ={ if(conditionStruct != "0") true; else false }
def isConditionTrue:Boolean ={ LogicTest.logicTest(conditionStruct, symbolTable) }
// Invoked by BoxField
def getFailureMessage= statusMessage
// -----------------------------------------------
// CreateClass generates instances of EditNode without fields or parameters.
// However, it invokes 'receive_objects' to load parameters from *.struct
// file, as well as symbolic addresses that are later resolved to physical ones.
def receive_objects(structSet:List[String] ) {
import util.control.Breaks._
var flag=true
for( e <- structSet) {
breakable { if(e=="%%") break // end of arguments
else {
var pair=e.split("[\t]")
pair(0) match {
case "address" =>
setAddress(pair(1))
case "sibling" =>
setNext(pair(1))
case "condition" =>
conditionStruct= pair(1)
case "type" =>
xtype=pair(1)
case "status" =>
statusMessage=pair(1)
if(statusMessage=="0") //otherwise BoxField displays "0"
statusMessage=""
case "variable" =>
variable=pair(1)
}
}
} //breakable
}
}
/*
val in=structSet.iterator
setAddress(in.next) //Node
setNext(in.next) //Node link to next EditNode
conditionStruct=in.next // logic expression such as (1)=(1)
xtype=in.next // 'number' or 'letter'
statusMessage=in.next
variable=in.next // $<variable> associated with edit
val percent=in.next
//println("EditNode: percent="+percent)
*/
}
|
hangle/Notecard
|
src/EditNode.scala
|
Scala
|
apache-2.0
| 5,334
|
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert2.transforms
class MathFunctionFactory extends TransformerFunctionFactory {
override def functions: Seq[TransformerFunction] = Seq(add, subtract, multiply, divide, mean, min, max)
private val add = TransformerFunction.pure("add") { args =>
var s: Double = 0.0
args.foreach(s += parseDouble(_))
s
}
private val multiply = TransformerFunction.pure("multiply") { args =>
var s: Double = 1.0
args.foreach(s *= parseDouble(_))
s
}
private val subtract = TransformerFunction.pure("subtract") { args =>
var s: Double = parseDouble(args(0))
args.drop(1).foreach(s -= parseDouble(_))
s
}
private val divide = TransformerFunction.pure("divide") { args =>
var s: Double = parseDouble(args(0))
args.drop(1).foreach(s /= parseDouble(_))
s
}
private val mean = TransformerFunction.pure("mean") { args =>
if (args.length == 0) { 0d } else {
var count = 0d
args.map(parseDouble).foreach(d => count += d)
count / args.length
}
}
private val min = TransformerFunction.pure("min") { args =>
var min = java.lang.Double.POSITIVE_INFINITY
args.map(parseDouble).foreach(d => if (min > d) { min = d })
min
}
private val max = TransformerFunction.pure("max") { args =>
var max = java.lang.Double.NEGATIVE_INFINITY
args.map(parseDouble).foreach(d => if (max < d) { max = d })
max
}
private def parseDouble(v: Any): Double = {
v match {
case n: Int => n.toDouble
case n: Double => n
case n: Float => n.toDouble
case n: Long => n.toDouble
case n: String => n.toDouble
case n: Any => n.toString.toDouble
}
}
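// (Added descriptive note:) because every argument is routed through parseDouble, the functions
// above accept mixed Int/Long/Float/Double/String inputs; e.g. "add" over the values 1, "2.5"
// and 3L evaluates to 6.5.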
}
|
elahrvivaz/geomesa
|
geomesa-convert/geomesa-convert-common/src/main/scala/org/locationtech/geomesa/convert2/transforms/MathFunctionFactory.scala
|
Scala
|
apache-2.0
| 2,181
|
package lila.base
import akka.actor.ActorSystem
import ornicar.scalalib.Zero
import scala.collection.BuildFrom
import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext => EC, Future, Await }
import scala.util.Try
import lila.common.Chronometer
import LilaTypes._
final class PimpedFuture[A](private val fua: Fu[A]) extends AnyVal {
@inline def dmap[B](f: A => B): Fu[B] = fua.map(f)(EC.parasitic)
@inline def dforeach[B](f: A => Unit): Unit = fua.foreach(f)(EC.parasitic)
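// (Added descriptive note:) dmap and dforeach run their callback on ExecutionContext.parasitic,
// i.e. synchronously on the thread that completes the future, so they are only appropriate for
// cheap, non-blocking transformations; use map/foreach with a proper ExecutionContext otherwise.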
def >>-(sideEffect: => Unit)(implicit ec: EC): Fu[A] =
fua andThen { case _ =>
sideEffect
}
def >>[B](fub: => Fu[B])(implicit ec: EC): Fu[B] =
fua flatMap { _ =>
fub
}
@inline def void: Fu[Unit] =
dmap { _ =>
()
}
@inline def inject[B](b: => B): Fu[B] =
dmap { _ =>
b
}
def injectAnyway[B](b: => B)(implicit ec: EC): Fu[B] = fold(_ => b, _ => b)
def effectFold(fail: Exception => Unit, succ: A => Unit)(implicit ec: EC): Unit = {
fua onComplete {
case scala.util.Failure(e: Exception) => fail(e)
case scala.util.Failure(e) => throw e // Throwables
case scala.util.Success(e) => succ(e)
}
}
def fold[B](fail: Exception => B, succ: A => B)(implicit ec: EC): Fu[B] =
fua map succ recover { case e: Exception => fail(e) }
def flatFold[B](fail: Exception => Fu[B], succ: A => Fu[B])(implicit ec: EC): Fu[B] =
fua flatMap succ recoverWith { case e: Exception => fail(e) }
def logFailure(logger: => lila.log.Logger, msg: Throwable => String)(implicit ec: EC): Fu[A] =
addFailureEffect { e =>
logger.warn(msg(e), e)
}
def logFailure(logger: => lila.log.Logger)(implicit ec: EC): Fu[A] = logFailure(logger, _.toString)
def addFailureEffect(effect: Throwable => Unit)(implicit ec: EC) = {
fua.failed.foreach { e: Throwable =>
effect(e)
}
fua
}
def addEffect(effect: A => Unit)(implicit ec: EC): Fu[A] = {
fua foreach effect
fua
}
def addEffects(fail: Exception => Unit, succ: A => Unit)(implicit ec: EC): Fu[A] = {
fua onComplete {
case scala.util.Failure(e: Exception) => fail(e)
case scala.util.Failure(e) => throw e // Throwables
case scala.util.Success(e) => succ(e)
}
fua
}
def addEffects(f: Try[A] => Unit)(implicit ec: EC): Fu[A] = {
fua onComplete f
fua
}
def addEffectAnyway(inAnyCase: => Unit)(implicit ec: EC): Fu[A] = {
fua onComplete { _ =>
inAnyCase
}
fua
}
def mapFailure(f: Exception => Exception)(implicit ec: EC) =
fua recoverWith { case cause: Exception =>
fufail(f(cause))
}
def prefixFailure(p: => String)(implicit ec: EC) =
mapFailure { e =>
LilaException(s"$p ${e.getMessage}")
}
def thenPp(implicit ec: EC): Fu[A] = {
effectFold(
e => println("[failure] " + e),
a => println("[success] " + a)
)
fua
}
def thenPp(msg: String)(implicit ec: EC): Fu[A] = {
effectFold(
e => println(s"[$msg] [failure] $e"),
a => println(s"[$msg] [success] $a")
)
fua
}
def await(duration: FiniteDuration, name: String): A =
Chronometer.syncMon(_.blocking.time(name)) {
Await.result(fua, duration)
}
def awaitOrElse(duration: FiniteDuration, name: String, default: => A): A =
try {
await(duration, name)
} catch {
case _: Exception => default
}
def withTimeout(duration: FiniteDuration)(implicit ec: EC, system: ActorSystem): Fu[A] =
withTimeout(duration, LilaTimeout(s"Future timed out after $duration"))
def withTimeout(
duration: FiniteDuration,
error: => Throwable
)(implicit ec: EC, system: ActorSystem): Fu[A] = {
Future firstCompletedOf Seq(
fua,
akka.pattern.after(duration, system.scheduler)(Future failed error)
)
}
def withTimeoutDefault(
duration: FiniteDuration,
default: => A
)(implicit ec: EC, system: ActorSystem): Fu[A] = {
Future firstCompletedOf Seq(
fua,
akka.pattern.after(duration, system.scheduler)(Future(default))
)
}
def delay(duration: FiniteDuration)(implicit ec: EC, system: ActorSystem) =
lila.common.Future.delay(duration)(fua)
def chronometer = Chronometer(fua)
def chronometerTry = Chronometer.lapTry(fua)
def mon(path: lila.mon.TimerPath) = chronometer.mon(path).result
def monTry(path: Try[A] => lila.mon.TimerPath) = chronometerTry.mon(r => path(r)(lila.mon)).result
def monSuccess(path: lila.mon.type => Boolean => kamon.metric.Timer) =
chronometerTry.mon { r =>
path(lila.mon)(r.isSuccess)
}.result
def monValue(path: A => lila.mon.TimerPath) = chronometer.monValue(path).result
def logTime(name: String) = chronometer pp name
def logTimeIfGt(name: String, duration: FiniteDuration) = chronometer.ppIfGt(name, duration)
def recoverDefault(implicit z: Zero[A], ec: EC): Fu[A] = recoverDefault(z.zero)
def recoverDefault(default: => A)(implicit ec: EC): Fu[A] =
fua recover {
case _: LilaException => default
case _: java.util.concurrent.TimeoutException => default
case e: Exception =>
lila.log("common").warn("Future.recoverDefault", e)
default
}
}
final class PimpedFutureBoolean(private val fua: Fu[Boolean]) extends AnyVal {
def >>&(fub: => Fu[Boolean]): Fu[Boolean] =
fua.flatMap { if (_) fub else fuFalse }(EC.parasitic)
def >>|(fub: => Fu[Boolean]): Fu[Boolean] =
fua.flatMap { if (_) fuTrue else fub }(EC.parasitic)
@inline def unary_! = fua.map { !_ }(EC.parasitic)
}
final class PimpedFutureOption[A](private val fua: Fu[Option[A]]) extends AnyVal {
def orFail(msg: => String)(implicit ec: EC): Fu[A] =
fua flatMap {
_.fold[Fu[A]](fufail(msg))(fuccess)
}
def orFailWith(err: => Exception)(implicit ec: EC): Fu[A] =
fua flatMap {
_.fold[Fu[A]](fufail(err))(fuccess)
}
def orElse(other: => Fu[Option[A]])(implicit ec: EC): Fu[Option[A]] =
fua flatMap {
_.fold(other) { x =>
fuccess(Some(x))
}
}
def getOrElse(other: => Fu[A])(implicit ec: EC): Fu[A] = fua flatMap { _.fold(other)(fuccess) }
def map2[B](f: A => B)(implicit ec: EC): Fu[Option[B]] = fua.map(_ map f)
def dmap2[B](f: A => B): Fu[Option[B]] = fua.map(_ map f)(EC.parasitic)
def getIfPresent: Option[A] =
fua.value match {
case Some(scala.util.Success(v)) => v
case _ => None
}
}
// final class PimpedFutureValid[A](private val fua: Fu[Valid[A]]) extends AnyVal {
// def flatten: Fu[A] = fua.flatMap {
// _.fold[Fu[A]](fufail(_), fuccess(_))
// }(EC.parasitic)
// }
final class PimpedIterableFuture[A, M[X] <: IterableOnce[X]](private val t: M[Fu[A]]) extends AnyVal {
def sequenceFu(implicit bf: BuildFrom[M[Fu[A]], A, M[A]], ec: EC): Fu[M[A]] = Future.sequence(t)
}
|
luanlv/lila
|
modules/common/src/main/base/PimpedFutures.scala
|
Scala
|
mit
| 7,009
|
package collins.solr
import models.{Asset, AssetLog, Page, PageParams, SortDirection}
import models.asset.AssetView
import play.api.Logger
import org.apache.solr.client.solrj.{SolrQuery, SolrServerException}
import org.apache.solr.common.SolrDocument
import Solr._
import SortDirection._
/**
* This class is a full search query, which includes an expression along with
* sorting and pagination parameters
*/
abstract class CollinsSearchQuery[T](docType: SolrDocType, query: TypedSolrExpression, page: PageParams) {
private[this] val logger = Logger("CollinsSearchQuery")
def getResults(): Either[String, (Seq[T], Long)] = Solr.server.map{server =>
val q = new SolrQuery
val queryString = query.toSolrQueryString
docType.keyResolver.either(page.sortField).right.flatMap{k => k.sortKey.map{Right(_)}.getOrElse(Left("Cannot sort on " + k.name))}.right.flatMap { sortKey =>
logger.debug("SOLR: " + queryString + "| sort: " + sortKey.name)
q.setQuery(queryString)
q.setStart(page.offset)
q.setRows(page.size)
q.addSortField(sortKey.resolvedName, getSortDirection)
try {
val response = server.query(q)
val results = response.getResults
Right((results.toArray.toSeq.map {
case doc: SolrDocument => parseDocument(doc)
case other =>
logger.warn("Got something weird back from Solr %s".format(other.toString))
None
}.flatten, results.getNumFound))
} catch {
case e: Exception => Left(e.getMessage + " (query %s)".format(queryString))
}
}
}.getOrElse(Left("Solr Plugin not initialized!"))
def getPage(): Either[String, Page[T]] = getResults().right.map{case (results, total) =>
Page(results, page.page, page.page * page.size, total)
}
protected def getSortDirection() = {
if (page.sort == SortAsc)
SolrQuery.ORDER.asc
else
SolrQuery.ORDER.desc
}
def parseDocument(doc: SolrDocument): Option[T]
}
case class AssetSearchQuery(query: TypedSolrExpression, page: PageParams) extends CollinsSearchQuery[Asset](AssetDocType, query, page) {
def parseDocument(doc: SolrDocument) = Asset.findByTag(doc.getFieldValue("TAG").toString)
}
case class AssetLogSearchQuery(query: TypedSolrExpression, page: PageParams) extends CollinsSearchQuery[AssetLog](AssetLogDocType, query, page) {
def parseDocument(doc: SolrDocument) = AssetLog.findById(Integer.parseInt(doc.getFieldValue("ID").toString))
}
|
Shopify/collins
|
app/collins/solr/CollinsSearchQuery.scala
|
Scala
|
apache-2.0
| 2,470
|
package com.sksamuel.avro4s.schema
import com.sksamuel.avro4s.{AvroSchema, Encoder, ImmutableRecord, SchemaFor}
import com.sksamuel.avro4s.examples.UppercasePkg.ClassInUppercasePackage
import com.sksamuel.avro4s.schemas.JavaStringSchemaFor
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
class StringSchemasTest extends AnyWordSpec with Matchers {
"SchemaEncoder" should {
"accept strings" in {
case class Test(str: String)
val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/string.json"))
val schema = AvroSchema[Test]
schema.toString(true) shouldBe expected.toString(true)
}
"encode strings as java strings when JavaStringSchemaFor is in scope" in {
case class Foo(s: String)
given SchemaFor[String] = JavaStringSchemaFor
val schema = AvroSchema[Foo]
val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/java_string.json"))
schema.toString(true) shouldBe expected.toString(true)
}
}
}
|
sksamuel/avro4s
|
avro4s-core/src/test/scala/com/sksamuel/avro4s/schema/StringSchemasTest.scala
|
Scala
|
apache-2.0
| 1,075
|
/*
* Copyright 2015 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.httpclient.endpoint
import akka.actor.{ActorSystem, ExtensionId, Extension, ExtendedActorSystem}
import org.squbs.pipeline.{PipelineManager, PipelineSetting}
import spray.http.Uri
import scala.collection.mutable.ListBuffer
import org.squbs.httpclient.env.{Default, Environment}
import org.squbs.httpclient.Configuration
import com.typesafe.scalalogging.LazyLogging
case class Endpoint(uri: Uri, config: Configuration = Configuration())
object Endpoint {
def check(endpoint: String) = {
require(endpoint.toLowerCase.startsWith("http://") || endpoint.toLowerCase.startsWith("https://"),
"service should be started with http:// or https://")
}
}
trait EndpointResolver {
def name: String
def resolve(svcName: String, env: Environment = Default): Option[Endpoint]
}
abstract class PipelineAwareEndpointResolver(system: ActorSystem) extends EndpointResolver {
protected def getPipelineSetting(name: String): Option[PipelineSetting] = PipelineManager(system).getPipelineSetting(name)(system)
}
class EndpointRegistryExtension(system: ExtendedActorSystem) extends Extension with LazyLogging {
val endpointResolvers = ListBuffer[EndpointResolver]()
def register(resolver: EndpointResolver) = {
endpointResolvers.find(_.name == resolver.name) match {
case None =>
endpointResolvers.prepend(resolver)
case Some(routing) =>
logger.warn(s"Endpoint Resolver: ${resolver.name} already registered, skipped!")
}
}
def unregister(name: String) = {
endpointResolvers.find(_.name == name) match {
case None =>
logger.warn("Endpoint Resolver:" + name + " cannot be found, skipped unregister!")
case Some(resolver) =>
endpointResolvers.remove(endpointResolvers.indexOf(resolver))
}
}
def route(svcName: String, env: Environment = Default): Option[EndpointResolver] = {
endpointResolvers.find(_.resolve(svcName, env) != None)
}
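// (Added descriptive note:) resolution walks the registered resolvers in list order, which is
// most-recently-registered first because register() prepends; if no resolver matches and the
// service name itself starts with http:// or https://, that name is used directly as the endpoint.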
def resolve(svcName: String, env: Environment = Default): Option[Endpoint] = {
val resolvedEndpoint = endpointResolvers.foldLeft[Option[Endpoint]](None) {
(endpoint: Option[Endpoint], resolver: EndpointResolver) =>
endpoint match {
case Some(_) =>
endpoint
case None =>
resolver.resolve(svcName, env)
}
}
resolvedEndpoint match {
case Some(ep) =>
logger.debug(s"Endpoint can be resolved by ($svcName, $env), the endpoint uri is:" + ep.uri)
resolvedEndpoint
case None if svcName != null && (svcName.startsWith("http://") || svcName.startsWith("https://")) =>
logger.debug(s"Endpoint can be resolved with service name match http:// or https:// pattern by " +
s"($svcName, $env), the endpoint uri is:" + svcName)
Some(Endpoint(svcName))
case _ =>
logger.warn(s"Endpoint can not be resolved by ($svcName, $env)!")
None
}
}
}
object EndpointRegistry extends ExtensionId[EndpointRegistryExtension] {
override def createExtension(system: ExtendedActorSystem): EndpointRegistryExtension =
new EndpointRegistryExtension(system)
}
|
keshin/squbs
|
squbs-httpclient/src/main/scala/org/squbs/httpclient/endpoint/HttpClientEndpoint.scala
|
Scala
|
apache-2.0
| 3,737
|
package streamz.example
import akka.actor.ActorSystem
import scalaz.concurrent.Task
import scalaz.std.string._
import scalaz.stream.Process
import streamz.akka.persistence._
object PersistenceExample {
implicit val system = ActorSystem("example")
val p1: Process[Task, Event[Any]] = replay("processor-1")
val p2: Process[Task, Event[Any]] = replay("processor-1", from = 3L)
val p3: Process[Task, String] = p1.scan("")((acc, evt) => acc + evt.data)
val p4: Process[Task, String] = for {
s @ Snapshot(md, data) <- snapshot[String]("processor-1")
currentState <- replay(md.persistenceId, s.nextSequenceNr).scan(data)((acc, evt) => acc + evt.data)
} yield currentState
val p5: Process[Task, Unit] = Process("a", "b", "c").journal("processor-2")
}
|
Astrac/streamz
|
streamz-akka-persistence/src/test/scala/streamz/example/PersistenceExample.scala
|
Scala
|
apache-2.0
| 779
|
package org.scalamu.plugin.mutators
import org.scalamu.plugin.MutatingTransformer
private[mutators] trait AbstractReplacementRules extends DomainAware {
self: MutatingTransformer =>
protected def replaceWith(input: Domain): global.Tree
}
|
sugakandrey/scalamu
|
scalac-plugin/src/main/scala/org/scalamu/plugin/mutators/AbstractReplacementRules.scala
|
Scala
|
gpl-3.0
| 248
|
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.quinine.metrics.targeted
import org.bdgenomics.adam.util.ADAMFunSuite
import org.bdgenomics.formats.avro.{ AlignmentRecord, Contig }
import scala.math.abs
class ReadStatsSuite extends ADAMFunSuite {
val qualThreshold = 20
val contig1 = Contig.newBuilder()
.setContigName("ctg1")
.build()
def qualString(qual: Int, length: Int): String = {
// toChar.toString reads funny, but .toString on Int returns the int as a string literal
// e.g.,
// 'a'.toString = "a"
// 'a'.toInt = 97
// 'a'.toInt.toString = "97"
// 'a'.toInt.toChar.toString = "a"
(33 + qual).toChar.toString * length
}
// my, the perfect read is free of defects!
val perfectRead = AlignmentRecord.newBuilder()
.setSequence("A" * 10)
.setQual(qualString(65, 10))
.setDuplicateRead(false)
.setFailedVendorQualityChecks(false)
.setReadMapped(true)
.setStart(1L)
.setEnd(11L)
.setContig(contig1)
.setReadPaired(true)
.setMateMapped(true)
.setReadInFragment(0)
.setMateContig(contig1)
.setMateAlignmentStart(100L)
.setMateAlignmentEnd(110L)
.setMapq(65)
.build()
// similar to the perfect read, but has no qual
val noQualRead = AlignmentRecord.newBuilder(perfectRead)
.setQual(null)
.build()
// similar to the perfect read, but failed the vendor quality checks
val failedVendorChecksRead = AlignmentRecord.newBuilder(perfectRead)
.setFailedVendorQualityChecks(true)
.build()
// similar to the perfect read, but marked as a duplicate
val duplicateRead = AlignmentRecord.newBuilder(perfectRead)
.setDuplicateRead(true)
.build()
// similar to the perfect read, but all bases are low quality
val lowBaseQualityRead = AlignmentRecord.newBuilder(perfectRead)
.setQual(qualString(19, 10))
.build()
// similar to the perfect read, but the start and end bases are low quality
val lowBaseQualityEndsRead = AlignmentRecord.newBuilder(perfectRead)
.setQual(qualString(19, 1) + qualString(20, 8) + qualString(19, 1))
.build()
// similar to the perfect read, but with zero mapq
val mapQZeroRead = AlignmentRecord.newBuilder(perfectRead)
.setMapq(0)
.build()
// similar to the perfect read, but with mapq = 19
val poorlyMappedRead = AlignmentRecord.newBuilder(perfectRead)
.setMapq(19)
.build()
// similar to the perfect read, but overlaps its mate
val overlappingFirstOfPairRead = AlignmentRecord.newBuilder(perfectRead)
.setMateAlignmentStart(5L)
.setMateAlignmentEnd(15L)
.build()
// similar to the overlapping first-of-pair read, but the second read in the template
val overlappingSecondOfPairRead = AlignmentRecord.newBuilder(overlappingFirstOfPairRead)
.setReadInFragment(1)
.build()
// similar to the perfect read, but without a pair
val unpairedRead = AlignmentRecord.newBuilder(perfectRead)
.setReadInFragment(null)
.setReadPaired(false)
.setMateContig(null)
.setMateAlignmentStart(null)
.setMateAlignmentEnd(null)
.build()
def stat(read: AlignmentRecord) = ReadStats(read, qualThreshold, qualThreshold)
def genericAsserts(rs: ReadStats,
dupe: Boolean = false,
mapQZero: Boolean = false,
poorlyMapped: Boolean = false,
lowQualBases: Long = 0L,
overlapping: Boolean = false,
failedChecks: Boolean = false) {
assert(rs.reads === 1L)
assert(rs.bases === 10L)
if (!failedChecks) {
assert(rs.readsPassingVendorQualityFilter === 1L)
} else {
assert(rs.readsPassingVendorQualityFilter === 0L)
}
if (!dupe) {
assert(rs.uniqueReads === 1L)
assert(rs.duplicateBases === 0L)
} else {
assert(rs.uniqueReads === 0L)
assert(rs.duplicateBases === 10L)
}
// dupe reads are not counted
if (!mapQZero && !dupe) {
assert(rs.nonZeroMapQReads === 1L)
assert(rs.nonZeroMapQBases === 10L)
} else {
assert(rs.nonZeroMapQReads === 0L)
assert(rs.nonZeroMapQBases === 0L)
}
if (!overlapping) {
assert(rs.overlapBases === 0L)
}
if (!poorlyMapped && !mapQZero) {
assert(rs.lowMapQBases === 0L)
} else {
assert(rs.lowMapQBases === 10L)
}
assert(rs.lowQualityBases === lowQualBases)
}
test("perfect read") {
val rs = stat(perfectRead)
genericAsserts(rs)
}
test("noQualRead should be equivalent to the perfect read, and should not NPE") {
val rs = stat(noQualRead)
genericAsserts(rs)
}
test("read failed vendor qc") {
val rs = stat(failedVendorChecksRead)
genericAsserts(rs, failedChecks = true)
}
test("read is a dupe") {
val rs = stat(duplicateRead)
genericAsserts(rs, dupe = true)
}
test("all of the bases in a read are low quality") {
val rs = stat(lowBaseQualityRead)
genericAsserts(rs, lowQualBases = 10L)
}
test("the start/end of the read have low quality scores") {
val rs = stat(lowBaseQualityEndsRead)
genericAsserts(rs, lowQualBases = 2L)
}
test("read's mapping quality is zero") {
val rs = stat(mapQZeroRead)
genericAsserts(rs, mapQZero = true)
}
test("read is mapped with low, but non-zero mapping quality") {
val rs = stat(poorlyMappedRead)
genericAsserts(rs, poorlyMapped = true)
}
test("first-of-pair read overlaps mate") {
val rs = stat(overlappingFirstOfPairRead)
genericAsserts(rs)
}
test("second-of-pair read overlaps mate") {
val rs = stat(overlappingSecondOfPairRead)
genericAsserts(rs, overlapping = true)
}
test("unpaired, but otherwise perfect read") {
val rs = stat(unpairedRead)
genericAsserts(rs)
}
test("merge two read stats instances") {
val rs1 = ReadStats(1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L)
val rs2 = ReadStats(11L, 12L, 13L, 14L, 15L, 16L, 17L, 18L, 19L, 20L)
val mergedRs = rs1 ++ rs2
assert(mergedRs.reads === 12L)
assert(mergedRs.bases === 14L)
assert(mergedRs.readsPassingVendorQualityFilter === 16L)
assert(mergedRs.uniqueReads === 18L)
assert(mergedRs.nonZeroMapQReads === 20L)
assert(mergedRs.nonZeroMapQBases === 22L)
assert(mergedRs.lowQualityBases === 24L)
assert(mergedRs.duplicateBases === 26L)
assert(mergedRs.lowMapQBases === 28L)
assert(mergedRs.overlapBases === 30L)
}
sparkTest("run an aggregate across all reads") {
val reads = Seq(perfectRead,
failedVendorChecksRead,
duplicateRead,
lowBaseQualityRead,
lowBaseQualityEndsRead,
mapQZeroRead,
poorlyMappedRead,
overlappingFirstOfPairRead,
overlappingSecondOfPairRead,
unpairedRead)
val rs = ReadStats(sc.parallelize(reads), qualThreshold, qualThreshold)
assert(rs.reads === 10L)
assert(rs.bases === 100L)
assert(rs.readsPassingVendorQualityFilter === 9L)
assert(rs.uniqueReads === 9L)
assert(rs.nonZeroMapQReads === 8L)
assert(rs.nonZeroMapQBases === 80L)
assert(rs.lowQualityBases === 12L)
assert(rs.duplicateBases === 10L)
assert(rs.lowMapQBases === 20L)
assert(rs.overlapBases === 10L)
assert(fpCompare(rs.percentPassingChecks, 90.0))
assert(fpCompare(rs.percentUniqueHighQualityReads, 80.0))
assert(fpCompare(rs.percentLowQualityBases, 12.0))
assert(fpCompare(rs.percentDuplicateBases, 10.0))
assert(fpCompare(rs.percentLowMapQBases, 20.0))
assert(fpCompare(rs.percentOverlapBases, 10.0))
}
def fpCompare(a: Double, b: Double, absTol: Double = 1e-3): Boolean = {
abs(a - b) <= absTol
}
}
|
bigdatagenomics/qc-metrics
|
quinine-core/src/test/scala/org/bdgenomics/quinine/metrics/targeted/ReadStatsSuite.scala
|
Scala
|
apache-2.0
| 8,446
|
package org.alitouka.spark.dbscan
import org.alitouka.spark.dbscan.util.commandLine.{CommonArgs, CommonArgsParser}
import org.apache.commons.math3.ml.distance.{ManhattanDistance, EuclideanDistance}
class CommandLineParsingSuite extends DbscanSuiteBase {
val masterUrl = "spark://localhost:7777"
val jar = "hdfs://somewhere/dbscan_prototype.jar"
val inputPath = "hdfs://somewhere/in"
val outputPath = "hdfs://somewhere/out"
val eps = 0.1
val minPts = 3
val customDistanceMeasureClassName = "org.apache.commons.math3.ml.distance.ManhattanDistance"
val numBuckets = 100
val requiredArgs = Array ("--ds-master", masterUrl, "--ds-jar", jar, "--ds-input", inputPath, "--ds-output", outputPath)
val dbscanAdditionalArgs = Array ("--eps", eps.toString, "--numPts", minPts.toString)
val distanceMeasureArg = Array ("--distanceMeasure", customDistanceMeasureClassName)
val borderPointsAsNoiseArg = Array ("--borderPointsAsNoise", "1")
val numberOfBucketsArg = Array ("--numBuckets", numBuckets.toString)
test ("DbscanDriver.OptionParser should find all required arguments") {
val parser = new DbscanDriver.ArgsParser ()
val args = requiredArgs ++ dbscanAdditionalArgs
val parsingResult = parser.parse(args)
assert (parsingResult)
assertThatDbscanRequiredArgumentsWereFound(parser)
parser.args.distanceMeasure shouldBe a [EuclideanDistance]
parser.args.borderPointsAsNoise should equal (DbscanSettings.getDefaultTreatmentOfBorderPoints)
}
test("DbscanDriver.OptionParser should recognize custom distance measure and treatment of border points") {
val parser = new DbscanDriver.ArgsParser ()
val args = requiredArgs ++ dbscanAdditionalArgs ++ distanceMeasureArg ++ borderPointsAsNoiseArg
val parsingResult = parser.parse(args)
assert (parsingResult)
assertThatDbscanRequiredArgumentsWereFound(parser)
parser.args.distanceMeasure shouldBe a [ManhattanDistance]
assert (parser.args.borderPointsAsNoise)
}
def assertThatDbscanRequiredArgumentsWereFound (parser: DbscanDriver.ArgsParser) {
parser.args.eps should equal (eps)
parser.args.minPts should equal (minPts)
assertThatCommonRequiredArgumentsWereFound(parser)
}
def assertThatCommonRequiredArgumentsWereFound [C <: CommonArgs] (parser: CommonArgsParser[C]) {
parser.args.masterUrl should equal (masterUrl)
parser.args.jar should equal (jar)
parser.args.inputPath should equal (inputPath)
parser.args.outputPath should equal (outputPath)
}
}
|
zerosign/spark_dbscan
|
src/test/scala/org/alitouka/spark/dbscan/CommandLineParsingSuite.scala
|
Scala
|
apache-2.0
| 2,529
|
/*
* AudioContextExt.scala
* (Cord)
*
* Copyright (c) 2015-2020 Hanns Holger Rutz.
*
* This software is published under the GNU Lesser General Public License v2.1+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.cord.audio
import org.scalajs.dom.AudioContext
import scala.scalajs.js
/** Includes extensions not present in the original `AudioContext` class. */
@js.native
trait AudioContextExt extends AudioContext {
/** The `createScriptProcessor()` method of the `AudioContext` interface creates
* a `ScriptProcessorNode` used for direct audio processing.
*
* @param bufferSize The buffer size in units of sample-frames. If specified,
* the bufferSize must be one of the following values:
* 256, 512, 1024, 2048, 4096, 8192, 16384. If it's not
* passed in, or if the value is 0, then the implementation
* will choose the best buffer size for the given environment,
* which will be a constant power of 2 throughout the
* lifetime of the node.
*
* This value controls how frequently the audioprocess event
* is dispatched and how many sample-frames need to be processed
* each call. Lower values for `bufferSize` will result in a
* lower (better) latency. Higher values will be necessary to
* avoid audio breakup and glitches. It is recommended for
* authors to not specify this buffer size and allow the
* implementation to pick a good buffer size to balance between
* latency and audio quality.
*
* @param numberOfInputChannels Integer specifying the number of channels for this node's
* input, defaults to 2. Values of up to 32 are supported.
*
* @param numberOfOutputChannels Integer specifying the number of channels for this node's
* output, defaults to 2. Values of up to 32 are supported.
*/
def createScriptProcessor(bufferSize : Int = 0,
numberOfInputChannels : Int = 2,
numberOfOutputChannels: Int = 2): ScriptProcessorNode = js.native
}
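// Usage sketch (Scala.js; assumptions: an audio context obtained from the browser and
// cast to this facade, and a ScriptProcessorNode facade with the standard Web Audio
// `connect` method in the same package):
// object ScriptProcessorSketch {
//   def demo(ctx: AudioContextExt): Unit = {
//     // mono in / mono out, 4096-frame buffer; passing 0 would let the browser choose
//     val node = ctx.createScriptProcessor(bufferSize = 4096,
//       numberOfInputChannels = 1, numberOfOutputChannels = 1)
//     node.connect(ctx.destination)
//   }
// }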
|
Sciss/Cord
|
src/main/scala/de/sciss/cord/audio/AudioContextExt.scala
|
Scala
|
lgpl-2.1
| 2,588
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.entity
import scala.util.Try
import spray.json.DefaultJsonProtocol
import spray.json.JsNull
import spray.json.JsString
import spray.json.JsValue
import spray.json.RootJsonFormat
import spray.json.deserializationError
import org.apache.openwhisk.core.entity.ArgNormalizer.trim
/**
* A DocId is the document id === primary key in the datastore.
*
* It is a value type (hence == is .equals, immutable and cannot be assigned null).
* The constructor is private so that argument requirements are checked and normalized
* before creating a new instance.
*
* @param id the document id, required not null
*/
protected[core] class DocId(val id: String) extends AnyVal {
def asString = id // to make explicit that this is a string conversion
protected[core] def asDocInfo = DocInfo(this)
protected[core] def asDocInfo(rev: DocRevision) = DocInfo(this, rev)
protected[entity] def toJson = JsString(id)
override def toString = id
}
/**
* A DocRevision is the document revision, an opaque value that may be
* determined by the datastore.
*
* It is a value type (hence == is .equals, immutable and cannot be assigned null).
* The constructor is private so that argument requirements are checked and normalized
* before creating a new instance.
*
* @param rev the document revision, optional
*/
protected[core] class DocRevision private (val rev: String) extends AnyVal {
def asString = rev // to make explicit that this is a string conversion
def empty = rev == null
override def toString = rev
}
/**
* Document Info wrapping the document id and revision. The constructor
* is protected to make sure id and rev are well formed and defined. Use
* one of the factories in the companion object where necessary. Since
* the id and rev are values, the type system ensures they are not null.
*
* @param id the document id
* @param rev the document revision, optional; this is an opaque value determined by the datastore
*/
protected[core] case class DocInfo protected[entity] (id: DocId, rev: DocRevision = DocRevision.empty) {
override def toString = {
if (rev.empty) {
s"id: $id"
} else {
s"id: $id, rev: $rev"
}
}
override def hashCode = {
if (rev.empty) {
id.hashCode
} else {
s"$id.$rev".hashCode
}
}
}
/**
 * A BulkEntityResult wraps the fields that are returned for a single document on a bulk-put of several documents.
 * http://docs.couchdb.org/en/2.1.0/api/database/bulk-api.html#post--db-_bulk_docs
 *
 * @param id the document id
 * @param rev the document revision, optional; this is an opaque value determined by the datastore
 * @param error the error that occurred when trying to put this document into CouchDB
 * @param reason the error message that corresponds to the error
*/
case class BulkEntityResult(id: String, rev: Option[DocRevision], error: Option[String], reason: Option[String]) {
def toDocInfo = DocInfo(DocId(id), rev.getOrElse(DocRevision.empty))
}
protected[core] object DocId extends ArgNormalizer[DocId] {
/**
* Unapply method for convenience of case matching.
*/
def unapply(s: String): Option[DocId] = Try(DocId(s)).toOption
implicit val serdes = new RootJsonFormat[DocId] {
def write(d: DocId) = d.toJson
def read(value: JsValue) =
Try {
val JsString(s) = value
new DocId(s)
} getOrElse deserializationError("doc id malformed")
}
}
protected[core] object DocRevision {
/**
* Creates a DocRevision. Normalizes the revision if necessary.
*
* @param s is the document revision as a string, may be null
* @return DocRevision
*/
protected[core] def apply(s: String): DocRevision = new DocRevision(trim(s))
protected[core] val empty: DocRevision = new DocRevision(null)
implicit val serdes = new RootJsonFormat[DocRevision] {
def write(d: DocRevision) = if (d.rev != null) JsString(d.rev) else JsNull
def read(value: JsValue) = value match {
case JsString(s) => DocRevision(s)
case JsNull => DocRevision.empty
case _ => deserializationError("doc revision malformed")
}
}
}
protected[core] object DocInfo extends DefaultJsonProtocol {
/**
* Creates a DocInfo with id set to the argument and no revision.
*
* @param id is the document identifier, must be defined
* @throws IllegalArgumentException if id is null or empty
*/
@throws[IllegalArgumentException]
protected[core] def apply(id: String): DocInfo = DocInfo(DocId(id))
/**
* Creates a DocInfo with id and revision per the provided arguments.
*
* @param id is the document identifier, must be defined
* @param rev the document revision, optional
* @return DocInfo for id and revision
* @throws IllegalArgumentException if id is null or empty
*/
@throws[IllegalArgumentException]
protected[core] def !(id: String, rev: String): DocInfo = DocInfo(DocId(id), DocRevision(rev))
implicit val serdes = jsonFormat2(DocInfo.apply)
}
object BulkEntityResult extends DefaultJsonProtocol {
implicit val serdes = jsonFormat4(BulkEntityResult.apply)
}
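// Usage sketch (illustrative ids/revisions; note that DocId, DocRevision and DocInfo are
// protected[core], so this only compiles inside the OpenWhisk core packages):
// val byId = DocInfo("whisk.system/echo")                 // id only, empty revision
// val full = DocInfo ! ("whisk.system/echo", "1-abc123")  // id plus revision
// full.toString                                           // "id: whisk.system/echo, rev: 1-abc123"
// DocRevision.serdes.write(full.rev)                      // JsString("1-abc123")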
|
starpit/openwhisk
|
common/scala/src/main/scala/org/apache/openwhisk/core/entity/DocInfo.scala
|
Scala
|
apache-2.0
| 5,940
|
package org.jetbrains.sbt
package project
import java.io.File
import java.net.URL
import java.util
import com.intellij.execution.configurations.SimpleJavaParameters
import com.intellij.openapi.application.{ApplicationManager, PathManager}
import com.intellij.openapi.externalSystem.model.ExternalSystemException
import com.intellij.openapi.externalSystem.service.project.autoimport.CachingExternalSystemAutoImportAware
import com.intellij.openapi.externalSystem.util._
import com.intellij.openapi.externalSystem.{ExternalSystemAutoImportAware, ExternalSystemConfigurableAware, ExternalSystemManager}
import com.intellij.openapi.options.Configurable
import com.intellij.openapi.project.Project
import com.intellij.openapi.projectRoots.impl.JavaAwareProjectJdkTableImpl
import com.intellij.openapi.projectRoots.{JavaSdkType, ProjectJdkTable}
import com.intellij.openapi.roots.ProjectRootManager
import com.intellij.testFramework.IdeaTestUtil
import com.intellij.util.net.HttpConfigurable
import org.jetbrains.android.sdk.AndroidSdkType
import org.jetbrains.sbt.project.settings._
import org.jetbrains.sbt.settings.{SbtExternalSystemConfigurable, SbtSystemSettings}
import scala.collection.mutable
/**
* @author Pavel Fatin
*/
class SbtExternalSystemManager
extends ExternalSystemManager[SbtProjectSettings, SbtProjectSettingsListener, SbtSystemSettings, SbtLocalSettings, SbtExecutionSettings]
with ExternalSystemConfigurableAware {
def enhanceLocalProcessing(urls: util.List[URL]) {
urls.add(jarWith[scala.App].toURI.toURL)
}
def enhanceRemoteProcessing(parameters: SimpleJavaParameters) {
val classpath = parameters.getClassPath
classpath.add(jarWith[this.type])
classpath.add(jarWith[org.jetbrains.sbt.structure.XmlSerializer[_]])
classpath.add(jarWith[scala.App])
classpath.add(jarWith[scala.xml.Node])
parameters.getVMParametersList.addProperty(
ExternalSystemConstants.EXTERNAL_SYSTEM_ID_KEY, SbtProjectSystem.Id.getId)
parameters.getVMParametersList.addProperty(
PathManager.PROPERTY_LOG_PATH, PathManager.getLogPath)
}
def getSystemId = SbtProjectSystem.Id
def getSettingsProvider = SbtSystemSettings.getInstance _
def getLocalSettingsProvider = SbtLocalSettings.getInstance _
def getExecutionSettingsProvider = SbtExternalSystemManager.executionSettingsFor _
def getProjectResolverClass = classOf[SbtProjectResolver]
def getTaskManagerClass = classOf[SbtTaskManager]
def getExternalProjectDescriptor = new SbtOpenProjectDescriptor()
def getConfigurable(project: Project): Configurable = new SbtExternalSystemConfigurable(project)
}
object SbtExternalSystemManager {
def executionSettingsFor(project: Project, path: String) = {
val settings = SbtSystemSettings.getInstance(project)
val projectSettings = Option(settings.getLinkedProjectSettings(path)).getOrElse(SbtProjectSettings.default)
val customLauncher = settings.customLauncherEnabled.option(settings.getCustomLauncherPath).map(_.toFile)
val customSbtStructureFile = settings.customSbtStructurePath.nonEmpty.option(settings.customSbtStructurePath.toFile)
val realProjectPath = Option(projectSettings.getExternalProjectPath).getOrElse(path)
val projectJdkName = getProjectJdkName(project, projectSettings)
val vmExecutable = getVmExecutable(projectJdkName, settings)
val vmOptions = getVmOptions(settings)
val environment = Map.empty ++ getAndroidEnvironmentVariables(projectJdkName)
new SbtExecutionSettings(realProjectPath,
vmExecutable, vmOptions, environment, customLauncher, customSbtStructureFile, projectJdkName,
projectSettings.resolveClassifiers, projectSettings.resolveJavadocs, projectSettings.resolveSbtClassifiers)
}
private def getProjectJdkName(project: Project, projectSettings: SbtProjectSettings): Option[String] = {
val jdkInProject = Option(ProjectRootManager.getInstance(project).getProjectSdk).map(_.getName)
val jdkInImportSettings = projectSettings.jdkName
jdkInImportSettings.orElse(jdkInProject)
}
private def getVmExecutable(projectJdkName: Option[String], settings: SbtSystemSettings): File =
if (!ApplicationManager.getApplication.isUnitTestMode)
getRealVmExecutable(projectJdkName, settings)
else
getUnitTestVmExecutable
private def getUnitTestVmExecutable: File = {
val internalSdk = JavaAwareProjectJdkTableImpl.getInstanceEx.getInternalJdk
val sdk = if (internalSdk == null) IdeaTestUtil.getMockJdk17 else internalSdk
val sdkType = sdk.getSdkType.asInstanceOf[JavaSdkType]
new File(sdkType.getVMExecutablePath(sdk))
}
private def getRealVmExecutable(projectJdkName: Option[String], settings: SbtSystemSettings): File = {
val customVmFile = new File(settings.getCustomVMPath) / "bin" / "java"
val customVmExecutable = settings.customVMEnabled.option(customVmFile)
customVmExecutable.orElse {
val projectSdk = projectJdkName.flatMap(name => Option(ProjectJdkTable.getInstance().findJdk(name)))
projectSdk.map { sdk =>
sdk.getSdkType match {
case sdkType : JavaSdkType =>
new File(sdkType.getVMExecutablePath(sdk))
case _ =>
throw new ExternalSystemException(SbtBundle("sbt.import.noProjectJvmFound"))
}
}
} getOrElse {
throw new ExternalSystemException(SbtBundle("sbt.import.noCustomJvmFound"))
}
}
private def getAndroidEnvironmentVariables(projectJdkName: Option[String]): Map[String, String] =
projectJdkName
.flatMap(name => Option(ProjectJdkTable.getInstance().findJdk(name)))
.flatMap { sdk =>
try {
sdk.getSdkType.isInstanceOf[AndroidSdkType].option(Map("ANDROID_HOME" -> sdk.getSdkModificator.getHomePath))
} catch {
case _ : NoClassDefFoundError => None
}
}.getOrElse(Map.empty)
private def getVmOptions(settings: SbtSystemSettings): Seq[String] = {
    val userOptions = settings.getVmParameters.split("\\s+").toSeq
val ideaProxyOptions = proxyOptionsFor(HttpConfigurable.getInstance).filterNot { opt =>
val optName = opt.split('=').head + "="
userOptions.exists(_.startsWith(optName))
}
Seq(s"-Xmx${settings.getMaximumHeapSize}M") ++ userOptions ++ ideaProxyOptions
}
private def proxyOptionsFor(http: HttpConfigurable): Seq[String] = {
val useProxy = http.USE_HTTP_PROXY && !http.PROXY_TYPE_IS_SOCKS
val useCredentials = useProxy && http.PROXY_AUTHENTICATION
useProxy.seq(s"-Dhttp.proxyHost=${http.PROXY_HOST}", s"-Dhttp.proxyPort=${http.PROXY_PORT}") ++
useCredentials.seq(s"-Dhttp.proxyUser=${http.PROXY_LOGIN}", s"-Dhttp.proxyPassword=${http.getPlainProxyPassword}")
}
}
|
whorbowicz/intellij-scala
|
src/org/jetbrains/sbt/project/SbtExternalSystemManager.scala
|
Scala
|
apache-2.0
| 6,731
|
/*
* DARWIN Genetic Algorithms Framework Project.
* Copyright (c) 2003, 2005, 2007, 2009, 2011, 2016, 2017. Phasmid Software
*
* Originally, developed in Java by Rubecula Software, LLC and hosted by SourceForge.
* Converted to Scala by Phasmid Software and hosted by github at https://github.com/rchillyard/Darwin
*
* This file is part of Darwin.
*
* Darwin is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.phasmid.darwin.evolution
import java.util
import com.phasmid.darwin.plugin.Listener
/**
* Created by scalaprof on 7/26/17.
*/
trait Evolver {
/**
* Add an evolvable object which undergoes a new generation once every tick
* of the clock.
*
* @param evolvable
* an { @link Evolvable} object, typically a { @link Taxon}.
*/
def addEvolvable(evolvable: Evolvable[_]): Unit
/**
* Add an evolvable object which undergoes a new generation once every
* <code>ticks</code> ticks of the clock.
*
* @param evolvable
* an { @link Evolvable} object, typically a { @link Taxon}.
* @param ticks
* the number of ticks of the clock per generation.
*/
def addEvolvable(evolvable: Evolvable[_], ticks: Int): Unit
/**
* Add a listener to the evolution process.
*
* @param listener the listener
* @return true if the listener was added
*/
def addListener(listener: Listener): Boolean
/**
* Method which is called before all user-interface components get
* destroyed. When an
* {@link EvolutionaryApplet} is employed as the
    * user-interface, this method is called by the
* {@link Applet#stop()}
* method.
*/
def cleanup(): Unit
/**
* @return the clockWatcher
*/
def getClockWatcher: ClockWatcher
/**
* @return the set of { @link Evolvable} objects.
*/
def getEvolvableKeys: util.Set[Evolvable[_]]
/**
* Method which is called after all user-interface issues have been dealt
* with. When an {@link EvolutionaryApplet} is employed as the
* user-interface, this method is called by the {@link Applet#start()}
* method.
*/
def init(): Unit
/**
* Increment the clock by one tick, firing new generations as appropriate.
*
* @return true if there is more evolution to do.
* @throws EvolutionException under some circumstances
*/
@throws[EvolutionException]
def next: Boolean
/**
* @param evolvable an evolvable
*/
def removeEvolvable(evolvable: Evolvable[_]): Unit
/**
* Seed the currently registered evolvables by calling
* {@link Evolvable#seedMembers()} on each one.
*/
def seedEvolvables(): Unit
/**
* @param clockWatcher
* the clockWatcher to set
*/
def setClockWatcher(clockWatcher: ClockWatcher): Unit
/**
* // * @param listener
*
* @return true if successful
*/
// def addVisualizableListener(listener: VisualizableListener): Boolean
}
|
rchillyard/Darwin
|
src/main/scala/com/phasmid/darwin/evolution/Evolver.scala
|
Scala
|
gpl-3.0
| 3,566
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.cluster
import kafka.log.Log
import kafka.utils.Logging
import kafka.server.{LogOffsetMetadata, LogReadResult}
import kafka.common.KafkaException
import org.apache.kafka.common.errors.OffsetOutOfRangeException
import kafka.server.checkpoints.{LeaderEpochCheckpointFile, LeaderEpochFile}
import kafka.server.epoch.{LeaderEpochCache, LeaderEpochFileCache}
import org.apache.kafka.common.utils.Time
class Replica(val brokerId: Int,
val partition: Partition,
time: Time = Time.SYSTEM,
initialHighWatermarkValue: Long = 0L,
val log: Option[Log] = None) extends Logging {
// the high watermark offset value, in non-leader replicas only its message offsets are kept
@volatile private[this] var highWatermarkMetadata = new LogOffsetMetadata(initialHighWatermarkValue)
// the log end offset value, kept in all replicas;
// for local replica it is the log's end offset, for remote replicas its value is only updated by follower fetch
@volatile private[this] var logEndOffsetMetadata = LogOffsetMetadata.UnknownOffsetMetadata
// the log start offset value, kept in all replicas;
// for local replica it is the log's start offset, for remote replicas its value is only updated by follower fetch
@volatile private[this] var _logStartOffset = Log.UnknownLogStartOffset
// The log end offset value at the time the leader received the last FetchRequest from this follower
// This is used to determine the lastCaughtUpTimeMs of the follower
@volatile private[this] var lastFetchLeaderLogEndOffset = 0L
// The time when the leader received the last FetchRequest from this follower
// This is used to determine the lastCaughtUpTimeMs of the follower
@volatile private[this] var lastFetchTimeMs = 0L
// lastCaughtUpTimeMs is the largest time t such that the offset of most recent FetchRequest from this follower >=
// the LEO of leader at time t. This is used to determine the lag of this follower and ISR of this partition.
@volatile private[this] var _lastCaughtUpTimeMs = 0L
val topicPartition = partition.topicPartition
def isLocal: Boolean = log.isDefined
def lastCaughtUpTimeMs = _lastCaughtUpTimeMs
val epochs = log.map(_.leaderEpochCache)
/*
* If the FetchRequest reads up to the log end offset of the leader when the current fetch request is received,
* set `lastCaughtUpTimeMs` to the time when the current fetch request was received.
*
* Else if the FetchRequest reads up to the log end offset of the leader when the previous fetch request was received,
* set `lastCaughtUpTimeMs` to the time when the previous fetch request was received.
*
* This is needed to enforce the semantics of ISR, i.e. a replica is in ISR if and only if it lags behind leader's LEO
* by at most `replicaLagTimeMaxMs`. These semantics allow a follower to be added to the ISR even if the offset of its
* fetch request is always smaller than the leader's LEO, which can happen if small produce requests are received at
* high frequency.
*/
def updateLogReadResult(logReadResult : LogReadResult) {
if (logReadResult.info.fetchOffsetMetadata.messageOffset >= logReadResult.leaderLogEndOffset)
_lastCaughtUpTimeMs = math.max(_lastCaughtUpTimeMs, logReadResult.fetchTimeMs)
else if (logReadResult.info.fetchOffsetMetadata.messageOffset >= lastFetchLeaderLogEndOffset)
_lastCaughtUpTimeMs = math.max(_lastCaughtUpTimeMs, lastFetchTimeMs)
logStartOffset = logReadResult.followerLogStartOffset
logEndOffset = logReadResult.info.fetchOffsetMetadata
lastFetchLeaderLogEndOffset = logReadResult.leaderLogEndOffset
lastFetchTimeMs = logReadResult.fetchTimeMs
}
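  // Worked example (hypothetical offsets): if the previous fetch arrived at t1 when the
  // leader's LEO was 100, and the current fetch at t2 reads offset 100 while the LEO has
  // moved on to 120, the second branch above applies, so _lastCaughtUpTimeMs becomes t1
  // (the time of the fetch whose LEO the follower has now caught up to), not t2.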
def resetLastCaughtUpTime(curLeaderLogEndOffset: Long, curTimeMs: Long, lastCaughtUpTimeMs: Long) {
lastFetchLeaderLogEndOffset = curLeaderLogEndOffset
lastFetchTimeMs = curTimeMs
_lastCaughtUpTimeMs = lastCaughtUpTimeMs
}
private def logEndOffset_=(newLogEndOffset: LogOffsetMetadata) {
if (isLocal) {
throw new KafkaException(s"Should not set log end offset on partition $topicPartition's local replica $brokerId")
} else {
logEndOffsetMetadata = newLogEndOffset
trace(s"Setting log end offset for replica $brokerId for partition $topicPartition to [$logEndOffsetMetadata]")
}
}
def logEndOffset =
if (isLocal)
log.get.logEndOffsetMetadata
else
logEndOffsetMetadata
def maybeIncrementLogStartOffset(offset: Long) {
if (isLocal) {
if (highWatermark.messageOffset < offset)
throw new OffsetOutOfRangeException(s"The specified offset $offset is higher than the high watermark" +
s" ${highWatermark.messageOffset} of the partition $topicPartition")
log.get.maybeIncrementLogStartOffset(offset)
} else {
throw new KafkaException(s"Should not try to delete records on partition $topicPartition's non-local replica $brokerId")
}
}
private def logStartOffset_=(newLogStartOffset: Long) {
if (isLocal) {
throw new KafkaException(s"Should not set log start offset on partition $topicPartition's local replica $brokerId " +
s"without attempting to delete records of the log")
} else {
_logStartOffset = newLogStartOffset
trace(s"Setting log start offset for remote replica $brokerId for partition $topicPartition to [$newLogStartOffset]")
}
}
def logStartOffset =
if (isLocal)
log.get.logStartOffset
else
_logStartOffset
def highWatermark_=(newHighWatermark: LogOffsetMetadata) {
if (isLocal) {
highWatermarkMetadata = newHighWatermark
trace(s"Setting high watermark for replica $brokerId partition $topicPartition to [$newHighWatermark]")
} else {
throw new KafkaException(s"Should not set high watermark on partition $topicPartition's non-local replica $brokerId")
}
}
def highWatermark = highWatermarkMetadata
def convertHWToLocalOffsetMetadata() = {
if (isLocal) {
highWatermarkMetadata = log.get.convertToOffsetMetadata(highWatermarkMetadata.messageOffset)
} else {
throw new KafkaException(s"Should not construct complete high watermark on partition $topicPartition's non-local replica $brokerId")
}
}
override def equals(that: Any): Boolean = that match {
case other: Replica => brokerId == other.brokerId && topicPartition == other.topicPartition
case _ => false
}
override def hashCode: Int = 31 + topicPartition.hashCode + 17 * brokerId
override def toString: String = {
val replicaString = new StringBuilder
replicaString.append("ReplicaId: " + brokerId)
replicaString.append("; Topic: " + partition.topic)
replicaString.append("; Partition: " + partition.partitionId)
replicaString.append("; isLocal: " + isLocal)
replicaString.append("; lastCaughtUpTimeMs: " + lastCaughtUpTimeMs)
if (isLocal) replicaString.append("; Highwatermark: " + highWatermark)
replicaString.toString
}
}
|
rhauch/kafka
|
core/src/main/scala/kafka/cluster/Replica.scala
|
Scala
|
apache-2.0
| 7,869
|
package im.actor.server.persist
import im.actor.server.model.UserPhone
import slick.dbio.Effect.{ Read, Write }
import slick.driver.PostgresDriver.api._
import slick.profile.{ FixedSqlAction, FixedSqlStreamingAction }
final class UserPhoneTable(tag: Tag) extends Table[UserPhone](tag, "user_phones") {
def userId = column[Int]("user_id", O.PrimaryKey)
def id = column[Int]("id", O.PrimaryKey)
def accessSalt = column[String]("access_salt")
def number = column[Long]("number")
def title = column[String]("title")
def * = (id, userId, accessSalt, number, title) <> (UserPhone.tupled, UserPhone.unapply)
}
object UserPhoneRepo {
val phones = TableQuery[UserPhoneTable]
  val byPhoneNumber = Compiled { number: Rep[Long] ⇒
    phones.filter(_.number === number)
  }
  val phoneExists = Compiled { number: Rep[Long] ⇒
phones.filter(_.number === number).exists
}
def exists(number: Long) = phoneExists(number).result
// TODO: rename to findByNumber
def findByPhoneNumber(number: Long) = byPhoneNumber(number).result
def findByNumbers(numbers: Set[Long]): FixedSqlStreamingAction[Seq[UserPhone], UserPhone, Read] =
phones.filter(_.number inSet numbers).result
def findByUserId(userId: Int): FixedSqlStreamingAction[Seq[UserPhone], UserPhone, Read] =
phones.filter(_.userId === userId).result
def findByUserIds(userIds: Set[Int]): FixedSqlStreamingAction[Seq[UserPhone], UserPhone, Read] =
phones.filter(_.userId inSet userIds).result
def create(id: Int, userId: Int, accessSalt: String, number: Long, title: String): FixedSqlAction[Int, NoStream, Write] =
phones += UserPhone(id, userId, accessSalt, number, title)
def create(userPhone: UserPhone): FixedSqlAction[Int, NoStream, Write] =
phones += userPhone
def updateTitle(userId: Int, id: Int, title: String) =
    phones.filter(p ⇒ p.userId === userId && p.id === id).map(_.title).update(title)
}
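// Usage sketch (assumption: a configured Slick `Database` instance named `db`;
// the ids and phone number below are made up):
// db.run(UserPhoneRepo.create(id = 1, userId = 42, accessSalt = "salt", number = 15550001111L, title = "Mobile"))
// db.run(UserPhoneRepo.findByUserId(42))      // Future[Seq[UserPhone]]
// db.run(UserPhoneRepo.exists(15550001111L))  // Future[Boolean]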
|
ljshj/actor-platform
|
actor-server/actor-persist/src/main/scala/im/actor/server/persist/UserPhoneRepo.scala
|
Scala
|
mit
| 1,921
|
package tierney.core
import cats.~>
class LazyFunctionK[F[_], G[_]](value: => F ~> G) extends (F ~> G) {
override def apply[A](fa: F[A]) = value(fa)
}
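// Usage sketch: the by-name `value` parameter defers evaluation of the wrapped FunctionK,
// which helps when natural transformations are defined (mutually) recursively.
// The Option ~> List transformation below is purely illustrative.
// lazy val optionToList: Option ~> List = new (Option ~> List) {
//   def apply[A](fa: Option[A]): List[A] = fa.toList
// }
// val deferred: Option ~> List = new LazyFunctionK(optionToList)
// deferred(Some(1)) // List(1)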
|
m50d/tierney
|
core/src/main/scala/tierney/core/LazyFunctionK.scala
|
Scala
|
apache-2.0
| 154
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.json
import java.nio.charset.{Charset, StandardCharsets}
import java.time.ZoneId
import java.util.Locale
import com.fasterxml.jackson.core.{JsonFactory, JsonFactoryBuilder}
import com.fasterxml.jackson.core.json.JsonReadFeature
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.internal.SQLConf
/**
* Options for parsing JSON data into Spark SQL rows.
*
* Most of these map directly to Jackson's internal options, specified in [[JsonReadFeature]].
*/
private[sql] class JSONOptions(
@transient val parameters: CaseInsensitiveMap[String],
defaultTimeZoneId: String,
defaultColumnNameOfCorruptRecord: String)
extends Logging with Serializable {
def this(
parameters: Map[String, String],
defaultTimeZoneId: String,
defaultColumnNameOfCorruptRecord: String = "") = {
this(
CaseInsensitiveMap(parameters),
defaultTimeZoneId,
defaultColumnNameOfCorruptRecord)
}
val samplingRatio =
parameters.get("samplingRatio").map(_.toDouble).getOrElse(1.0)
val primitivesAsString =
parameters.get("primitivesAsString").map(_.toBoolean).getOrElse(false)
val prefersDecimal =
parameters.get("prefersDecimal").map(_.toBoolean).getOrElse(false)
val allowComments =
parameters.get("allowComments").map(_.toBoolean).getOrElse(false)
val allowUnquotedFieldNames =
parameters.get("allowUnquotedFieldNames").map(_.toBoolean).getOrElse(false)
val allowSingleQuotes =
parameters.get("allowSingleQuotes").map(_.toBoolean).getOrElse(true)
val allowNumericLeadingZeros =
parameters.get("allowNumericLeadingZeros").map(_.toBoolean).getOrElse(false)
val allowNonNumericNumbers =
parameters.get("allowNonNumericNumbers").map(_.toBoolean).getOrElse(true)
val allowBackslashEscapingAnyCharacter =
parameters.get("allowBackslashEscapingAnyCharacter").map(_.toBoolean).getOrElse(false)
private val allowUnquotedControlChars =
parameters.get("allowUnquotedControlChars").map(_.toBoolean).getOrElse(false)
val compressionCodec = parameters.get("compression").map(CompressionCodecs.getCodecClassName)
val parseMode: ParseMode =
parameters.get("mode").map(ParseMode.fromString).getOrElse(PermissiveMode)
val columnNameOfCorruptRecord =
parameters.getOrElse("columnNameOfCorruptRecord", defaultColumnNameOfCorruptRecord)
  // Whether to ignore columns of all null values or empty arrays/structs during schema inference
  val dropFieldIfAllNull = parameters.get("dropFieldIfAllNull").map(_.toBoolean).getOrElse(false)
  // Whether to ignore null fields during JSON generation
val ignoreNullFields = parameters.get("ignoreNullFields").map(_.toBoolean)
.getOrElse(SQLConf.get.jsonGeneratorIgnoreNullFields)
// A language tag in IETF BCP 47 format
val locale: Locale = parameters.get("locale").map(Locale.forLanguageTag).getOrElse(Locale.US)
val zoneId: ZoneId = DateTimeUtils.getZoneId(
parameters.getOrElse(DateTimeUtils.TIMEZONE_OPTION, defaultTimeZoneId))
val dateFormat: String = parameters.getOrElse("dateFormat", DateFormatter.defaultPattern)
val timestampFormat: String =
parameters.getOrElse("timestampFormat", s"${DateFormatter.defaultPattern}'T'HH:mm:ss.SSSXXX")
val multiLine = parameters.get("multiLine").map(_.toBoolean).getOrElse(false)
/**
* A string between two consecutive JSON records.
*/
val lineSeparator: Option[String] = parameters.get("lineSep").map { sep =>
require(sep.nonEmpty, "'lineSep' cannot be an empty string.")
sep
}
protected def checkedEncoding(enc: String): String = enc
/**
* Standard encoding (charset) name. For example UTF-8, UTF-16LE and UTF-32BE.
* If the encoding is not specified (None) in read, it will be detected automatically
* when the multiLine option is set to `true`. If encoding is not specified in write,
* UTF-8 is used by default.
*/
val encoding: Option[String] = parameters.get("encoding")
.orElse(parameters.get("charset")).map(checkedEncoding)
val lineSeparatorInRead: Option[Array[Byte]] = lineSeparator.map { lineSep =>
lineSep.getBytes(encoding.getOrElse(StandardCharsets.UTF_8.name()))
}
  val lineSeparatorInWrite: String = lineSeparator.getOrElse("\n")
/**
* Generating JSON strings in pretty representation if the parameter is enabled.
*/
val pretty: Boolean = parameters.get("pretty").map(_.toBoolean).getOrElse(false)
/**
* Enables inferring of TimestampType from strings matched to the timestamp pattern
* defined by the timestampFormat option.
*/
val inferTimestamp: Boolean = parameters.get("inferTimestamp").map(_.toBoolean).getOrElse(true)
/** Build a Jackson [[JsonFactory]] using JSON options. */
def buildJsonFactory(): JsonFactory = {
new JsonFactoryBuilder()
.configure(JsonReadFeature.ALLOW_JAVA_COMMENTS, allowComments)
.configure(JsonReadFeature.ALLOW_UNQUOTED_FIELD_NAMES, allowUnquotedFieldNames)
.configure(JsonReadFeature.ALLOW_SINGLE_QUOTES, allowSingleQuotes)
.configure(JsonReadFeature.ALLOW_LEADING_ZEROS_FOR_NUMBERS, allowNumericLeadingZeros)
.configure(JsonReadFeature.ALLOW_NON_NUMERIC_NUMBERS, allowNonNumericNumbers)
.configure(
JsonReadFeature.ALLOW_BACKSLASH_ESCAPING_ANY_CHARACTER,
allowBackslashEscapingAnyCharacter)
.configure(JsonReadFeature.ALLOW_UNESCAPED_CONTROL_CHARS, allowUnquotedControlChars)
.build()
}
}
private[sql] class JSONOptionsInRead(
@transient override val parameters: CaseInsensitiveMap[String],
defaultTimeZoneId: String,
defaultColumnNameOfCorruptRecord: String)
extends JSONOptions(parameters, defaultTimeZoneId, defaultColumnNameOfCorruptRecord) {
def this(
parameters: Map[String, String],
defaultTimeZoneId: String,
defaultColumnNameOfCorruptRecord: String = "") = {
this(
CaseInsensitiveMap(parameters),
defaultTimeZoneId,
defaultColumnNameOfCorruptRecord)
}
protected override def checkedEncoding(enc: String): String = {
val isBlacklisted = JSONOptionsInRead.blacklist.contains(Charset.forName(enc))
require(multiLine || !isBlacklisted,
s"""The ${enc} encoding must not be included in the blacklist when multiLine is disabled:
|Blacklist: ${JSONOptionsInRead.blacklist.mkString(", ")}""".stripMargin)
val isLineSepRequired =
multiLine || Charset.forName(enc) == StandardCharsets.UTF_8 || lineSeparator.nonEmpty
require(isLineSepRequired, s"The lineSep option must be specified for the $enc encoding")
enc
}
}
private[sql] object JSONOptionsInRead {
  // The following encodings are not supported in per-line mode (multiLine is false)
  // because they cause problems when reading files with a BOM, which is expected to be
  // present in files with such encodings. After splitting an input file by lines,
  // only the first line carries the BOM, which makes the remaining lines unreadable.
  // Besides that, the lineSep option would have to contain the BOM in such
  // encodings, and a BOM can never appear between lines.
val blacklist = Seq(
Charset.forName("UTF-16"),
Charset.forName("UTF-32")
)
}
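// Usage sketch (note: the class is private[sql], so this only applies inside Spark SQL;
// the option values are illustrative):
// val opts = new JSONOptions(
//   Map("allowComments" -> "true", "lineSep" -> "\n"),
//   defaultTimeZoneId = "UTC",
//   defaultColumnNameOfCorruptRecord = "_corrupt_record")
// val factory = opts.buildJsonFactory() // Jackson JsonFactory configured from the options above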
|
goldmedal/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JSONOptions.scala
|
Scala
|
apache-2.0
| 8,039
|
package me.yingrui.segment.hmm
import me.yingrui.segment.util.ISerialize
import me.yingrui.segment.util.SerializeHandler
class Trie extends ISerialize {
var key = -1
var count = 0
var prob = 0.0D
var descendant:Array[Trie] = null
def getKey(): Int = {
return key
}
def buildIndex(c: Int) {
prob = count.toDouble /(c.toDouble + 1.0D)
if (null != descendant) {
for (node <- descendant.toList) {
node.buildIndex(count)
}
Trie.sortor.sort(descendant)
}
}
def insert(ngram: Array[Int]): Trie = {
return insert(ngram, 1)
}
def insert(ngram: Array[Int], freq: Int): Trie = {
count += freq
if (ngram.length > 0) {
val k = ngram(0)
var n:Trie = if (null != descendant) binarySearch(descendant, descendant.length, k) else null
if (null == n) {
n = new Trie()
n.key = k
add(n)
descendant = Trie.sortor.sort(descendant)
}
val rec = new Array[Int](ngram.length - 1)
for (i <- 1 until ngram.length)
{
rec(i - 1) = ngram(i)
}
return n.insert(rec)
} else {
return this
}
}
def add(e: Trie) {
var i = 0
if (null == descendant) {
descendant = new Array[Trie](1)
} else {
val tmp = new Array[Trie](descendant.length + 1)
System.arraycopy(descendant, 0, tmp, 0, descendant.length)
i = descendant.length
descendant = tmp
}
descendant(i) = e
}
def searchNode(ngram: Array[Int]): Trie = {
val k = ngram(0)
val n = searchNode(k)
if (null != n && ngram.length > 1) {
val rec = new Array[Int](ngram.length - 1)
for (i <- 1 until ngram.length)
{
rec(i - 1) = ngram(i)
}
return n.searchNode(rec)
}
return n
}
def searchNode(k: Int): Trie = {
return if (null != descendant) binarySearch(descendant, descendant.length, k) else null
}
def getCount(): Int = {
return count
}
def setCount(count: Int) {
this.count = count
}
def getProb(): Double = {
return prob
}
def setProb(prob: Double) {
this.prob = prob
}
def binarySearch(list: Array[Trie], listLength: Int, searchItem: Int): Trie = {
if (null == list) {
return null
}
var first = 0
var last = listLength - 1
var mid = -1
var found = false
while (first <= last && !found) {
mid = (first + last) / 2
val i = list(mid).key - searchItem
if (i == 0) {
found = true
} else {
if (i > 0) {
last = mid - 1
} else {
first = mid + 1
}
}
}
if (found) {
return list(mid)
} else {
return null
}
}
def printTreeNode(indent: String) {
println(indent + key + " - " + count + " - " + prob)
if (null != descendant) {
for (node <- descendant) {
node.printTreeNode(indent + " ")
}
}
}
def getNumberOfNodeWhichCountLt(lt: Int): Int = {
var c = if(count < lt) 1 else 0
if (null != descendant) {
for (node <- descendant) {
c += node.getNumberOfNodeWhichCountLt(lt)
}
}
return c
}
def cutCountLowerThan(lt: Int) {
if (lt == 1) {
return
}
if (null != descendant) {
var l = List[Trie]()
for (i <- 0 until descendant.length)
{
val node = descendant(i)
if (node.getCount() >= lt) {
l = l ++ List(node)
node.cutCountLowerThan(lt)
}
}
descendant = l.toArray
}
}
override def save(writeHandler: SerializeHandler) {
writeHandler.serializeInt(key)
writeHandler.serializeInt(count)
writeHandler.serializeDouble(prob)
if (null != descendant) {
writeHandler.serializeInt(descendant.length)
for (child <- descendant.toList)
{
child.save(writeHandler)
}
} else {
writeHandler.serializeInt(0)
}
}
override def load(readHandler: SerializeHandler) {
key = readHandler.deserializeInt()
count = readHandler.deserializeInt()
prob = readHandler.deserializeDouble()
val numberOfDescendant = readHandler.deserializeInt()
if (numberOfDescendant > 0) {
descendant = new Array[Trie](numberOfDescendant)
for (i <- 0 until numberOfDescendant)
{
val child = new Trie()
child.load(readHandler)
descendant(i) = child
}
}
}
}
object Trie {
var sortor: TrieNodeSortor = new TrieNodeBinarySort()
def setTreeNodeSorter(trieNodeSortor: TrieNodeSortor) {
sortor = trieNodeSortor
}
}
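// Usage sketch (hypothetical n-gram ids): insert a few n-grams with frequencies,
// normalize counts into probabilities, then look a prefix node back up.
// val root = new Trie()
// root.insert(Array(1, 2, 3))
// root.insert(Array(1, 2, 4), 2)
// root.buildIndex(root.getCount())
// Option(root.searchNode(Array(1, 2))).foreach(node => println(node.getProb()))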
|
yingrui/mahjong
|
lib-segment/src/main/scala/me/yingrui/segment/hmm/Trie.scala
|
Scala
|
gpl-3.0
| 4,595
|
package com.sksamuel.elastic4s.search
import com.sksamuel.elastic4s.JsonSugar
import org.elasticsearch.common.xcontent.{ToXContent, XContentFactory}
import org.scalatest.FlatSpec
class ScoreDslTest extends FlatSpec with JsonSugar {
import com.sksamuel.elastic4s.ElasticDsl._
"a score dsl" should "generate correct json for a linear decay function scorer" in {
val req = linearScore("myfield", "1 2", "2km").offset(100).decay(0.1)
val actual = req.builder.toXContent(XContentFactory.jsonBuilder().startObject(), ToXContent.EMPTY_PARAMS).endObject().string()
actual should matchJsonResource("/json/score/score_linear.json")
}
it should "generate correct json for a gaussian decay function scorer" in {
val req = gaussianScore("myfield", "1 2", "3km").offset("1km").decay(0.2)
val actual = req.builder.toXContent(XContentFactory.jsonBuilder().startObject(), ToXContent.EMPTY_PARAMS).endObject().string()
actual should matchJsonResource("/json/score/score_gaussian.json")
}
it should "generate correct json for an exponential decay function scorer" in {
val req = exponentialScore("myfield", "1 2", "4km").offset(100).decay(0.4)
val actual = req.builder.toXContent(XContentFactory.jsonBuilder().startObject(), ToXContent.EMPTY_PARAMS).endObject().string()
actual should matchJsonResource("/json/score/score_exponential.json")
}
it should "generate correct json for a random function scorer" in {
val req = randomScore(12345)
val actual = req.builder.toXContent(XContentFactory.jsonBuilder().startObject(), ToXContent.EMPTY_PARAMS).endObject().string()
actual should matchJsonResource("/json/score/score_random.json")
}
it should "generate correct json for a script scorer" in {
val req = scriptScore {
script("some script").lang("java").param("param1", "value1").params(Map("param2" -> "value2"))
}
val actual = req.builder.toXContent(XContentFactory.jsonBuilder().startObject(), ToXContent.EMPTY_PARAMS).endObject().string()
actual should matchJsonResource("/json/score/score_script.json")
}
it should "generate correct json for a weight function scorer" in {
val req = weightScore(1.5)
val actual = req.builder.toXContent(XContentFactory.jsonBuilder().startObject(), ToXContent.EMPTY_PARAMS).endObject().string()
actual should matchJsonResource("/json/score/score_weight.json")
}
}
|
ulric260/elastic4s
|
elastic4s-core-tests/src/test/scala/com/sksamuel/elastic4s/search/ScoreDslTest.scala
|
Scala
|
apache-2.0
| 2,397
|
package org.adridadou.ethereum.propeller.util
import org.adridadou.ethereum.propeller.solidity.abi.AbiEntry
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.check.Checkers
class JsonParserTest extends FlatSpec with Matchers with Checkers {
"Json parser" should "ignore unknown parameters" in {
val json =
"""[
| {
| "constant": true,
| "inputs": [],
| "name": "getName",
| "outputs": [
| {
| "internalType": "string",
| "name": "",
| "type": "string"
| }
| ],
| "payable": false,
| "stateMutability": "view",
| "type": "function"
| },
| {
| "constant": false,
| "inputs": [
| {
| "internalType": "string",
| "name": "_name",
| "type": "string"
| }
| ],
| "name": "recordTest",
| "outputs": [],
| "payable": false,
| "stateMutability": "nonpayable",
| "type": "function"
| }
|]""".stripMargin
AbiEntry.parse(json)
}
}
|
adridadou/eth-propeller-core
|
src/test/scala/org/adridadou/ethereum/propeller/util/JsonParserTest.scala
|
Scala
|
apache-2.0
| 1,010
|
/*
* ____ ____ _____ ____ ___ ____
* | _ \ | _ \ | ____| / ___| / _/ / ___| Precog (R)
* | |_) | | |_) | | _| | | | | /| | | _ Advanced Analytics Engine for NoSQL Data
* | __/ | _ < | |___ | |___ |/ _| | | |_| | Copyright (C) 2010 - 2013 SlamData, Inc.
* |_| |_| \_\ |_____| \____| /__/ \____| All Rights Reserved.
*
* This program is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License along with this
* program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.precog.yggdrasil
sealed trait StorageFormat {
def min(i: Int): Int
val isFixed: Boolean
}
case object LengthEncoded extends StorageFormat {
def min(i: Int) = i
final val isFixed = false
}
case class FixedWidth(width: Int) extends StorageFormat {
def min(i: Int) = width min i
final val isFixed = true
}
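// Usage sketch: FixedWidth(8).min(12) == 8 (never wider than the declared width),
// while LengthEncoded.min(12) == 12 and LengthEncoded.isFixed == false.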
// vim: set ts=4 sw=4 et:
|
precog/platform
|
yggdrasil/src/main/scala/com/precog/yggdrasil/StorageFormat.scala
|
Scala
|
agpl-3.0
| 1,433
|
package euler
package til70
object Euler70 extends EulerProblem {
override def result = {
    val res = (2 until 10000000).view map { n =>
      (n, φ(n))
    } filter {
      case (n, φn) => isPermutation(n, φn)
    } minBy { case (n, φn) => n.toDouble / φn }
    res._1
  }
  // good for n < 10000000
  lazy val φs = euler.totient.Totient(10000000)
  def φ(n: Int): Int = φs(n)
def isPermutation(x: Int, y: Int): Boolean = {
x.toString.sorted == y.toString.sorted
}
}
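// Worked example from the problem statement: φ(87109) = 79180, and 79180 is a
// permutation of 87109, so isPermutation(87109, 79180) holds; 87109 / 79180 ≈ 1.1001
// is the kind of ratio the minBy above minimises.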
|
TrustNoOne/Euler
|
scala/src/main/scala/euler/til70/Euler70.scala
|
Scala
|
mit
| 495
|
/*
* Copyright (C) 2015 Jonathan Passerat-Palmbach
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package toolxit.bibtex.macros
import scala.reflect.macros.Context
import scala.language.experimental.macros
// from http://stackoverflow.com/a/16591277/470341
object OctalLiterals {
implicit class OctallerContext(sc: StringContext) {
def o(): Int = macro oImpl
}
def oImpl(c: Context)(): c.Expr[Int] = {
import c.universe._
c.literal(c.prefix.tree match {
      case Apply(_, Apply(_, Literal(Constant(oct: String)) :: Nil) :: Nil) ⇒
        Integer.decode("0" + oct)
      case _ ⇒ c.abort(c.enclosingPosition, "Invalid octal literal.")
})
}
}
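// Usage sketch (assumption: the macro is compiled in a separate module from its callers):
// import toolxit.bibtex.macros.OctalLiterals._
// val mode = o"755" // expands at compile time to Integer.decode("0755") == 493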
|
ISCPIF/PSEExperiments
|
openmole-src/openmole/third-parties/toolxit.bibtex/macros/src/main/scala/toolxit/bibtex/macros/OctalLiterals.scala
|
Scala
|
agpl-3.0
| 1,284
|
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.driver.test.writer
import java.io.{Serializable => JSerializable}
import com.stratio.sparta.driver.step.{Cube, Trigger}
import com.stratio.sparta.driver.writer.{CubeWriter, CubeWriterOptions}
import com.stratio.sparta.sdk.pipeline.aggregation.cube.{Dimension, DimensionType, DimensionValue, DimensionValuesTime, ExpiringData, MeasuresValues, Precision}
import com.stratio.sparta.sdk.pipeline.aggregation.operator.Operator
import com.stratio.sparta.sdk.pipeline.output.{Output, SaveModeEnum}
import com.stratio.sparta.sdk.pipeline.schema.TypeOp
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row}
import org.junit.runner.RunWith
import org.scalatest._
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class CubeWriterTest extends FlatSpec with ShouldMatchers {
"CubeWriterTest" should "return a row with values and timeDimension" in
new CommonValues {
val schema = StructType(Array(
StructField("dim1", StringType, false),
StructField("dim2", StringType, false),
StructField(checkpointGranularity, TimestampType, false),
StructField("op1", LongType, true)))
val cube = Cube(cubeName, Seq(dim1, dim2), Seq(op1), initSchema, schema,
Option(ExpiringData("minute", checkpointGranularity, "100000ms")), Seq.empty[Trigger], CubeWriterOptions())
val writerOptions = CubeWriterOptions(Seq("outputName"))
val output = new OutputMock("outputName", Map())
val cubeWriter = CubeWriter(cube, Seq(output))
val res = cubeWriter.toRow(dimensionValuesT, measures)
res should be(Row.fromSeq(Seq("value1", "value2", 1L, "value")))
}
"CubeWriterTest" should "return a row with values without timeDimension" in
new CommonValues {
val schema = StructType(Array(
StructField("dim1", StringType, false),
StructField("dim2", StringType, false),
StructField("op1", LongType, true)))
val cube = Cube(cubeName, Seq(dim1, dim2), Seq(op1), initSchema, schema, None, Seq.empty[Trigger],
CubeWriterOptions())
val writerOptions = CubeWriterOptions(Seq("outputName"))
val output = new OutputMock("outputName", Map())
val cubeWriter = CubeWriter(cube, Seq(output))
val res = cubeWriter.toRow(dimensionValuesNoTime, measures)
res should be(Row.fromSeq(Seq("value1", "value2", "value")))
}
"CubeWriterTest" should "return a row with values with noTime" in
new CommonValues {
val schema = StructType(Array(
StructField("dim1", StringType, false),
StructField("dim2", StringType, false),
StructField("op1", LongType, true)))
val cube = Cube(cubeName, Seq(dim1, dim2), Seq(op1), initSchema, schema, None, Seq.empty[Trigger],
CubeWriterOptions())
val writerOptions = CubeWriterOptions(Seq("outputName"), TypeOp.Timestamp)
val output = new OutputMock("outputName", Map())
val cubeWriter = CubeWriter(cube, Seq(output))
val res = cubeWriter.toRow(dimensionValuesNoTime, measures)
res should be(Row.fromSeq(Seq("value1", "value2", "value")))
}
"CubeWriterTest" should "return a row with values with time" in
new CommonValues {
val schema = StructType(Array(
StructField("dim1", StringType, false),
StructField("dim2", StringType, false),
StructField("op1", LongType, true)))
val cube = Cube(cubeName, Seq(dim1, dim2), Seq(op1), initSchema, schema, None, Seq.empty[Trigger],
CubeWriterOptions())
val writerOptions = CubeWriterOptions(Seq("outputName"), TypeOp.Timestamp)
val output = new OutputMock("outputName", Map())
val cubeWriter = CubeWriter(cube, Seq(output))
val res = cubeWriter.toRow(dimensionValuesT, measures)
res should be(Row.fromSeq(Seq("value1", "value2", 1L, "value")))
}
class OperatorTest(name: String, val schema: StructType, properties: Map[String, JSerializable])
extends Operator(name, schema, properties) {
override val defaultTypeOperation = TypeOp.Long
override val defaultCastingFilterType = TypeOp.Number
override def processMap(inputFields: Row): Option[Any] = {
None
}
override def processReduce(values: Iterable[Option[Any]]): Option[Long] = {
None
}
}
class OutputMock(keyName: String, properties: Map[String, JSerializable])
extends Output(keyName, properties) {
override def save(dataFrame: DataFrame, saveMode: SaveModeEnum.Value, options: Map[String, String]): Unit = {}
}
class DimensionTypeTest extends DimensionType {
override val operationProps: Map[String, JSerializable] = Map()
override val properties: Map[String, JSerializable] = Map()
override val defaultTypeOperation = TypeOp.String
override def precisionValue(keyName: String, value: Any): (Precision, Any) = {
val precision = DimensionType.getIdentity(getTypeOperation, defaultTypeOperation)
(precision, TypeOp.transformValueByTypeOp(precision.typeOp, value))
}
override def precision(keyName: String): Precision =
DimensionType.getIdentity(getTypeOperation, defaultTypeOperation)
}
trait CommonValues {
val dim1: Dimension = Dimension("dim1", "field1", "", new DimensionTypeTest)
val dim2: Dimension = Dimension("dim2", "field2", "", new DimensionTypeTest)
val dimId: Dimension = Dimension("id", "field2", "", new DimensionTypeTest)
val op1: Operator = new OperatorTest("op1", StructType(Seq(StructField("n", LongType, false))), Map())
val checkpointAvailable = 60000
val checkpointGranularity = "minute"
val cubeName = "cubeTest"
val defaultDimension = new DimensionTypeTest
val dimensionValuesT = DimensionValuesTime("testCube", Seq(DimensionValue(
Dimension("dim1", "eventKey", "identity", defaultDimension), "value1"),
DimensionValue(
Dimension("dim2", "eventKey", "identity", defaultDimension), "value2"),
DimensionValue(
Dimension("minute", "eventKey", "identity", defaultDimension), 1L)))
val dimensionValuesNoTime = DimensionValuesTime("testCube", Seq(DimensionValue(
Dimension("dim1", "eventKey", "identity", defaultDimension), "value1"),
DimensionValue(
Dimension("dim2", "eventKey", "identity", defaultDimension), "value2")))
val measures = MeasuresValues(Map("field" -> Option("value")))
val initSchema = StructType(Seq(StructField("n", StringType, false)))
}
}
|
diegohurtado/sparta
|
driver/src/test/scala/com/stratio/sparta/driver/test/writer/CubeWriterTest.scala
|
Scala
|
apache-2.0
| 7,115
|
package com.twitter.finagle.ssl.client
import com.twitter.finagle.Address
import com.twitter.finagle.ssl._
import com.twitter.io.TempFile
import java.io.File
import java.net.InetSocketAddress
import org.scalatest.funsuite.AnyFunSuite
class JdkClientEngineFactoryTest extends AnyFunSuite {
private[this] val address: Address = Address(new InetSocketAddress("localhost", 12345))
private[this] val other: Address = Address.Failed(new Exception("testing"))
test("default config with inet address creates client engine with peer") {
val config = SslClientConfiguration()
val engine = JdkClientEngineFactory(address, config)
val sslEngine = engine.self
assert(sslEngine.getUseClientMode())
assert(sslEngine.getPeerHost() == "localhost")
assert(sslEngine.getPeerPort() == 12345)
}
test("default config without inet address creates client engine without peer") {
val config = SslClientConfiguration()
val engine = JdkClientEngineFactory(other, config)
val sslEngine = engine.self
assert(sslEngine.getUseClientMode())
assert(sslEngine.getPeerHost() == null)
assert(sslEngine.getPeerPort() == -1)
}
test("config with good cert and key credentials succeeds") {
val tempCertFile = TempFile.fromResourcePath("/ssl/certs/test-rsa.crt")
// deleteOnExit is handled by TempFile
val tempKeyFile = TempFile.fromResourcePath("/ssl/keys/test-pkcs8.key")
// deleteOnExit is handled by TempFile
val keyCredentials = KeyCredentials.CertAndKey(tempCertFile, tempKeyFile)
val config = SslClientConfiguration(keyCredentials = keyCredentials)
val engine = JdkClientEngineFactory(address, config)
val sslEngine = engine.self
assert(sslEngine != null)
}
test("config with bad cert or key credential fails") {
val tempCertFile = File.createTempFile("test", "crt")
tempCertFile.deleteOnExit()
val tempKeyFile = TempFile.fromResourcePath("/ssl/keys/test-pkcs8.key")
// deleteOnExit is handled by TempFile
val keyCredentials = KeyCredentials.CertAndKey(tempCertFile, tempKeyFile)
val config = SslClientConfiguration(keyCredentials = keyCredentials)
intercept[SslConfigurationException] {
JdkClientEngineFactory(address, config)
}
}
test("config with good cert chain and key credentials succeeds") {
val tempCertFile = TempFile.fromResourcePath("/ssl/certs/test-rsa-full-cert-chain.crt")
// deleteOnExit is handled by TempFile
val tempKeyFile = TempFile.fromResourcePath("/ssl/keys/test-pkcs8.key")
// deleteOnExit is handled by TempFile
val keyCredentials = KeyCredentials.CertsAndKey(tempCertFile, tempKeyFile)
val config = SslClientConfiguration(keyCredentials = keyCredentials)
val engine = JdkClientEngineFactory(address, config)
val sslEngine = engine.self
assert(sslEngine != null)
}
test("config with bad cert chain or key credential fails") {
val tempCertFile = File.createTempFile("test", "crt")
tempCertFile.deleteOnExit()
val tempKeyFile = TempFile.fromResourcePath("/ssl/keys/test-pkcs8.key")
// deleteOnExit is handled by TempFile
val keyCredentials = KeyCredentials.CertsAndKey(tempCertFile, tempKeyFile)
val config = SslClientConfiguration(keyCredentials = keyCredentials)
intercept[SslConfigurationException] {
JdkClientEngineFactory(address, config)
}
}
test("config with cert, key, and chain fails") {
val tempCertFile = TempFile.fromResourcePath("/ssl/certs/test-rsa.crt")
// deleteOnExit is handled by TempFile
val tempKeyFile = TempFile.fromResourcePath("/ssl/keys/test-pkcs8.key")
// deleteOnExit is handled by TempFile
val keyCredentials = KeyCredentials.CertKeyAndChain(tempCertFile, tempKeyFile, tempCertFile)
val config = SslClientConfiguration(keyCredentials = keyCredentials)
intercept[SslConfigurationException] {
JdkClientEngineFactory(address, config)
}
}
test("config with insecure trust credentials succeeds") {
val config = SslClientConfiguration(trustCredentials = TrustCredentials.Insecure)
val engine = JdkClientEngineFactory(address, config)
val sslEngine = engine.self
assert(sslEngine != null)
}
test("config with good trusted cert collection succeeds") {
val tempCertFile = TempFile.fromResourcePath("/ssl/certs/test-rsa.crt")
// deleteOnExit is handled by TempFile
val trustCredentials = TrustCredentials.CertCollection(tempCertFile)
val config = SslClientConfiguration(trustCredentials = trustCredentials)
val engine = JdkClientEngineFactory(address, config)
val sslEngine = engine.self
assert(sslEngine != null)
}
test("config with bad trusted cert collection fails") {
val tempCertFile = File.createTempFile("test", "crt")
tempCertFile.deleteOnExit()
val trustCredentials = TrustCredentials.CertCollection(tempCertFile)
val config = SslClientConfiguration(trustCredentials = trustCredentials)
intercept[SslConfigurationException] {
JdkClientEngineFactory(address, config)
}
}
test("config with good cipher suites succeeds") {
val cipherSuites = CipherSuites.Enabled(Seq("TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"))
val config = SslClientConfiguration(cipherSuites = cipherSuites)
val engine = JdkClientEngineFactory(address, config)
val sslEngine = engine.self
assert(sslEngine != null)
val enabled = sslEngine.getEnabledCipherSuites()
assert(enabled.length == 1)
assert(enabled(0) == "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256")
}
test("config with bad cipher suites fails") {
val cipherSuites = CipherSuites.Enabled(Seq("TLS_ECDHE_ECDSA_WITH_AES_102_CBC_SHA496"))
val config = SslClientConfiguration(cipherSuites = cipherSuites)
intercept[IllegalArgumentException] {
JdkClientEngineFactory(address, config)
}
}
test("config with good enabled protocols succeeds") {
val protocols = Protocols.Enabled(Seq("TLSv1.2"))
val config = SslClientConfiguration(protocols = protocols)
val engine = JdkClientEngineFactory(address, config)
val sslEngine = engine.self
assert(sslEngine != null)
val enabled = sslEngine.getEnabledProtocols()
assert(enabled.length == 1)
assert(enabled(0) == "TLSv1.2")
}
test("config with bad enabled protocols fails") {
val protocols = Protocols.Enabled(Seq("TLSv2.0"))
val config = SslClientConfiguration(protocols = protocols)
intercept[IllegalArgumentException] {
JdkClientEngineFactory(address, config)
}
}
test("config with any application protocols fails") {
val appProtocols = ApplicationProtocols.Supported(Seq("h2"))
val config = SslClientConfiguration(applicationProtocols = appProtocols)
intercept[SslConfigurationException] {
JdkClientEngineFactory(address, config)
}
}
}
|
twitter/finagle
|
finagle-core/src/test/scala/com/twitter/finagle/ssl/client/JdkClientEngineFactoryTest.scala
|
Scala
|
apache-2.0
| 6,861
|
package akkahttptwirl
import akka.actor.ActorSystem
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.{HttpCharsets, MediaTypes, ResponseEntity}
import akka.stream.ActorMaterializer
import org.scalatest.{OptionValues, BeforeAndAfterAll, Matchers, WordSpec}
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.DurationInt
class TwirlSupportSpec extends WordSpec with TwirlSupport with Matchers with BeforeAndAfterAll with OptionValues {
import stub.Foo
val foo = Foo("Bar")
val timeout = 100 millis
implicit val system = ActorSystem()
implicit val materializer = ActorMaterializer()
"TwirlMarshalling" should {
"be enable to marshal twirl.scala.html to String" in {
val entity = Await.result(Marshal(html.twirl.render(foo)).to[ResponseEntity].flatMap(_.toStrict(timeout)), timeout)
entity.contentType.mediaType shouldBe MediaTypes.`text/html`
entity.contentType.charsetOption.value shouldBe HttpCharsets.`UTF-8`
entity.data.decodeString("UTF-8") should include ("<h1>Welcome Bar!</h1>")
}
"be enable to marshal twirl.scala.txt to String" in {
val entity = Await.result(Marshal(txt.twirl.render(foo)).to[ResponseEntity].flatMap(_.toStrict(timeout)), timeout)
entity.contentType.mediaType shouldBe MediaTypes.`text/plain`
entity.contentType.charsetOption.value shouldBe HttpCharsets.`UTF-8`
entity.data.decodeString("UTF-8") should include ("Welcome Bar!")
}
"be enable to marshal twirl.scala.xml to String" in {
val entity = Await.result(Marshal(xml.twirl.render(foo)).to[ResponseEntity].flatMap(_.toStrict(timeout)), timeout)
entity.contentType.mediaType shouldBe MediaTypes.`text/xml`
entity.contentType.charsetOption.value shouldBe HttpCharsets.`UTF-8`
entity.data.decodeString("UTF-8") should include ("<welcome>Bar</welcome>")
}
}
override def afterAll(){
system.shutdown()
system.awaitTermination()
super.afterAll()
}
}
|
btomala/akka-http-twirl
|
src/test/scala/akkahttptwirl/TwirlSupportSpec.scala
|
Scala
|
apache-2.0
| 2,064
|
package edu.gemini.util.skycalc.calc
import edu.gemini.skycalc.{MoonCalc, TimeUtils, ImprovedSkyCalc}
import java.util.Date
import edu.gemini.spModel.core.Site
import jsky.coords.WorldCoords
import edu.gemini.util.skycalc.calc.MoonCalculator.Fields
import javax.swing.Icon
import java.awt.geom.Arc2D
import java.awt.{Color, Graphics2D, Component, Graphics}
/**
* Support for a variety of calculations regarding the moon.
*/
trait MoonCalculator extends Calculator {
require(site == Site.GN || site == Site.GS)
val site: Site
val values: Vector[Vector[Double]] = calculate()
import Fields._
lazy val elevation: Double = valueAt(Elevation.id, start)
lazy val phaseAngle: Double = valueAt(PhaseAngle.id, start)
lazy val illuminatedFraction: Double = valueAt(IlluminatedFraction.id, start)
def elevationAt(t: Long): Double = valueAt(Elevation.id, t)
lazy val minElevation: Double = min(Elevation.id)
lazy val maxElevation: Double = max(Elevation.id)
lazy val meanElevation: Double = mean(Elevation.id)
def phaseAngleAt(t: Long): Double = valueAt(PhaseAngle.id, t)
lazy val minPhaseAngle: Double = min(PhaseAngle.id)
lazy val maxPhaseAngle: Double = max(PhaseAngle.id)
lazy val meanPhaseAngle: Double = mean(PhaseAngle.id)
def illuminatedFractionAt(t: Long): Double = valueAt(IlluminatedFraction.id, t)
lazy val minIlluminatedFraction: Double = min(IlluminatedFraction.id)
lazy val maxIlluminatedFraction: Double = max(IlluminatedFraction.id)
lazy val meanIlluminatedFraction: Double = mean(IlluminatedFraction.id)
lazy val newMoons: Seq[Long] = MoonCalculator.calculatePhases(site, Interval(start, end), MoonCalc.Phase.NEW)
lazy val firstQuarterMoons: Seq[Long] = MoonCalculator.calculatePhases(site, Interval(start, end), MoonCalc.Phase.FIRST_QUARTER)
lazy val fullMoons: Seq[Long] = MoonCalculator.calculatePhases(site, Interval(start, end), MoonCalc.Phase.FULL)
lazy val lastQuarterMoons: Seq[Long] = MoonCalculator.calculatePhases(site, Interval(start, end), MoonCalc.Phase.LAST_QUARTER)
protected def calculate() = {
val skycalc = new ImprovedSkyCalc(site)
val dummy = new WorldCoords(0, 0)
// prepare temporary data structure
val values = Array (
new Array[Double](samples),
new Array[Double](samples),
new Array[Double](samples),
new Array[Double](samples),
new Array[Double](samples),
new Array[Double](samples),
new Array[Double](samples)
)
// fill temporary data structure with calculated values
for (ix <- 0 to samples-1) {
val t = times(ix)
skycalc.calculate(dummy, new Date(t), true)
values(Elevation.id)(ix) = skycalc.getLunarElevation
values(PhaseAngle.id)(ix) = skycalc.getLunarPhaseAngle
values(IlluminatedFraction.id)(ix) = skycalc.getLunarIlluminatedFraction
values(SkyBrightness.id)(ix) = if (skycalc.getLunarSkyBrightness == null) 0.0 else skycalc.getLunarSkyBrightness.toDouble
}
// turn into immutable data structure
Vector(
// IMPORTANT: Make sure the order reflects the id values of the field enums!
Vector(values(Elevation.id):_*),
Vector(values(PhaseAngle.id):_*),
Vector(values(IlluminatedFraction.id):_*),
Vector(values(SkyBrightness.id):_*)
)
}
}
case class IntervalMoonCalculator(site: Site, defined: Interval, rate: Long) extends FixedRateCalculator with LinearInterpolatingCalculator with MoonCalculator
case class SampleMoonCalculator(site: Site, times: Vector[Long]) extends IrregularIntervalCalculator with LinearInterpolatingCalculator with MoonCalculator
case class SingleValueMoonCalculator(site: Site, time: Long) extends SingleValueCalculator with MoonCalculator
object MoonCalculator {
/** Enumeration that defines the different fields for this calculator for indexed access in sequence. */
object Fields extends Enumeration {
type Field = Value
val Elevation, PhaseAngle, IlluminatedFraction, SkyBrightness = Value
}
def apply(site: Site, defined: Interval, rate: Long = TimeUtils.seconds(30)): MoonCalculator = {
new IntervalMoonCalculator(site, defined, rate)
}
def apply(site: Site, time: Long): MoonCalculator = {
new SingleValueMoonCalculator(site, time)
}
def apply(site: Site, times: Vector[Long]): MoonCalculator = {
new SampleMoonCalculator(site, times)
}
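// Usage sketch for the factory methods above (time values are placeholders, not from the
// original file); a fixed-rate calculator samples every 30 seconds by default:
// val calc = MoonCalculator(Site.GN, Interval(nightStart, nightEnd))
// val fraction = calc.illuminatedFractionAt(midnight)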
def newMoons(site: Site, interval: Interval): Seq[Long] = calculatePhases(site, interval, MoonCalc.Phase.NEW)
def firstQuarterMoons(site: Site, interval: Interval): Seq[Long] = calculatePhases(site, interval, MoonCalc.Phase.FIRST_QUARTER)
def fullMoons(site: Site, interval: Interval): Seq[Long] = calculatePhases(site, interval, MoonCalc.Phase.FULL)
def lastQuarterMoons(site: Site, interval: Interval): Seq[Long] = calculatePhases(site, interval, MoonCalc.Phase.LAST_QUARTER)
/** Calculates times of the given moon phase for the given interval. */
protected def calculatePhases(site: Site, interval: Interval, phaseConstant: MoonCalc.Phase): Seq[Long] = {
def sample(period: Int, res: Seq[Long], t: Long): Seq[Long] = {
if (t > interval.end) res
else {
val time = MoonCalc.getMoonTime(period, phaseConstant)
val r = if (interval.contains(time)) res :+ time else res
sample(period+1, r, time)
}
}
val period = MoonCalc.approximatePeriod(interval.start)
sample(period, Seq(), interval.start)
}
}
class MoonIcon(size: Int, illum: Double, waxing: Boolean) extends Icon {
val baseAngle = if (waxing) 90 else 270
def getIconHeight: Int = size
def getIconWidth: Int = size
def paintIcon(c: Component, g: Graphics, x: Int, y: Int): Unit = {
val g2d = g.asInstanceOf[Graphics2D]
// Dark side
val darkSize = halfMoon(x, y, getIconWidth() - 1, getIconHeight() - 1, 0)
g2d.setColor(Color.BLACK)
g2d.fill(darkSize)
// Light side
val brightSide = halfMoon(x, y, getIconWidth() - 1, getIconHeight() - 1, 180)
g2d.setColor(Color.WHITE)
g2d.fill(brightSide)
// Additional shadow or light
if (illum < 0.5) {
val width = (0.5 - illum) * getIconWidth().toDouble
val shadow = halfMoon(x + size / 2 - width, y, width * 2 - 1, size - 1, 180)
g2d.setColor(Color.BLACK)
g2d.fill(shadow)
} else if (illum > 0.5) {
val width = (illum - 0.5) * getIconWidth().toDouble
val light = halfMoon(x + size / 2 - width, y, width * 2 - 1, size - 1, 0)
g2d.setColor(Color.WHITE)
g2d.fill(light)
}
// Gray outline
g2d.setColor(Color.GRAY)
val circle = new Arc2D.Double(x, y, size - 1, size - 1, 0, 360, Arc2D.OPEN)
g2d.draw(circle)
}
private def halfMoon(x: Double, y: Double, width: Double, height: Double, angle: Int, degrees: Int): Arc2D.Double =
new Arc2D.Double(x, y, width, height, baseAngle + angle, degrees, Arc2D.OPEN)
private def halfMoon(x: Double, y: Double, width: Double, height: Double, angle: Int): Arc2D.Double =
halfMoon(x, y, width, height, angle, 180)
}
|
arturog8m/ocs
|
bundle/edu.gemini.util.skycalc/src/main/scala/edu/gemini/util/skycalc/calc/MoonCalculator.scala
|
Scala
|
bsd-3-clause
| 7,088
|
package com.wlangiewicz.workouttracker.dao
import com.github.nscala_time.time.Imports._
import com.wlangiewicz.workouttracker.domain._
class WorkoutDao {
var workouts = Set(
Workout(UserId(1), WorkoutId(1), "morning run", 10000, 3700, new DateTime(2016, 2, 9, 11, 0, 0, 0)),
Workout(UserId(1), WorkoutId(2), "evening run", 10000, 3650, new DateTime(2016, 2, 9, 12, 0, 0, 0)),
Workout(UserId(1), WorkoutId(3), "morning run 2", 10000, 3600, new DateTime(2016, 2, 10, 12, 0, 0, 0)),
Workout(UserId(1), WorkoutId(4), "evening run 3", 10000, 3550, new DateTime(2016, 2, 15, 12, 0, 0, 0))
)
def add(workout: Workout) = {
workouts = workouts + workout
}
// Using String instead of [Int, Int] as Map key - spray-json bug: https://github.com/spray/spray-json/issues/125
private def sprayBugWorkaround(m: Map[(Int, Int), Set[Workout]]) = m.map(w => (w._1._1.toString + "-" + w._1._2.toString, w._2))
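// e.g. (illustrative, `ws` is a hypothetical Set[Workout]): a workout dated 2016-02-09 falls in
// ISO week 6 of 2016, so sprayBugWorkaround(Map((2016, 6) -> ws)) == Map("2016-6" -> ws).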
// Using String instead of [Int, Int] as Map key - spray-json bug: https://github.com/spray/spray-json/issues/125
def findAllByUserInRangeGroupedWeekly(userId: UserId, rangeStart: DateTime, rangeEnd: DateTime): Map[String, Set[Workout]] = {
sprayBugWorkaround(findInDateRangeByUser(userId, rangeStart, rangeEnd).groupBy(w => (w.date.getYear, w.date.getWeekOfWeekyear)))
}
def findInDateRangeByUser(userId: UserId, rangeStart: DateTime, rangeEnd: DateTime) = {
workouts.filter(w => w.userId == userId && w.date >= rangeStart && w.date <= rangeEnd)
}
// Using String instead of [Int, Int] as Map key - spray-json bug: https://github.com/spray/spray-json/issues/125
def findAllByUserGroupedWeekly(userId: UserId): Map[String, Set[Workout]] = {
sprayBugWorkaround(findAllByUser(userId).groupBy(w => (w.date.getYear, w.date.getWeekOfWeekyear)))
}
def findAllByUser(userId: UserId): Set[Workout] = workouts.filter(w => w.userId == userId)
def editWorkout(workout: Workout) = {
deleteWorkout(workout.workoutId)
add(workout)
workout
}
def deleteWorkout(workoutId: WorkoutId): Unit = {
workouts.find(_.workoutId == workoutId).foreach(w => workouts = workouts - w)
}
def isOwner(userId: UserId, workoutId: WorkoutId) = {
workouts.exists(w => w.workoutId == workoutId && w.userId == userId)
}
def isValidWorkout(newWorkoutRequest: RecordWorkoutRequest): Boolean = {
newWorkoutRequest.date > DateTime.now - 10.years // bogus condition for invalid request
}
def nextWorkoutId = {
val currentMaxWorkoutId = workouts.map(_.workoutId.value).max
WorkoutId(currentMaxWorkoutId + 1)
}
}
|
wlk/workout-tracker-akka-http
|
src/main/scala/com/wlangiewicz/workouttracker/dao/WorkoutDao.scala
|
Scala
|
mit
| 2,586
|
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.action
import io.gatling.AkkaSpec
import io.gatling.core.session.Session
import io.gatling.core.stats.DataWritersStatsEngine
import io.gatling.core.stats.writer.GroupMessage
import akka.testkit._
class GroupEndSpec extends AkkaSpec {
"GroupEnd" should "exit the current group" in {
val dataWriterProbe = TestProbe()
val statsEngine = new DataWritersStatsEngine(system, List(dataWriterProbe.ref))
val groupEnd = new GroupEnd(statsEngine, new ActorDelegatingAction("next", self))
val session = Session("scenario", 0)
val sessionInGroup = session.enterGroup("group")
groupEnd ! sessionInGroup
expectMsg(session)
dataWriterProbe.expectMsgType[GroupMessage]
}
}
|
GabrielPlassard/gatling
|
gatling-core/src/test/scala/io/gatling/core/action/GroupEndSpec.scala
|
Scala
|
apache-2.0
| 1,343
|
package com.workshop
object FunctionsComposition {
// Functions Composition
val toLowerCase: String => String = _.toLowerCase
val capitalizeWords: String => String =
(value: String) => value.split(" ").map(word => word.take(1).toUpperCase + word.substring(1)).mkString(" ")
val removeSpaces: String => String = _.replace(" ", "")
val camelCase: (String) => String =
toLowerCase
.andThen(capitalizeWords)
.andThen(removeSpaces)
//Now your turn
val price = 10
def taxCalculatorFactory(taxPercentage: Int): Double => Double = _ * (1 + taxPercentage / 100d)
def taxCalculatorFor(country: String): Double => Double = country match {
case "IL" => taxCalculatorFactory(18)
case "UA" => taxCalculatorFactory(20)
}
val applyTax: Double => Double = taxCalculatorFor("UA")
val applyDiscount: Double => Double = _ * 0.8
val applyShipping: Double => Double = _ + 10
// use .andThen to create the function as a combination of existing functions
val calculatePriceThenShipping: Double => Double = _ => ???
// use .compose (opposite order of andThen). Note: now we're using `def` for the function declaration
def addShippingThenCalculatePrice(price: Double): Double = ???
}
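// One possible completion of the two exercises above (a sketch, not the workshop's official
// answer; the exact tax/discount ordering is an assumption):
// val calculatePriceThenShipping: Double => Double =
//   applyDiscount.andThen(applyTax).andThen(applyShipping)
// def addShippingThenCalculatePrice(price: Double): Double =
//   (applyTax compose applyDiscount compose applyShipping)(price)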
|
maximn/scala-workshop
|
src/main/scala/com/workshop/FunctionsComposition.scala
|
Scala
|
mit
| 1,229
|
package dsentric.filter
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
import dsentric.{DArray, DObject, Dsentric}
class DFilterTests extends AnyFunSuite with Matchers {
import Dsentric._
import dsentric.filter.DFilterSyntax._
import dsentric.codecs.std.DCodecs._
test("Existance/nonexistance of field") {
object Query1 extends Contract {
val field = \?[String]
val nested = new \\ {
val field2 = \?[String]
}
}
val query = Query1.field.$exists(true)
query.isMatch(DObject("field" := "value")) should be(true)
query.isMatch(DObject("field2" := "value")) should be(false)
val query2 = Query1.field.$exists(false) && Query1.nested.field2.$exists(true)
query2.isMatch(DObject("nested" -> DObject("field2" := "value"))) should be(true)
query2.isMatch(DObject("field" := "value", "nested" ::= ("field2" := "value"))) should be(false)
}
test("Equality") {
object Query2 extends Contract {
val field = \?[String]
val nested = new \\ {
val field2 = \[Int]
}
}
val query1 = Query2.field.$eq("TEST") || Query2.nested.field2.$eq(45)
query1.isMatch(DObject("field" := "TEST")) should be(true)
query1.isMatch(DObject.empty) should be(false)
query1.isMatch(DObject("field" := "TEST2")) should be(false)
query1.isMatch(DObject("nested" -> DObject("field2" := 45))) should be(true)
query1.isMatch(DObject("field" := "TEST", "nested" -> DObject("field2" := 45))) should be(true)
val query3 = Query2(q => q.field.$in("TEST", "TEST2") && q.nested.field2.$nin(4, 5, 6))
query3.isMatch(DObject("field" := "TEST")) should be(true)
query3.isMatch(DObject("field" := "TEST", "nested" -> DObject("field2" := 3))) should be(true)
query3.isMatch(DObject("field" := "TEST", "nested" -> DObject("field2" := 4))) should be(false)
query3.isMatch(DObject("field" := "TEST3")) should be(false)
query3.isMatch(DObject("field" := "TEST3", "nested" -> DObject("field2" := 3))) should be(false)
query3.isMatch(DObject("nested" -> DObject("field2" := 3))) should be(false)
//TODO not a generalised solution
val query4 = Query2.field.$like("value")
query4.isMatch(DObject("field" := "Value")) should be(true)
query4.isMatch(DObject.empty) should be(false)
query4.isMatch(DObject("field" := "Values")) should be(false)
val query5 = Query2.field.$like("%lue")
query5.isMatch(DObject("field" := "ValuE")) should be(true)
query5.isMatch(DObject.empty) should be(false)
query5.isMatch(DObject("field" := "Values")) should be(false)
val query6 = Query2.field.$regex("vaLUe", "i")
query6.isMatch(DObject("field" := "Value")) should be(true)
query6.isMatch(DObject.empty) should be(false)
query6.isMatch(DObject("field" := "Values")) should be(false)
}
test("Long double equality") {
DFilter("field" := 1L).isMatch(DObject("field" := 1.00d)) shouldBe true
}
test("element value match") {
object Query3 extends Contract {
val doubles = \[Vector[Long]]
val nested = new \\ {
val strings = \?[Vector[String]]
}
}
val query1 = Query3.doubles.$elemMatch(_.$gt(4))
query1.isMatch(DObject("doubles" := Vector(3, 5))) should be(true)
query1.isMatch(DObject("doubles" := Vector(2, 4))) should be(false)
query1.isMatch(DObject("doubles" -> DArray.empty)) should be(false)
val query2 = Query3.nested.strings.$elemMatch(_.$eq("value"))
query2.isMatch(DObject("nested" -> DObject("strings" := Vector("value", "test")))) should be(true)
query2.isMatch(DObject("nested" -> DObject("strings" := Vector("test", "test2")))) should be(false)
}
test("element object match") {
object Query3 extends Contract {
val objs = \[DArray]
}
object Element extends Contract {
val value = \[String]
}
val query1 = Query3.objs.$elemMatch(_ => Element.value.$eq("Test"))
query1.isMatch(DObject("objs" := DArray(DObject("value" := "Test2"), DObject("value" := "Test")))) should be(true)
query1.isMatch(DObject("objs" := DArray(DObject("value" := "Test2"), DObject("value" := "Test3")))) should be(false)
query1.isMatch(DObject("objs" -> DArray.empty)) should be(false)
}
test("boolean operators") {
object Query4 extends Contract {
val value = \\[Double]("value")
}
val query1 = Query4.value.$gt(0) || Query4.value.$lt(-10)
query1.isMatch(DObject("value" := 2)) should be(true)
query1.isMatch(DObject("value" := -3)) should be(false)
query1.isMatch(DObject("value" := -15)) should be(true)
val query2 = query1 !
query2.isMatch(DObject("value" := 2)) should be(false)
query2.isMatch(DObject("value" := -3)) should be(true)
query2.isMatch(DObject("value" := -15)) should be(false)
val query3 = Query4.value.$gte(0) && Query4.value.$lt(50)
query3.isMatch(DObject("value" := 12)) should be(true)
query3.isMatch(DObject("value" := -3)) should be(false)
query3.isMatch(DObject("value" := 50)) should be(false)
}
test("contract element match") {
object Element extends Contract {
val value = \[Int]
}
object Query5 extends Contract {
val elements = \[Vector[DObject]](Element)
}
val query = Query5.elements.$elemMatch(_ => Element.value.$gt(5))
query.isMatch(DObject("elements" -> DArray.empty)) should be(false)
query.isMatch(DObject("elements" -> DArray(DObject("value" := 6)))) should be(true)
query.isMatch(DObject("elements" -> DArray(DObject("value" := 4)))) should be(false)
query.isMatch(
DObject(
"elements" -> DArray(DObject("value" := 4), DObject("value" := 3), DObject("value" := 7), DObject("value" := 4))
)
) should be(true)
}
test("Map property match") {
object Query5 extends Contract {
val elements = \[Map[String, String]]
}
val query = (Query5.elements \\ "child").$regex(".*bob.*")
query.isMatch(DObject("elements" -> DObject.empty)) should be(false)
query.isMatch(DObject("elements" ::= ("child" := "what bob value"))) should be(true)
query.isMatch(DObject("elements" ::= ("child" := "fail"))) should be(false)
}
test("Map property object match") {
object Element extends Contract {
val value = \[Double]
}
object Query5 extends Contract {
val elements = \[Map[String, DObject]](Element)
}
val query = (Query5.elements \\ "child" \\\\ Element.value).$gt(45)
query.isMatch(DObject("elements" -> DObject.empty)) should be(false)
query.isMatch(DObject("elements" ::= ("child" ::= ("value" := 56.78)))) should be(true)
query.isMatch(DObject("elements" ::= ("child" ::= ("value" := 33)))) should be(false)
}
}
|
HigherState/dsentric
|
maps/src/test/scala/dsentric/filter/DFilterTests.scala
|
Scala
|
apache-2.0
| 6,768
|
package toguru.toggles
import akka.actor.{Actor, ActorSystem, Props}
import com.codahale.metrics.Counter
import com.typesafe.config.{Config => TypesafeConfig}
import play.api.http.HeaderNames
import play.api.libs.json.Json
import play.api.test.FakeRequest
import play.api.test.Helpers._
import toguru.app.Config
import toguru.helpers.ControllerSpec
import toguru.toggles.ToggleStateActor.{GetState, ToggleStateInitializing}
import toguru.toggles.events.Rollout
import scala.concurrent.duration._
object ToggleStateControllerSpec {
implicit class MyFakeRequest[A](val request: FakeRequest[A]) extends AnyVal {
def withAccept(mimeType: String) = request.withHeaders(HeaderNames.ACCEPT -> mimeType)
}
case class ToggleStatesV2(sequenceNo: Long, toggles: Seq[ToggleStateV2])
def stateV2(s: ToggleState) =
ToggleStateV2(s.id, s.tags, s.activations.headOption.flatMap(_.rollout.map(_.percentage)))
case class ToggleStateV2(id: String,
tags: Map[String, String] = Map.empty,
rolloutPercentage: Option[Int] = None)
}
class ToggleStateControllerSpec extends ControllerSpec {
import ToggleStateControllerSpec._
def createController(props: Props): ToggleStateController = {
val config = new Config {
override val actorTimeout = 100.millis
override val typesafeConfig = mock[TypesafeConfig]
override def auth = Authentication.Config(Seq.empty, disabled = false)
override def auditLog = AuditLog.Config()
override def toggleState = ToggleState.Config()
}
val system = ActorSystem()
val actor = system.actorOf(props)
val counter = mock[Counter]
new ToggleStateController(actor, config, counter, counter)
}
val toggles = Map(
"toggle-3" -> ToggleState("toggle-3",
activations = IndexedSeq(ToggleActivation(Map("country" -> Seq("de-DE", "de-AT")), Some(Rollout(25))))),
"toggle-2" -> ToggleState("toggle-2"),
"toggle-1" -> ToggleState("toggle-1", Map("team" -> "Toguru team"))
)
def toggleStateActorProps(toggles: Map[String,ToggleState]) =
Props(new Actor() { override def receive = { case GetState => sender ! ToggleStates(10, toggles.values.to[Vector].sortBy(_.id))}})
"get method" should {
"return current toggle state" in {
// prepare
implicit val rolloutReads = Json.reads[Rollout]
implicit val activationReads = Json.reads[ToggleActivation]
implicit val toggleReads = Json.reads[ToggleState]
implicit val reads = Json.reads[ToggleStates]
val controller: ToggleStateController = createController(toggleStateActorProps(toggles))
val request = FakeRequest().withAccept(ToggleStateController.MimeApiV3)
// execute
val result = controller.get(Some(9)).apply(request)
// verify
status(result) mustBe 200
val states = contentAsJson(result).as[ToggleStates]
states.toggles mustBe Seq(
toggles("toggle-1"),
toggles("toggle-2"),
toggles("toggle-3")
)
}
"return version specific format V2" in {
// prepare
implicit val toggleV2Reads = Json.reads[ToggleStateV2]
val togglesV2Reads = Json.reads[ToggleStatesV2]
val controller: ToggleStateController = createController(toggleStateActorProps(toggles))
val request = FakeRequest().withAccept(ToggleStateController.MimeApiV2)
// execute
val result = controller.get(Some(9)).apply(request)
// verify
status(result) mustBe 200
val states = contentAsJson(result).as(togglesV2Reads)
states.toggles mustBe Seq(
ToggleStateV2("toggle-1", Map("team" -> "Toguru team")),
ToggleStateV2("toggle-2"),
ToggleStateV2("toggle-3", rolloutPercentage = Some(25))
)
}
"return format V1 if requested" in {
// prepare
implicit val reads = Json.reads[ToggleStateV2]
val controller: ToggleStateController = createController(toggleStateActorProps(toggles))
val request = FakeRequest()
// execute
val result = controller.get(None).apply(request)
// verify
status(result) mustBe 200
val state = contentAsJson(result).as[Seq[ToggleStateV2]]
val expectedToggles = (1 to 3).map(i => stateV2(toggles(s"toggle-$i")))
state mustBe expectedToggles
}
"reject unknown toggle format requests" in {
val MimeApiV4 = "application/vnd.toguru.v4+json"
val controller: ToggleStateController = createController(toggleStateActorProps(toggles))
val request = FakeRequest().withAccept(MimeApiV4)
// execute
val result = controller.get(None).apply(request)
// verify
status(result) mustBe 406
val responseBody = contentAsJson(result)
val maybeAllowedContentTypes = (responseBody \ "allowedContentTypes").asOpt[Seq[String]]
maybeAllowedContentTypes mustBe defined
maybeAllowedContentTypes.value mustNot be (empty)
}
"return Internal Server Error if seqNo is newer than server seqNo" in {
// prepare
val controller: ToggleStateController = createController(toggleStateActorProps(toggles))
val request = FakeRequest().withHeaders(HeaderNames.ACCEPT -> ToggleStateController.MimeApiV2)
// execute
val result = controller.get(Some(11)).apply(request)
// verify
status(result) mustBe 500
}
"return Internal Server Error if toggle state actor responds with initializing" in {
// prepare
val initializingActor = Props(new Actor() { override def receive = { case GetState => sender ! ToggleStateInitializing }})
val controller: ToggleStateController = createController(initializingActor)
val request = FakeRequest().withHeaders(HeaderNames.ACCEPT -> ToggleStateController.MimeApiV2)
// execute
val result = controller.get(Some(10)).apply(request)
// verify
status(result) mustBe 500
}
}
}
|
andreas-schroeder/toguru
|
test/toguru/toggles/ToggleStateControllerSpec.scala
|
Scala
|
mit
| 5,949
|
package org.jetbrains.plugins.scala.codeInsight.intention.types
import org.jetbrains.plugins.scala.ScalaBundle
/**
* Converts an expression representing a Java collection into its Scala
* equivalent using [[scala.collection.JavaConverters]].
*
* @author Eugene Platonov
* 04/07/13
*/
class ConvertJavaToScalaCollectionIntention extends BaseJavaConvertersIntention("asScala") {
val targetCollections = Set(
"java.lang.Iterable",
"java.util.Iterator",
"java.util.Collection",
"java.util.Dictionary",
"java.util.Map"
)
val alreadyConvertedPrefixes: Set[String] = Set("scala.collection")
override def getText = ScalaBundle.message("convert.java.to.scala.collection.hint")
def getFamilyName = ConvertJavaToScalaCollectionIntention.getFamilyName
}
object ConvertJavaToScalaCollectionIntention {
def getFamilyName = ScalaBundle.message("convert.java.to.scala.collection.name")
}
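// Illustrative effect of the intention above (a sketch, not code from this plugin): applied to
// an expression `javaList` of type java.util.List[String], it would rewrite the expression to
// `javaList.asScala` and bring `import scala.collection.JavaConverters._` into the file.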
|
triggerNZ/intellij-scala
|
src/org/jetbrains/plugins/scala/codeInsight/intention/types/ConvertJavaToScalaCollectionIntention.scala
|
Scala
|
apache-2.0
| 912
|
package org.littlewings.weldse.example
import javax.enterprise.inject.Instance
import org.jboss.weld.environment.se.{Weld, WeldContainer}
import org.scalatest.FunSpec
import org.scalatest.Matchers._
class WeldExample extends FunSpec {
describe("weld-se spec") {
it("simple use") {
val weld: Weld = new Weld
val container: WeldContainer = weld.initialize()
val instance: Instance[AnyRef] = container.instance
val calcService = instance.select(classOf[CalcService]).get
calcService.plus(1, 3) should be (4)
weld.shutdown()
}
it("normal") {
val weld: Weld = new Weld
val container: WeldContainer = weld.initialize()
val instance: Instance[AnyRef] = container.instance
val calcService1 = instance.select(classOf[CalcService]).get
val calcService2 = instance.select(classOf[CalcService]).get
calcService1 eq calcService2 should be (false)
weld.shutdown()
}
it("application-scoped") {
val weld: Weld = new Weld
val container: WeldContainer = weld.initialize()
val dateService1 = container.instance.select(classOf[DateService]).get
val dateService2 = container.instance.select(classOf[DateService]).get
dateService1 should be theSameInstanceAs dateService2
dateService1.time should be (dateService2.time)
weld.shutdown()
}
it("@Inject resolved") {
val weld: Weld = new Weld
val container: WeldContainer = weld.initialize()
val instance: Instance[AnyRef] = container.instance
val facadeService = instance.select(classOf[FacadeService]).get
val dateService = instance.select(classOf[DateService]).get
facadeService.dateService should be theSameInstanceAs dateService
facadeService.dateService.time should be (dateService.time)
facadeService.calcService.plus(1, 5) should be (6)
weld.shutdown()
}
}
}
|
kazuhira-r/javaee6-scala-examples
|
weld-se-example/src/test/scala/org/littlewings/weldse/example/WeldSeSpec.scala
|
Scala
|
mit
| 1,923
|
package org.chaomai.paraten.tensor
import breeze.linalg.DenseVector
import org.chaomai.paraten.{Common, UnitSpec}
/**
* Created by chaomai on 02/05/2017.
*/
class TestCoordinateTensor extends UnitSpec {
"A TensorFactory" should "build tensor from file" in {
val dim = Common.dim3TensorSize
val t = Common.dim3Tensor
println(t)
assert(t.dimension == dim.length)
assert(t.size == dim.product)
assert(t.shape == dim)
}
it should "build tensor from vals" in {
implicit val sc = Common.sc
val es = Seq(
TEntry((0, 0, 0, 0), 23.0),
TEntry((0, 0, 0, 1), 65.0),
TEntry((0, 1, 0, 0), 30.0),
TEntry((0, 1, 0, 1), 72.0),
TEntry((1, 0, 0, 0), 107.0),
TEntry((1, 0, 0, 1), 149.0),
TEntry((1, 1, 0, 0), 114.0),
TEntry((1, 1, 0, 1), 156.0)
)
val t = CoordinateTensor.vals(IndexedSeq(2, 2, 1, 2), es: _*)
println(t)
}
it should "build tensor from DenseVector" in {
implicit val sc = Common.sc
val v1 = DenseVector(1, 2, 3, 4, 5)
val t = CoordinateTensor.fromDenseVector(v1.length, v1)
println(t)
}
"A CoordinateTensor" should "get fiber from a dense tensor" in {
val t = Common.dim4DenseTensor
Common.debugMessage("fiber on mode 1")
t.fibersOnMode(1).foreach(println)
Common.debugMessage("fiber on mode 2")
t.fibersOnMode(2).foreach(println)
}
it should "get fiber from a sparse tensor" in {
val t = Common.dim4SparseTensor
Common.debugMessage("fiber on mode 1")
t.fibersOnMode(1).foreach(println)
Common.debugMessage("fiber on mode 2")
t.fibersOnMode(2).foreach(println)
}
it should "check tensor equality" in {
implicit val sc = Common.sc
val t = Common.dim4DenseTensor
assert(t :~== t)
}
it should "perform tensor addition" in {
implicit val sc = Common.sc
val t = Common.dim4DenseTensor
val t1 = t :+ 1
val t2 = t.map(_.map(_ + 1))
val t3 = t1 :+ t2
val t4 = t.map(_.map(v => (v + 1) * 2))
assert(t3 :~== t4)
println(t3)
}
it should "perform elementwise addition with different-numbered entries" in {
implicit val sc = Common.sc
val t = Common.dim4DenseTensor
val t1 = CoordinateTensor.vals(t.shape, TEntry((0, 0, 2, 0), 5.0))
val t2 = t :+ t1
val eOption =
t2.find(e => e.coordinate == Coordinate(IndexedSeq(0, 0, 2, 0)))
assert(eOption.isDefined)
assert(eOption.get.value == 10)
println(t2)
}
it should "perform scalar addition" in {
val t = Common.dim4DenseTensor
val t1 = t :+ 1
val t2 = t.map(_.map(_ + 1))
assert(t1 :~== t2)
println(t1)
}
it should "perform scalar production" in {
val t = Common.dim4DenseTensor
val t1 = t :* 5
val t2 = t.map(_.map(_ * 5))
assert(t1 :~== t2)
println(t1)
}
it should "perform outer product with vector" in {
implicit val sc = Common.sc
val v1 = DenseVector(1, 2, 3, 4, 5)
val t = CoordinateTensor.fromDenseVector(v1.length, v1)
val v2 = DenseVector(6, 7, 8, 9, 10)
val t1 = t <* v2
val outerv3 = v1 * v2.t
val entries = outerv3
.mapPairs { (coord, v) =>
TEntry(coord, v)
}
.toArray
.toSeq
val t2 =
CoordinateTensor.vals(IndexedSeq(v1.length, v2.length), entries: _*)
println(t1)
println(outerv3)
assert(t1 :~== t2)
}
it should "perform n-mode product" in {
implicit val sc = Common.sc
val t = Common.dim4DenseTensor
val v = sc.broadcast(DenseVector[Double](2, 2, 3))
val nModeProd = t nModeProd(2, v)
println(nModeProd)
assert(nModeProd.shape == IndexedSeq(2, 2, 1, 2))
val eOption =
nModeProd.find(e => e.coordinate == Coordinate(IndexedSeq(1, 0, 0, 0)))
assert(eOption.isDefined)
assert(eOption.get.value == 107)
val es = Seq(
TEntry((0, 0, 0, 0), 23.0),
TEntry((0, 0, 0, 1), 65.0),
TEntry((0, 1, 0, 0), 30.0),
TEntry((0, 1, 0, 1), 72.0),
TEntry((1, 0, 0, 0), 107.0),
TEntry((1, 0, 0, 1), 149.0),
TEntry((1, 1, 0, 0), 114.0),
TEntry((1, 1, 0, 1), 156.0)
)
val t1 = CoordinateTensor.vals(IndexedSeq(2, 2, 1, 2), es: _*)
assert(nModeProd :~== t1)
}
}
|
ChaoMai/ParaTen
|
src/test/scala/org/chaomai/paraten/tensor/TestCoordinateTensor.scala
|
Scala
|
apache-2.0
| 4,238
|
package breeze.linalg
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import scala.{specialized=>spec}
import breeze.generic._
import breeze.linalg.support._
import breeze.linalg.operators._
import breeze.math._
import breeze.util.{ArrayUtil, Isomorphism}
import breeze.storage.Zero
import scala.reflect.ClassTag
import com.github.fommil.netlib.BLAS.{getInstance => blas}
import breeze.macros.expand
import scala.math.BigInt
import spire.syntax.cfor._
import CanTraverseValues.ValuesVisitor
import CanZipAndTraverseValues.PairValuesVisitor
import java.io.ObjectStreamException
import scalaxy.debug._
/**
* A DenseVector is the "obvious" implementation of a Vector, with one twist.
* The underlying data may have more data than the Vector, represented using an offset
* into the array (for the 0th element), and a stride that is how far elements are apart
* from one another.
*
* The i'th element is at offset + i * stride
*
* @author dlwh
*
* @param data data array
* @param offset index of the 0'th element
* @param stride separation between elements
* @param length number of elements
*/
@SerialVersionUID(1L) // TODO: scala doesn't propagate this to specialized subclasses. Sigh.
class DenseVector[@spec(Double, Int, Float, Long) V](val data: Array[V],
val offset: Int,
val stride: Int,
val length: Int) extends StorageVector[V]
with VectorLike[V, DenseVector[V]] with Serializable{
def this(data: Array[V]) = this(data, 0, 1, data.length)
def this(data: Array[V], offset: Int) = this(data, offset, 1, data.length)
def this(length: Int)(implicit man: ClassTag[V]) = this(new Array[V](length), 0, 1, length)
// uncomment to get all the ridiculous places where specialization fails.
// if(data.isInstanceOf[Array[Double]] && getClass.getName() == "breeze.linalg.DenseVector") throw new Exception("...")
// ensure that operators are all loaded.
DenseVector.init()
def repr: DenseVector[V] = this
def activeSize = length
def apply(i: Int): V = {
if(i < - size || i >= size) throw new IndexOutOfBoundsException(i + " not in [-"+size+","+size+")")
val trueI = if(i<0) i+size else i
if (noOffsetOrStride) {
data(trueI)
} else {
data(offset + trueI * stride)
}
}
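// Worked example of the indexing scheme described above (illustrative comment, not from the
// original source): with data = Array(10, 20, 30, 40, 50, 60), offset = 1, stride = 2 and
// length = 3, the vector is (20, 40, 60); element i lives at data(offset + i * stride).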
def update(i: Int, v: V): Unit = {
if(i < - size || i >= size) throw new IndexOutOfBoundsException(i + " not in [-"+size+","+size+")")
val trueI = if(i<0) i+size else i
if (noOffsetOrStride) {
data(trueI) = v
} else {
data(offset + trueI * stride) = v
}
}
private[linalg] val noOffsetOrStride = offset == 0 && stride == 1
@deprecated("This isn't actually any faster any more", "0.12-SNAPSHOT")
def unsafeUpdate(i: Int, v: V): Unit = if (noOffsetOrStride) data(i) = v else data(offset+i*stride) = v
def activeIterator: Iterator[(Int, V)] = iterator
def activeValuesIterator: Iterator[V] = valuesIterator
def activeKeysIterator: Iterator[Int] = keysIterator
override def equals(p1: Any) = p1 match {
case y: DenseVector[_] =>
y.length == length && ArrayUtil.nonstupidEquals(data, offset, stride, length, y.data, y.offset, y.stride, y.length)
case _ => super.equals(p1)
}
override def toString = {
valuesIterator.mkString("DenseVector(",", ", ")")
}
/**
* Returns a copy of this DenseVector. stride will always be 1, offset will always be 0.
* @return
*/
def copy: DenseVector[V] = {
implicit val man = ClassTag[V](data.getClass.getComponentType.asInstanceOf[Class[V]])
val r = new DenseVector(new Array[V](length))
r := this
r
}
/**
* same as apply(i). Gives the value at the underlying offset.
* @param i index into the data array
* @return apply(i)
*/
def valueAt(i: Int): V = apply(i)
/**
* Unsafe version of above, a way to skip the checks.
*/
@deprecated("This isn't actually any faster any more", "0.12-SNAPSHOT")
def unsafeValueAt(i: Int): V = data(offset + i * stride)
/**
* Gives the logical index from the physical index.
* @param i
* @return i
*/
def indexAt(i: Int): Int = i
/**
* Always returns true.
*
* Some storages (namely HashStorage) won't have active
* indices packed. This lets you know if the bin is
* actively in use.
* @param i index into index/data arrays
* @return
*/
def isActive(i: Int): Boolean = true
/**
* Always returns true.
* @return
*/
def allVisitableIndicesActive: Boolean = true
/**
* Faster foreach
* @param fn
* @tparam U
*/
override def foreach[@spec(Unit) U](fn: (V) => U): Unit = {
if (stride == 1) { // ABCE stuff
cforRange(offset until (offset + length)) { j =>
fn(data(j))
}
} else {
var i = offset
cforRange(0 until length) { j =>
fn(data(i))
i += stride
}
}
}
/**
* Slices the DenseVector over the range [start, end) with the given stride.
* @param start
* @param end
* @param stride
*/
def slice(start: Int, end: Int, stride: Int=1): DenseVector[V] = {
if(start > end || start < 0) throw new IllegalArgumentException("Slice arguments " + start +", " +end +" invalid.")
if(end > length || end < 0) throw new IllegalArgumentException("End " + end + "is out of bounds for slice of DenseVector of length " + length)
new DenseVector(data, start * this.stride + offset, stride * this.stride, (end-start)/stride)
}
// <editor-fold defaultstate="collapsed" desc=" Conversions (DenseMatrix, Array, Scala Vector) ">
/** Creates a copy of this DenseVector that is represented as a 1 by length DenseMatrix */
def toDenseMatrix: DenseMatrix[V] = {
copy.asDenseMatrix
}
/** Creates a view of this DenseVector that is represented as a 1 by length DenseMatrix */
def asDenseMatrix: DenseMatrix[V] = {
new DenseMatrix[V](1, length, data, offset, stride)
}
override def toArray(implicit cm: ClassTag[V]): Array[V] = if(stride == 1){
ArrayUtil.copyOfRange(data, offset, offset + length)
} else {
val arr = new Array[V](length)
var i = 0
var off = offset
while(i < length) {
arr(i) = data(off)
off += stride
i += 1
}
arr
}
/**Returns copy of this [[breeze.linalg.DenseVector]] as a [[scala.Vector]]*/
def toScalaVector()(implicit cm: ClassTag[V]): scala.Vector[V] = this.toArray.toVector
// </editor-fold>
@throws(classOf[ObjectStreamException])
protected def writeReplace(): Object = {
new DenseVector.SerializedForm(data, offset, stride, length)
}
}
object DenseVector extends VectorConstructors[DenseVector]
with DenseVector_GenericOps
with DenseVectorOps
with DenseVector_OrderingOps
with DenseVector_SpecialOps {
def zeros[@spec(Double, Int, Float, Long) V: ClassTag : Zero](size: Int): DenseVector[V] = {
val data = new Array[V](size)
if(size != 0 && data(0) != implicitly[Zero[V]].zero)
ArrayUtil.fill(data, 0, data.length, implicitly[Zero[V]].zero)
new DenseVector(data)
}
def apply[@spec(Double, Int, Float, Long) V](values: Array[V]): DenseVector[V] = new DenseVector(values)
def ones[@spec(Double, Int, Float, Long) V: ClassTag:Semiring](size: Int): DenseVector[V] = fill[V](size, implicitly[Semiring[V]].one)
def fill[@spec(Double, Int, Float, Long) V: ClassTag:Semiring](size: Int, v: V): DenseVector[V] = {
val r = apply(new Array[V](size))
assert(r.stride == 1)
ArrayUtil.fill(r.data, r.offset, r.length, v)
r
}
// concatenation
/**
* Horizontal concatenation of two or more vectors into one matrix.
* @throws IllegalArgumentException if vectors have different sizes
*/
def horzcat[V: ClassTag:Zero](vectors: DenseVector[V]*): DenseMatrix[V] = {
val size = vectors.head.size
if (!(vectors forall (_.size == size)))
throw new IllegalArgumentException("All vectors must have the same size!")
val result = DenseMatrix.zeros[V](size, vectors.size)
for ((v, col) <- vectors.zipWithIndex)
result(::, col) := v
result
}
/**
* Vertical concatenation of two or more column vectors into one large vector.
*/
def vertcat[V](vectors: DenseVector[V]*)(implicit canSet: OpSet.InPlaceImpl2[DenseVector[V], DenseVector[V]], vman: ClassTag[V], zero: Zero[V]): DenseVector[V] = {
val size = vectors.foldLeft(0)(_ + _.size)
val result = zeros[V](size)
var offset = 0
for (v <- vectors) {
result.slice(offset, offset + v.size) := v
offset += v.size
}
result
}
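// Concrete illustration of the two concatenations above (comment only, not original code):
// horzcat(DenseVector(1.0, 2.0), DenseVector(3.0, 4.0)) is the 2x2 matrix with columns
// (1, 2) and (3, 4), while vertcat of the same two vectors is DenseVector(1.0, 2.0, 3.0, 4.0).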
// capabilities
implicit def canCreateZerosLike[V:ClassTag:Zero]:CanCreateZerosLike[DenseVector[V], DenseVector[V]] =
new CanCreateZerosLike[DenseVector[V], DenseVector[V]] {
def apply(v1: DenseVector[V]): DenseVector[V] = {
zeros[V](v1.length)
}
}
implicit def canCopyDenseVector[V:ClassTag]: CanCopy[DenseVector[V]] = {
new CanCopy[DenseVector[V]] {
def apply(v1: DenseVector[V]): DenseVector[V] = {
v1.copy
}
}
}
implicit def negFromScale[V](implicit scale: OpMulScalar.Impl2[DenseVector[V], V, DenseVector[V]], field: Ring[V]) = {
new OpNeg.Impl[DenseVector[V], DenseVector[V]] {
override def apply(a : DenseVector[V]): DenseVector[V] = {
scale(a, field.negate(field.one))
}
}
}
implicit def canMapValues[@specialized(Int, Float, Double) V, @specialized(Int, Float, Double) V2](implicit man: ClassTag[V2]): CanMapValues[DenseVector[V], V, V2, DenseVector[V2]] = {
new CanMapValues[DenseVector[V], V, V2, DenseVector[V2]] {
/**Maps all key-value pairs from the given collection. */
def apply(from: DenseVector[V], fn: (V) => V2): DenseVector[V2] = {
val out = new Array[V2](from.length)
// threeway fork, following benchmarks and hotspot docs on Array Bounds Check Elimination (ABCE)
// https://wikis.oracle.com/display/HotSpotInternals/RangeCheckElimination
if (from.noOffsetOrStride) {
fastestPath(out, fn, from.data)
} else if (from.stride == 1) {
mediumPath(out, fn, from.data, from.offset)
} else {
slowPath(out, fn, from.data, from.offset, from.stride)
}
new DenseVector[V2](out)
}
private def mediumPath(out: Array[V2], fn: (V) => V2, data: Array[V], off: Int): Unit = {
cforRange(0 until out.length) { j =>
out(j) = fn(data(j + off))
}
}
private def fastestPath(out: Array[V2], fn: (V) => V2, data: Array[V]): Unit = {
cforRange(0 until out.length) { j =>
out(j) = fn(data(j))
}
}
final private def slowPath(out: Array[V2], fn: (V) => V2, data: Array[V], off: Int, stride: Int): Unit = {
var i = 0
var j = off
while (i < out.length) {
out(i) = fn(data(j))
i += 1
j += stride
}
}
}
}
implicit def scalarOf[T]: ScalarOf[DenseVector[T], T] = ScalarOf.dummy
implicit def canIterateValues[V]: CanTraverseValues[DenseVector[V], V] =
new CanTraverseValues[DenseVector[V], V] {
def isTraversableAgain(from: DenseVector[V]): Boolean = true
/** Iterates all key-value pairs from the given collection. */
def traverse(from: DenseVector[V], fn: ValuesVisitor[V]): Unit = {
fn.visitArray(from.data, from.offset, from.length, from.stride)
}
}
implicit def canTraverseZipValues[V,W]: CanZipAndTraverseValues[DenseVector[V], DenseVector[W], V,W] =
new CanZipAndTraverseValues[DenseVector[V], DenseVector[W], V,W] {
/** Iterates all key-value pairs from the given collection. */
def traverse(from1: DenseVector[V], from2: DenseVector[W], fn: PairValuesVisitor[V,W]): Unit = {
if (from1.size != from2.size) {
throw new IllegalArgumentException("Vectors to be zipped must have same size")
}
cfor(0)(i => i < from1.size, i => i+1)(i => {
fn.visit(from1(i), from2(i))
})
}
}
implicit def canTraverseKeyValuePairs[V]: CanTraverseKeyValuePairs[DenseVector[V], Int, V] =
new CanTraverseKeyValuePairs[DenseVector[V], Int, V] {
def isTraversableAgain(from: DenseVector[V]): Boolean = true
/** Iterates all key-value pairs from the given collection. */
def traverse(from: DenseVector[V], fn: CanTraverseKeyValuePairs.KeyValuePairsVisitor[Int, V]): Unit = {
import from._
fn.visitArray((ind: Int)=> (ind - offset)/stride, data, offset, length, stride)
}
}
implicit def canTransformValues[@specialized(Int, Float, Double) V]: CanTransformValues[DenseVector[V], V] =
new CanTransformValues[DenseVector[V], V] {
def transform(from: DenseVector[V], fn: (V) => V) {
val data = from.data
val length = from.length
val stride = from.stride
val offset = from.offset
if (stride == 1) {
cforRange(offset until offset + length) { j =>
data(j) = fn(data(j))
}
} else {
slowPath(fn, data, length, stride, offset)
}
}
private def slowPath(fn: (V) => V, data: Array[V], length: Int, stride: Int, offset: Int): Unit = {
val end = offset + stride * length
var j = offset
while (j != end) {
data(j) = fn(data(j))
j += stride
}
}
def transformActive(from: DenseVector[V], fn: (V) => V) {
transform(from, fn)
}
}
implicit def canMapPairs[V, V2](implicit man: ClassTag[V2]):CanMapKeyValuePairs[DenseVector[V], Int, V, V2, DenseVector[V2]] =
new CanMapKeyValuePairs[DenseVector[V], Int, V, V2, DenseVector[V2]] {
/**Maps all key-value pairs from the given collection. */
def map(from: DenseVector[V], fn: (Int, V) => V2): DenseVector[V2] = {
// slow: DenseVector.tabulate(from.length)(i => fn(i, from(i)))
val arr = new Array[V2](from.length)
val d = from.data
val stride = from.stride
var i = 0
var j = from.offset
while(i < arr.length) {
arr(i) = fn(i, d(j))
i += 1
j += stride
}
new DenseVector[V2](arr)
}
/**Maps all active key-value pairs from the given collection. */
def mapActive(from: DenseVector[V], fn: (Int, V) => V2): DenseVector[V2] = {
map(from, fn)
}
}
// slicing
// specialize to get the good class
implicit def canSlice[@specialized(Int, Float, Double) V]: CanSlice[DenseVector[V], Range, DenseVector[V]] = {
new CanSlice[DenseVector[V], Range, DenseVector[V]] {
def apply(v: DenseVector[V], re: Range): DenseVector[V] = {
val range: Range = re.getRangeWithoutNegativeIndexes( v.length )
require(range.isEmpty || range.last < v.length)
require(range.isEmpty || range.start >= 0)
new DenseVector(v.data, offset = v.offset + v.stride * range.start, stride = v.stride * range.step, length = range.length)
}
}
}
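// Illustrative sketch (not from the original source): slicing returns a view over the same
// underlying array, using the offset/stride arithmetic in the constructor call above; no data is copied.
//   val v = DenseVector(0.0, 1.0, 2.0, 3.0, 4.0, 5.0)
//   val s = v(1 to 4 by 2)   // view of v(1) and v(3): offset = 1, stride = 2, length = 2
//   s(0) = 10.0              // writes through to v(1), because the slice shares v.data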
implicit def canTransposeComplex: CanTranspose[DenseVector[Complex], DenseMatrix[Complex]] = {
new CanTranspose[DenseVector[Complex], DenseMatrix[Complex]] {
def apply(from: DenseVector[Complex]): DenseMatrix[Complex] = {
new DenseMatrix(data = from.data map { _.conjugate },
offset = from.offset,
cols = from.length,
rows = 1,
majorStride = from.stride)
}
}
}
class CanZipMapValuesDenseVector[@spec(Double, Int, Float, Long) V, @spec(Int, Double) RV:ClassTag] extends CanZipMapValues[DenseVector[V],V,RV,DenseVector[RV]] {
def create(length : Int) = new DenseVector(new Array[RV](length))
/** Maps all corresponding values from the two collections. */
def map(from: DenseVector[V], from2: DenseVector[V], fn: (V, V) => RV): DenseVector[RV] = {
require(from.length == from2.length, s"Vectors must have same length")
val result = create(from.length)
var i = 0
while (i < from.length) {
result.data(i) = fn(from(i), from2(i))
i += 1
}
result
}
}
implicit def zipMap[V, R:ClassTag]: CanZipMapValuesDenseVector[V, R] = new CanZipMapValuesDenseVector[V, R]
implicit val zipMap_d: CanZipMapValuesDenseVector[Double, Double] = new CanZipMapValuesDenseVector[Double, Double]
implicit val zipMap_f: CanZipMapValuesDenseVector[Float, Float] = new CanZipMapValuesDenseVector[Float, Float]
implicit val zipMap_i: CanZipMapValuesDenseVector[Int, Int] = new CanZipMapValuesDenseVector[Int, Int]
class CanZipMapKeyValuesDenseVector[@spec(Double, Int, Float, Long) V, @spec(Int, Double) RV:ClassTag] extends CanZipMapKeyValues[DenseVector[V],Int, V,RV,DenseVector[RV]] {
def create(length : Int) = new DenseVector(new Array[RV](length))
/** Maps all corresponding values from the two collections. */
def map(from: DenseVector[V], from2: DenseVector[V], fn: (Int, V, V) => RV): DenseVector[RV] = {
require(from.length == from2.length, "Vector lengths must match!")
val result = create(from.length)
var i = 0
while (i < from.length) {
result.data(i) = fn(i, from(i), from2(i))
i += 1
}
result
}
override def mapActive(from: DenseVector[V], from2: DenseVector[V], fn: ((Int), V, V) => RV): DenseVector[RV] = {
map(from, from2, fn)
}
}
implicit def zipMapKV[V, R:ClassTag]: CanZipMapKeyValuesDenseVector[V, R] = new CanZipMapKeyValuesDenseVector[V, R]
implicit val canAddIntoD: OpAdd.InPlaceImpl2[DenseVector[Double], DenseVector[Double]] = {
new OpAdd.InPlaceImpl2[DenseVector[Double], DenseVector[Double]] {
def apply(a: DenseVector[Double], b: DenseVector[Double]) = {
canDaxpy(a, 1.0, b)
}
implicitly[BinaryUpdateRegistry[Vector[Double], Vector[Double], OpAdd.type]].register(this)
}
}
implicit object canDaxpy extends scaleAdd.InPlaceImpl3[DenseVector[Double], Double, DenseVector[Double]] with Serializable {
def apply(y: DenseVector[Double], a: Double, x: DenseVector[Double]) {
require(x.length == y.length, s"Vectors must have same length")
// using blas here is always a bad idea: for a simple axpy the native-call overhead outweighs any gain, so use a plain loop.
if (x.noOffsetOrStride && y.noOffsetOrStride) {
val ad = x.data
val bd = y.data
cforRange(0 until x.length) { i =>
bd(i) += ad(i) * a
}
} else {
cforRange(0 until x.length) { i =>
y(i) += x(i) * a
}
}
}
}
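// Illustrative sketch (not from the original source, assuming the breeze.linalg.axpy helper):
// this instance is what makes `axpy(a, x, y)` compute y += x * a in place for dense double vectors.
//   val y = DenseVector(1.0, 1.0)
//   val x = DenseVector(2.0, 3.0)
//   axpy(0.5, x, y)   // y is now DenseVector(2.0, 2.5)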
implicitly[TernaryUpdateRegistry[Vector[Double], Double, Vector[Double], scaleAdd.type]].register(canDaxpy)
implicit val canAddD: OpAdd.Impl2[DenseVector[Double], DenseVector[Double], DenseVector[Double]] = {
pureFromUpdate_Double(canAddIntoD)
}
implicitly[BinaryRegistry[Vector[Double], Vector[Double], OpAdd.type, Vector[Double]]].register(canAddD)
implicit val canSubIntoD: OpSub.InPlaceImpl2[DenseVector[Double], DenseVector[Double]] = {
new OpSub.InPlaceImpl2[DenseVector[Double], DenseVector[Double]] {
def apply(a: DenseVector[Double], b: DenseVector[Double]) = {
canDaxpy(a, -1.0, b)
}
implicitly[BinaryUpdateRegistry[Vector[Double], Vector[Double], OpSub.type]].register(this)
}
}
implicit val canSubD: OpSub.Impl2[DenseVector[Double], DenseVector[Double], DenseVector[Double]] = {
pureFromUpdate_Double(canSubIntoD)
}
implicitly[BinaryRegistry[Vector[Double], Vector[Double], OpSub.type, Vector[Double]]].register(canSubD)
implicit object canDotD extends OpMulInner.Impl2[DenseVector[Double], DenseVector[Double], Double] {
def apply(a: DenseVector[Double], b: DenseVector[Double]) = {
require(a.length == b.length, s"Vectors must have same length")
if (a.noOffsetOrStride && b.noOffsetOrStride && a.length < DenseVectorSupportMethods.MAX_SMALL_DOT_PRODUCT_LENGTH) {
DenseVectorSupportMethods.smallDotProduct_Double(a.data, b.data, a.length)
} else if (a.length < 200) { // benchmarks suggest break-even point is around length 200
if (a.noOffsetOrStride && b.noOffsetOrStride) {
fastMediumSizePath(a, b)
} else {
slowMediumSizePath(a, b)
}
} else {
blasPath(a, b)
}
}
private def blasPath(a: DenseVector[Double], b: DenseVector[Double]): Double = {
val boff = if (b.stride >= 0) b.offset else (b.offset + b.stride * (b.length - 1))
val aoff = if (a.stride >= 0) a.offset else (a.offset + a.stride * (a.length - 1))
blas.ddot(
a.length, b.data, boff, b.stride, a.data, aoff, a.stride)
}
private def slowMediumSizePath(a: DenseVector[Double], b: DenseVector[Double]): Double = {
var sum = 0.0
cforRange(0 until a.length) { i =>
sum += a(i) * b(i)
}
sum
}
private def fastMediumSizePath(a: DenseVector[Double], b: DenseVector[Double]): Double = {
var sum = 0.0
val ad = a.data
val bd = b.data
cforRange(0 until a.length) { i =>
sum += ad(i) * bd(i)
}
sum
}
}
implicitly[BinaryRegistry[Vector[Double], Vector[Double], OpMulInner.type, Double]].register(canDotD)
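// Illustrative sketch (not from the original source): the dispatch above means a plain
// `a dot b` on DenseVector[Double] picks the unrolled small-vector path, a scalar loop,
// or BLAS depending on length, offset and stride, without the caller doing anything:
//   val a = DenseVector(1.0, 2.0, 3.0)
//   val b = DenseVector(4.0, 5.0, 6.0)
//   val d = a dot b   // 32.0, via the small-vector path assuming 3 < MAX_SMALL_DOT_PRODUCT_LENGTH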
/*
TODO: scaladoc crashes on this. I don't know why. It makes me want to die a little.
Returns the k-norm of this Vector.
*/
@expand
@expand.valify
implicit def canNorm[@expand.args(Int, Float, Long, BigInt, Complex) T]: norm.Impl2[DenseVector[T], Double, Double] = {
new norm.Impl2[DenseVector[T], Double, Double] {
def apply(v: DenseVector[T], n: Double): Double = {
import v._
if (n == 1) {
var sum = 0.0
foreach (v => sum += v.abs.toDouble )
sum
} else if (n == 2) {
var sum = 0.0
foreach (v => { val nn = v.abs.toDouble; sum += nn * nn })
math.sqrt(sum)
} else if (n == Double.PositiveInfinity) {
var max = 0.0
foreach (v => { val nn = v.abs.toDouble; if (nn > max) max = nn })
max
} else {
var sum = 0.0
foreach (v => { val nn = v.abs.toDouble; sum += math.pow(nn,n) })
math.pow(sum, 1.0 / n)
}
}
}
}
/**
* Returns the p-norm of this Vector (specialized for Double).
*/
implicit def canNorm_Double: norm.Impl2[DenseVector[Double], Double, Double] = {
new norm.Impl2[DenseVector[Double], Double, Double] {
def apply(v: DenseVector[Double], p: Double): Double = {
if (p == 2) {
var sq = 0.0
v.foreach (x => sq += x * x)
math.sqrt(sq)
} else if (p == 1) {
var sum = 0.0
v.foreach (x => sum += math.abs(x))
sum
} else if (p == Double.PositiveInfinity) {
var max = 0.0
v.foreach (x => max = math.max(max, math.abs(x)))
max
} else if (p == 0) {
var nnz = 0
v.foreach (x => if (x != 0) nnz += 1)
nnz
} else {
var sum = 0.0
v.foreach (x => sum += math.pow(math.abs(x), p))
math.pow(sum, 1.0 / p)
}
}
}
}
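// Illustrative sketch (not from the original source): with the implicit above in scope,
// `norm` dispatches on the requested p:
//   val v = DenseVector(3.0, -4.0)
//   norm(v, 1)                        // 7.0  (sum of absolute values)
//   norm(v, 2)                        // 5.0  (Euclidean norm)
//   norm(v, Double.PositiveInfinity)  // 4.0  (max absolute value)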
implicit def canDim[E]: dim.Impl[DenseVector[E],Int] = new dim.Impl[DenseVector[E],Int] {
def apply(v: DenseVector[E]): Int = v.length
}
// this generic method produces inefficient spaces for builtin element types (slow implicit lookup); prefer the specialized vals below
implicit def space[E](implicit field: Field[E], man: ClassTag[E]): MutableFiniteCoordinateField[DenseVector[E],Int,E] = {
import field._
implicit val cmv = canMapValues[E,E]
MutableFiniteCoordinateField.make[DenseVector[E],Int,E]
}
implicit val space_Double: MutableFiniteCoordinateField[DenseVector[Double], Int, Double] = {
MutableFiniteCoordinateField.make[DenseVector[Double],Int,Double]
}
implicit val space_Float: MutableFiniteCoordinateField[DenseVector[Float], Int, Float] = {
MutableFiniteCoordinateField.make[DenseVector[Float],Int,Float]
}
implicit val space_Int: MutableFiniteCoordinateField[DenseVector[Int], Int, Int] = {
MutableFiniteCoordinateField.make[DenseVector[Int],Int,Int]
}
implicit val space_Long: MutableFiniteCoordinateField[DenseVector[Long], Int, Long] = {
MutableFiniteCoordinateField.make[DenseVector[Long],Int,Long]
}
object TupleIsomorphisms {
implicit object doubleIsVector extends Isomorphism[Double,DenseVector[Double]] {
def forward(t: Double) = DenseVector(t)
def backward(t: DenseVector[Double]) = { assert(t.size == 1); t(0)}
}
implicit object pdoubleIsVector extends Isomorphism[(Double,Double),DenseVector[Double]] {
def forward(t: (Double,Double)) = DenseVector(t._1,t._2)
def backward(t: DenseVector[Double]) = { assert(t.size == 2); (t(0),t(1))}
}
}
/**
* This class exists because @specialized instances don't respect the SerialVersionUID of the
* parent class, so DenseVector is serialized through this proxy form (rebuilt in readResolve).
* @param data
* @param offset
* @param stride
* @param length
*/
@SerialVersionUID(1L)
case class SerializedForm(data: Array[_],
offset: Int,
stride: Int,
length: Int) extends Serializable {
@throws(classOf[ObjectStreamException])
def readResolve():Object = {
data match {//switch to make specialized happy
case x: Array[Int] => new DenseVector(x, offset, stride, length)
case x: Array[Long] => new DenseVector(x, offset, stride, length)
case x: Array[Double] => new DenseVector(x, offset, stride, length)
case x: Array[Float] => new DenseVector(x, offset, stride, length)
case x: Array[Short] => new DenseVector(x, offset, stride, length)
case x: Array[Byte] => new DenseVector(x, offset, stride, length)
case x: Array[Char] => new DenseVector(x, offset, stride, length)
case x: Array[_] => new DenseVector(x, offset, stride, length)
}
}
}
// used to make sure the operators are loaded
@noinline
private def init() = {}
}
|
chen0031/breeze
|
math/src/main/scala/breeze/linalg/DenseVector.scala
|
Scala
|
apache-2.0
| 26,741
|
/*
* Copyright 2016 Nokia Solutions and Networks Oy
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.nokia.ntp.ct
package persistence
import scala.collection.mutable
import scala.concurrent.Future
import akka.persistence.SelectedSnapshot
import akka.persistence.SnapshotMetadata
import akka.persistence.SnapshotSelectionCriteria
import akka.persistence.snapshot.SnapshotStore
import cats.implicits._
/** In-memory snapshot store, only for testing. */
final class MockSnapshotStore extends SnapshotStore {
// we have to store the snapshots somewhere ...
@SuppressWarnings(Array("org.wartremover.warts.MutableDataStructures"))
private[this] val state: mutable.Map[String, List[(SnapshotMetadata, Any)]] =
mutable.Map()
override def deleteAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Unit] = {
val lst = state.getOrElse(persistenceId, Nil)
state.put(persistenceId, lst.filterNot { case (md, _) => matches(md, criteria) })
Future.successful(())
}
override def deleteAsync(metadata: SnapshotMetadata): Future[Unit] = {
val lst = state.getOrElse(metadata.persistenceId, Nil)
state.put(metadata.persistenceId, lst.filterNot { case (md, _) => md === metadata })
Future.successful(())
}
override def loadAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = {
val lst = state.getOrElse(persistenceId, Nil)
val hit = lst.find {
case (metadata, _) =>
matches(metadata, criteria)
}
Future.successful(hit.map { case (md, ss) => SelectedSnapshot(md, ss) })
}
override def saveAsync(metadata: SnapshotMetadata, snapshot: Any): Future[Unit] = {
val lst = state.getOrElse(metadata.persistenceId, Nil)
state.put(metadata.persistenceId, (metadata, snapshot) :: lst)
Future.successful(())
}
private[this] def matches(metadata: SnapshotMetadata, criteria: SnapshotSelectionCriteria): Boolean = {
(metadata.sequenceNr <= criteria.maxSequenceNr) && (metadata.timestamp <= criteria.maxTimestamp) &&
(metadata.sequenceNr >= criteria.minSequenceNr) && (metadata.timestamp >= criteria.minTimestamp)
}
}
|
nokia/akka-typed-persistence
|
src/test/scala/com/nokia/ntp/ct/persistence/MockSnapshotStore.scala
|
Scala
|
apache-2.0
| 2,697
|
package rta.concurrent
import java.util.concurrent.atomic.AtomicReference
import scala.annotation.tailrec
import scala.util.control.NonFatal
sealed trait Atomic[T] {
def get: T
def set(newValue: T): Unit
def compareAndSet(expect: T, update: T): Boolean
@inline final def update(f: T => T): Either[Throwable, T] = {
@inline @tailrec def set(): T = {
val oldValue = get
val newValue = f(oldValue)
if (!compareAndSet(oldValue, newValue)) set()
else newValue
}
try {
Right(concurrent.blocking(set()))
} catch {
case NonFatal(th) => Left(th)
}
}
}
object Atomic {
def apply[T](value: T): Atomic[T] = new Atomic[T] {
private[this] val ref = new AtomicReference[T](value)
@inline final def get: T = ref.get()
@inline final def set(newValue: T): Unit = ref.set(newValue)
@inline final def compareAndSet(expect: T, update: T): Boolean = ref.compareAndSet(expect, update)
}
}
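// Hedged usage sketch (not part of the original file): `update` retries the compare-and-set
// loop until it wins, and reports exceptions thrown by the update function as a Left.
//   val counter = Atomic(0)
//   counter.update(_ + 1)   // Right(1)
//   counter.update(_ / 0)   // Left(java.lang.ArithmeticException), value unchanged
//   counter.get             // 1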
|
kjanosz/RuleThemAll
|
utils/src/main/scala/rta/concurrent/Atomic.scala
|
Scala
|
apache-2.0
| 962
|
package org.higherstate.jameson.parsers
import org.higherstate.jameson.Path
import org.higherstate.jameson.tokenizers._
import org.higherstate.jameson.failures._
case class OptionParser[T](parser:Parser[T]) extends Parser[Option[T]] {
def parse(tokenizer:Tokenizer, path:Path) =
tokenizer.head match {
case NullToken | EndToken =>
Success(None)
case _ =>
parser.parse(tokenizer, path).map(Some(_))
}
override def default = Some(None)
def schema = parser.schema + ("defaultValue" -> null)
}
|
HigherState/jameson
|
src/main/scala/org/higherstate/jameson/parsers/OptionParser.scala
|
Scala
|
apache-2.0
| 538
|
package slamdata.engine.std
import scalaz._
import slamdata.engine.{Data, Func, Type, Mapping, SemanticError}
import SemanticError._
import Validation.{success, failure}
import NonEmptyList.nel
trait DateLib extends Library {
// NB: SQL specifies a function called `extract`, but that doesn't have comma-
// separated arguments. `date_part` is Postgres' name for the same thing
// with commas.
val Extract = Mapping(
"date_part",
"Pulls out a part of the date.",
Type.Str :: Type.Temporal :: Nil,
partialTyper {
case Type.Const(Data.Str(_)) :: Type.Temporal :: Nil => Type.Numeric
},
_ match {
case Type.Numeric => success(Type.Str :: Type.Temporal :: Nil)
case t => failure(nel(TypeError(Type.Numeric, t, Some("numeric function where non-numeric expression is expected")), Nil))
}
)
def functions = Extract :: Nil
}
object DateLib extends DateLib
|
mossprescott/quasar
|
src/main/scala/slamdata/engine/std/date.scala
|
Scala
|
agpl-3.0
| 921
|
package com.intenthq.pucket.thrift.writer
import com.intenthq.pucket.thrift._
import com.intenthq.pucket.writer.Writer
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.parquet.hadoop.ParquetWriter
import org.apache.parquet.hadoop.metadata.CompressionCodecName
import org.apache.parquet.thrift.ThriftParquetWriter
import scalaz.\\/
/** Functional wrapper for thrift parquet writer */
case class ThriftWriter[T] private (writer: ParquetWriter[T]) extends Writer[T, Throwable] {
/** @inheritdoc */
override def write(data: T, checkPoint: Long = 0): Throwable \\/ ThriftWriter[T] =
\\/.fromTryCatchNonFatal(writer.write(data)).map(_ => this)
/** @inheritdoc */
override def close: Throwable \\/ Unit = \\/.fromTryCatchNonFatal(writer.close())
}
/** Factory object for [[ThriftWriter]] */
object ThriftWriter {
/** Create a new thrift writer
*
* @param schemaClass the thrift schema class
* @param path path to the file for writing
* @param compression compression codec
* @param blockSize parquet block size
* @param conf hadoop configuration
* @tparam T type of data to be written
* @return a new thrift writer or a creation error
*/
def apply[T <: Thrift](schemaClass: Class[T],
path: Path,
compression: CompressionCodecName,
blockSize: Int,
conf: Configuration): Throwable \\/ ThriftWriter[T] =
\\/.fromTryCatchNonFatal(
ThriftWriter(
new ThriftParquetWriter[T](
path,
schemaClass,
compression,
blockSize,
ParquetWriter.DEFAULT_PAGE_SIZE,
ParquetWriter.DEFAULT_IS_DICTIONARY_ENABLED,
ParquetWriter.DEFAULT_IS_VALIDATING_ENABLED,
conf)
)
)
}
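// Hedged usage sketch (not part of the original file); `MyThriftRecord`, `record`, the path and
// the block size below are placeholders, not names from this project:
//   val writerOrError: Throwable \\/ ThriftWriter[MyThriftRecord] =
//     ThriftWriter(classOf[MyThriftRecord], new Path("/tmp/data.parquet"),
//                  CompressionCodecName.SNAPPY, 128 * 1024 * 1024, new Configuration())
//   writerOrError.flatMap(_.write(record)).flatMap(_.close)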
|
intenthq/pucket
|
thrift/src/main/scala/com/intenthq/pucket/thrift/writer/ThriftWriter.scala
|
Scala
|
mit
| 1,846
|
import cascading.pipe.Pipe
import com.twitter.scalding._
/**
Scalding Tutorial ported to use the Type-safe API (TDsl)
(rather than Cascading's Fields API). The examples here roughly correspond
to those in `tutorial/Tutorial{0..5}.scala`.
These tutorials are all run from this single file; which one is run can
be chosen with a command-line flag "--tutorial". For instance, to run the
first tutorial example:
yarn jar target/scalding-tutorial-0.14.0.jar TypedTutorial --local \\
--tutorial 0 \\
--input data/hello.txt \\
--output target/data/output0.txt \\
--words data/word_scores.tsv
(Note: only tutorial 5 uses "word_scores.tsv")
**/
class TypedTutorial(args : Args) extends Job(args) {
args("tutorial") match {
/**
Tutorial {0,1}: Write out to a TSV file.
----------------------------------------
In this first version we will be as explicit as possible to show all
the steps required to go from a raw text file to a typed stream.
**/
case "0" | "1" => {
// The TextLine source splits the input by lines.
val textSource = TextLine(args("input"))
// Create a type-safe pipe from the TextLine.
val lines: TypedPipe[String] =
TypedPipe.from[String](textSource)
// Write the typed pipe out to a tab-delimited file.
lines.write(TypedTsv[String](args("output")))
}
/**
Tutorial 2: Simple map
----------------------
Reverse all the strings. Notice that we've now left off the [String] type.
Scala can generally infer these types for us, making the code cleaner.
**/
case "2" | "map" => {
// Create a typed pipe from the TextLine (of type TypedPipe[String] still)
TypedPipe.from(TextLine(args("input")))
// Transform each line, reversing it. Output is a new TypedPipe, still of String.
.map(_.reverse)
// Note, the types for the TypedTsv *can* be inferred by Scala here.
// However, it's best to specify them explicitly so that if the
// output type changes, it is detected and doesn't break the next
// thing to read from the output file.
.write(TypedTsv[String](args("output")))
}
/**
Tutorial 3: Flat Map
---------------------
Dump all the words.
**/
case "3" | "flatmap" => {
TypedPipe.from(TextLine(args("input")))
// flatMap is like map, but instead of returning a single item
// from the function, we return a collection of items. Each of
// these items will create a new entry in the data stream; here,
// we'll end up with a new entry for each word.
.flatMap(_.split("\\\\s"))
// output of flatMap is still a collection of String
.write(TypedTsv[String](args("output")))
}
/**
Tutorial 4: Word Count
----------------------
Now that we have a stream of words, clearly we're ready for
that most exciting of MapReduce examples: the Word Count.
**/
case "4" | "wordcount" => {
// Get the words (just like above in case "3")
val words = TypedPipe.from(TextLine(args("input")))
.flatMap(_.split("\\\\s"))
// To count the words, we use TypedPipe's `groupBy` method.
// However, this no longer returns a `TypedPipe[T]`, but rather
// a `Grouped[K,T]` based on the type of the key used to group by.
//
// groupBy accepts a function to determine the key for grouping.
// In the case of word count, let's imagine we want to make sure
// capitalization doesn't matter, so to come up with the key,
// we normalize it to lower case.
val groups : Grouped[String,String] = words.groupBy(_.toLowerCase)
// Next we specify what to do with each aggregation. In the case
// of word count, we simply want the size of each group. This
// operation results in a new `Grouped` that has the key (String,
// the lower case words), and the counts (Long).
//
// Note: To do more interesting aggregations, Scalding supports
// a variety of operations, such as `sum`, `reduce`, `foldLeft`,
// `mapGroup`, etc, that can all be applied efficiently on Monoids
// (primitives like Long, container types like `Map`, or custom
// monoids you define yourself). See the wiki for more details:
// https://github.com/twitter/scalding/wiki/Type-safe-api-reference
val counts = groups.size
// And finally, we dump these results to a TypedTsv with the
// correct Tuple type.
counts.write(TypedTsv[(String,Long)](args("output")))
}
/**
Tutorial 5: Demonstrate joins
-----------------------------
Associate a score with each word and compute a score for each line.
Note: this example is a bit contrived, but serves to demonstrate
how to combine multiple input sources.
**/
case "5" | "join" => {
// Load the scores for each word from TSV file and group by word.
val scores: Grouped[String,Double] =
// For TypedTsv, Scalding coerces the fields to the specified types,
// throwing an exception if any line fails.
TypedPipe.from(TypedTsv[(String,Double)](args("words")))
// group by word so we can join it
.group
// get the lines, this time from an 'OffsetTextLine' which is a
// typed wrapper on 'TextLine' that contains the 'byte offset' and
// text of each line in the file.
val lines: TypedPipe[(Long,String)] = TypedPipe.from(OffsetTextLine(args("input")))
// Split lines into words, but keep their original line offset with them.
val wordsWithLine : Grouped[String,Long] =
lines
.flatMap{ case (offset, line) =>
// split into words
line.split("\\\\s")
// keep the line offset with them
.map(word => (word.toLowerCase, offset))
}
// make the 'word' field the key
.group
// Associate scores with each word; merges the two value types into
// a tuple: [String,Long] join [String,Double] -> [String,(Long,Double)]
val scoredWords = wordsWithLine.join(scores)
// get scores for each line (indexed by line number)
val scoredLinesByNumber =
scoredWords
// select the line offset and score fields
.map{ case (word,(offset,score)) => (offset,score) }
// group by line offset (groups all the words for a line together)
.group
// compute total score per line
.sum
// Group and sum are often run together in this way.
// The `sumByKey` operation performs both.
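// For instance (illustrative, not in the original tutorial), the group-then-sum above
// could be written in one go as:
//   scoredWords.map{ case (word, (offset, score)) => (offset, score) }.sumByKey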
// Associate the original line text with the computed score,
// discard the 'offset' field
val scoredLines: TypedPipe[(String,Double)] =
lines
// index lines by 'offset'
.group
// associate scores with lines (by offset)
.join(scoredLinesByNumber)
// take just the value fields (discard the 'line offset')
.values
// write out the final result
scoredLines.write(TypedTsv[(String,Double)](args("output")))
}
/**
Interoperability with Fields API
--------------------------------
Scalding also provides a thinner, un-type-safe wrapper over Cascading
which is known as the Fields API because each record has a number of
named "fields".
Most jobs can be done completely in the Typed API, but for compatibility,
there are ways to go back and forth between the two schemes, which the
next couple cases demonstrate.
**/
/**
Pipe vs. TypedPipe
------------------
TypedPipes can be easily converted to Pipes and vice-versa.
**/
case "pipes" => {
// calling 'read' on a source returns an un-typed Pipe
// TextLine, by default, contains two fields: 'offset, and 'line.
val rawPipe: Pipe = TextLine(args("input")).read
// To convert to a typed pipe, we must specify the fields we want
// and their types:
val lines: TypedPipe[(Long,String)] =
TypedPipe.from[(Long,String)](rawPipe, ('offset,'line))
// We can operate on this typed pipe as above, and come up with a
// different set of fields
val lineSizes: TypedPipe[Long] = lines.map{ case (offset,line) => line.length }
// To convert back to a Fields Pipe, we must specify the names of the fields:
val lineSizesField: Pipe = lineSizes.toPipe('size)
// finally, we can write out this untyped pipe with an untyped sink:
lineSizesField.write(Tsv(args("output")))
}
/**
Bonus: Typed blocks
-------------------
An alternative to working completely in typed mode is to use
`typed` blocks, which create a TypedPipe within the scope, and then
map the output back into an untyped Pipe. You specify the fields to
map in and out using the `->` pair passed to `typed()`.
**/
case "block" => {
// Get the .typed enrichment
import TDsl._
TextLine(args("input")).read
.typed('line -> 'size) { tp: TypedPipe[String] =>
// now operate on the typed pipe
tp.map(_.length)
}
// the final output will have just the 'size field
// and can be dumped using the un-typed Tsv source.
.write(Tsv(args("output")))
}
}
}
|
Cascading/scalding-tutorial
|
src/main/scala/tutorial/TypedTutorial.scala
|
Scala
|
apache-2.0
| 9,561
|
package org.trustedanalytics.sparktk.frame.internal.ops.flatten
import org.apache.commons.lang3.StringUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.trustedanalytics.sparktk.frame.DataTypes
import org.trustedanalytics.sparktk.frame.DataTypes.DataType
import org.trustedanalytics.sparktk.frame.internal.{ FrameState, FrameTransform, BaseFrame }
trait FlattenColumnsTransform extends BaseFrame {
/**
* Spread data to multiple rows based on cell data.
*
* Splits cells in the specified columns into multiple rows according to a string delimiter. New rows are a full
* copy of the original row, but the specified columns only contain one value. The original row is deleted.
*
* @param columns The columns to be flattened, with an optional delimiter. The default delimiter is a comma (,).
*/
def flattenColumns(columns: List[(String, Option[String])]): Unit = {
execute(FlattenColumns(columns))
}
}
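// Hedged illustration (not from the source): the column name, delimiter and values below are
// placeholders. Flattening the "tags" column of a row ("x", "a;b") with delimiter ";"
//   frame.flattenColumns(List(("tags", Some(";"))))
// produces two rows, ("x", "a") and ("x", "b"); every other column is copied unchanged.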
case class FlattenColumns(columns: List[(String, Option[String])]) extends FrameTransform {
require(columns != null && columns.nonEmpty, "column list is required")
columns.foreach {
case (columnName, delimiter) => require(StringUtils.isNotBlank(columnName), "non empty column names are required.")
}
override def work(state: FrameState): FrameState = {
var flattener: RDD[Row] => RDD[Row] = null
val columnInfo = columns.map(c => (state.schema.columnIndex(c._1), state.schema.columnDataType(c._1), c._2.getOrElse(",")))
var schema = state.schema
for (column <- columnInfo) {
column._2 match {
case DataTypes.vector(length) =>
schema = schema.convertType(column._1, DataTypes.float64)
case DataTypes.string => // pass; do nothing
case _ =>
val illegalDataType = column._2.toString
throw new IllegalArgumentException(s"Invalid column ('${schema.columnNames(column._1)}') data type provided: $illegalDataType. Only string or vector columns can be flattened.")
}
}
flattener = FlattenColumnsFunctions.flattenRddByColumnIndices(columnInfo)
// run the operation
val flattenedRDD = flattener(state.rdd)
// return result frame
FrameState(flattenedRDD, schema)
}
}
|
shibanis1/spark-tk
|
core/src/main/scala/org/trustedanalytics/sparktk/frame/internal/ops/flatten/FlattenColumns.scala
|
Scala
|
apache-2.0
| 2,244
|
package systems.adaptix.bling.data
import scalikejdbc._
// TODO: EXISTS queries
/**
* A SelectionCriterion represents a constraint to be imposed on a selection from a given table in a database. This corresponds to the argument to an SQL WHERE clause.
* SelectionCriteria should be constructed using one of the following objects:
*
* 1. [[systems.adaptix.bling.data.Eq Eq]]
*
* 2. [[systems.adaptix.bling.data.Ne Ne]]
*
* 3. [[systems.adaptix.bling.data.Lt Lt]]
*
* 4. [[systems.adaptix.bling.data.Le Le]]
*
* 5. [[systems.adaptix.bling.data.Gt Gt]]
*
* 6. [[systems.adaptix.bling.data.Ge Ge]]
*
* 7. [[systems.adaptix.bling.data.Null Null]]
*
* 8. [[systems.adaptix.bling.data.NotNull NotNull]]
*
* 9. [[systems.adaptix.bling.data.In In]]
*
* 10. [[systems.adaptix.bling.data.And And]]
*
* 11. [[systems.adaptix.bling.data.Or Or]]
*
* 12. [[systems.adaptix.bling.data.Not Not]]
*
* Alternatively, if you need to specify through a method argument that there are no selection criteria, please pass the [[systems.adaptix.bling.data.NoCriterion]] object.
*
* Created by nkashyap on 6/7/15.
*/
sealed trait SelectionCriterion {
// TODO: There could be an issue here about how values are bound. Depends on the SQL parser. If parsing is done left-to-right, then what this method is doing now with compound constraints is alright.
/**
* Generates a String containing the SQL required to impose the constraint along with the variables to be bound to the SQL token when it is run on the database.
* @return (constraint SQL String, variables bound to the SQL token)
*/
def generateConstraints: (String, Seq[Any])
/**
* Returns the output of generateConstraints with the SQL String converted to scalikejdbc.SQLSyntax
* @return (constraint SQLSyntax, variables bound to the SQL token)
*/
def asSqlSyntaxWithValuesToBind: (SQLSyntax, Seq[Any]) = {
val (constraints, values) = generateConstraints
(SQLSyntax.createUnsafely(constraints), values)
}
}
/**
* This object signifies that there are no constraints to be placed upon a selection.
*/
object NoCriterion extends SelectionCriterion {
def generateConstraints = ("", Seq())
}
/**
* Eq generates a SelectionCriterion representing the constraint that the value in a given column be EQUAL to a given value.
*/
object Eq extends OrderConstraint
/**
* Ne generates a SelectionCriterion representing the constraint that the value in a given column be NOT EQUAL to a given value.
*/
object Ne extends OrderConstraint
/**
* Lt generates a SelectionCriterion representing the constraint that the value in a given column be LESS THAN a given value.
*/
object Lt extends OrderConstraint
/**
* Le generates a SelectionCriterion representing the constraint that the value in a given column be LESS THAN OR EQUAL TO a given value.
*/
object Le extends OrderConstraint
/**
* Gt generates a SelectionCriterion representing the constraint that the value in a given column be GREATER THAN a given value.
*/
object Gt extends OrderConstraint
/**
* Ge generates a SelectionCriterion representing the constraint that the value in a given column be GREATER THAN OR EQUAL TO a given value.
*/
object Ge extends OrderConstraint
/**
* The basis for the Eq, Ne, Lt, Le, Gt, Ge SelectionCriterion constructors.
*/
sealed trait OrderConstraint {
def asString = this match {
case Eq => "="
case Ne => "<>"
case Lt => "<"
case Le => "<="
case Gt => ">"
case Ge => ">="
}
/**
* @param column The name of the column which is being constrained.
* @param value The value imposed by the constraint.
* @return A SelectionCriterion object representing the desired constraint.
*/
def apply(column: String, value: Any): SelectionCriterion = OrderCriterion(this, column, value)
}
/**
* The SelectionCriterion generated by Eq, Ne, Lt, Le, Gt, Ge.
* @param constraintType A reference to the generating object, which specifies the kind of order constraint being imposed.
* @param column The name of the column which is being constrained.
* @param value The value imposed by the constraint.
*/
final case class OrderCriterion(constraintType: OrderConstraint, column: String, value: Any) extends SelectionCriterion {
def generateConstraints = (s"${column} ${constraintType.asString} ?", Seq(value))
}
/**
* Null generates a SelectionCriterion representing the constraint that the value in a given column be NULL.
*/
object Null {
def apply(column: String) = NullCriterion(true, column)
}
/**
* NotNull generates a SelectionCriterion representing the constraint that the value in the given column NOT be NULL.
*/
object NotNull {
def apply(column: String) = NullCriterion(false, column)
}
/**
* The SelectionCriterion generated by Null and NotNull.
* @param isNull Specifies whether the field in question is supposed to be Null or not.
* @param column The name of the column which is being subjected to the constraint.
*/
final case class NullCriterion(isNull: Boolean, column: String) extends SelectionCriterion {
def generateConstraints = (s"${column}" + {if (isNull) " IS NULL" else " IS NOT NULL"}, Seq[Any]())
}
/**
* In generates a SelectionCriterion representing the constraint that the value in a given column be present in a specified selection
* of columns from a given table (which may in turn be subject to its own [[systems.adaptix.bling.data.SelectionCriterion SelectionCriterion]]).
*/
object In {
/**
* @param column The column of the outer table the value of which should be in the inner selection.
* @param tableName The name of the table from which the inner selection is being made.
* @param tableColumns The columns to select in the inner selection.
* @param tableCriterion The [[systems.adaptix.bling.data.SelectionCriterion SelectionCriterion]] for the inner selection.
* @return The SelectionCriterion representing the specified constraint.
*/
// TODO: The column argument of the apply method should really be of type DesiredColumns (and should accept multiple columns).
def apply(column: String, tableName: String, tableColumns: DesiredColumns = AllColumns, tableCriterion: SelectionCriterion = NoCriterion) = InCriterion(column, tableName, tableColumns, tableCriterion)
}
/**
* The SelectionCriterion generated by In.
* @param column The column of the outer table the value of which should be in the inner selection.
* @param tableName The name of the table from which the inner selection is being made.
* @param tableColumns The columns to select in the inner selection.
* @param tableCriterion The [[systems.adaptix.bling.data.SelectionCriterion SelectionCriterion]] for the inner selection.
*/
final case class InCriterion(column: String, tableName: String, tableColumns: DesiredColumns, tableCriterion: SelectionCriterion) extends SelectionCriterion {
def generateConstraints = tableCriterion match {
case NoCriterion => (s"${column} IN (SELECT ${tableColumns.asString} FROM ${tableName})", Seq[Any]())
case _ => {
val (criterionString, criterionValuesToBind) = tableCriterion.generateConstraints
(s"${column} IN (SELECT ${tableColumns.asString} FROM ${tableName} WHERE ${criterionString})", criterionValuesToBind)
}
}
}
/**
* An object of this type represents a collection of columns which one wishes to select from a table in a database.
* A DesiredColumns objects is either the [[systems.adaptix.bling.data.AllColumns AllColumns]] singleton object or it is of type [[systems.adaptix.bling.data.SomeColumns SomeColumns]].
*/
sealed trait DesiredColumns {
def asString = this match {
case AllColumns => "*"
case desired: SomeColumns => desired.columns.mkString(", ")
}
}
/**
* Signifies that all columns in a table are desired. Equivalent to "*" in SQL.
*/
object AllColumns extends DesiredColumns
/**
* Contains the names of the columns one desires selected from a table in some specific order.
* @param columns A Seq[String] which specifies the desired selection format.
*/
final case class SomeColumns(columns: Seq[String]) extends DesiredColumns
/**
* Constructs SelectionCriteria which are either conjunctions or disjunctions of other SelectionCriteria.
* The two objects of Junction type are [[systems.adaptix.bling.data.And And]] and [[systems.adaptix.bling.data.Or Or]].
*/
sealed trait Junction {
/**
* A String representation of the type of Junction.
* @return "AND" for And and "OR" for Or.
*/
def asString = this match {
case And => "AND"
case Or => "OR"
}
/**
* @param componentCriteria The criteria which the caller desires to take a conjunction or disjunction of.
* @return The SelectionCriterion generated by making the desired conjunction or disjunction.
*/
def apply(componentCriteria: SelectionCriterion*): SelectionCriterion = new JunctiveCriterion(this, componentCriteria:_*)
}
/**
* Conjunctive [[systems.adaptix.bling.data.Junction Junction]].
*/
object And extends Junction
/**
* Disjunctive [[systems.adaptix.bling.data.Junction Junction]].
*/
object Or extends Junction
/**
* SelectionCriterion generated by And and Or.
* @param junction Specifies whether a conjunction or disjunction is being made.
* @param componentCriteria The SelectionCriterion over which the conjunction or disjunction is to be taken.
*/
final class JunctiveCriterion(junction: Junction, componentCriteria: SelectionCriterion*) extends SelectionCriterion {
def generateConstraints = {
val (componentStrings, componentValues) = componentCriteria.map(_.generateConstraints).unzip
( "(" + componentStrings.mkString(s") ${junction.asString} (") + ")" , ( componentValues :\\ Seq[Any]() )(_ ++ _) )
}
}
/**
* Constructs a SelectionCriterion which is the negation of a given SelectionCriterion.
*/
object Not {
/**
* @param criterionToNegate The SelectionCriterion that the caller wishes negated.
* @return The negated SelectionCriterion.
*/
def apply(criterionToNegate: SelectionCriterion): SelectionCriterion = NegativeCriterion(criterionToNegate)
}
/**
* SelectionCriterion generated by Not.
* @param componentCriterion The SelectionCriterion to be negated.
*/
final case class NegativeCriterion(componentCriterion: SelectionCriterion) extends SelectionCriterion {
def generateConstraints = {
val (componentString, componentValues) = componentCriterion.generateConstraints
( s"NOT (${componentString})", componentValues)
}
}
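// Hedged usage sketch (not part of the original file): the column names and values below are
// placeholders. The constructors listed in the SelectionCriterion scaladoc compose into a single
// WHERE-clause string plus its bind values:
//   val criterion = And(Eq("status", "active"), Or(Gt("age", 18), NotNull("guardian")))
//   criterion.generateConstraints
//   // ("(status = ?) AND ((age > ?) OR (guardian IS NOT NULL))", Seq("active", 18))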
|
nkashy1/bling
|
src/main/scala/systems/adaptix/bling/data/SelectionCriterion.scala
|
Scala
|
mit
| 10,479
|
package com.softwaremill.react.kafka
import java.util.Properties
import kafka.message.{DefaultCompressionCodec, NoCompressionCodec, SnappyCompressionCodec}
import kafka.producer.ProducerConfig
import kafka.serializer.Encoder
object ProducerProperties {
/**
* Producer Properties
*
* brokerList
* This is for bootstrapping and the producer will only use it for getting metadata (topics, partitions and replicas).
* The socket connections for sending the actual data will be established based on the broker information returned in
* the metadata. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a
* subset of brokers.
*
* topic
* The Kafka topic to which this producer will send its messages.
*
* clientId
* The client id is a user-specified string sent in each request to help trace calls. It should logically identify
* the application making the request.
*
*/
def apply[T](
brokerList: String,
topic: String,
clientId: String,
encoder: Encoder[T],
partitionizer: T => Option[Array[Byte]]
): ProducerProperties[T] = {
val props = initialMap(brokerList, encoder, Some(clientId))
new ProducerProperties(props, topic, encoder, partitionizer)
}
def apply[T](brokerList: String, topic: String, clientId: String, encoder: Encoder[T]): ProducerProperties[T] = {
val props = initialMap(brokerList, encoder, Some(clientId))
new ProducerProperties(props, topic, encoder, (_: T) => None)
}
def apply[T](brokerList: String, topic: String, encoder: Encoder[T]): ProducerProperties[T] = {
val props = initialMap(brokerList, encoder, None)
new ProducerProperties(props, topic, encoder, (_: T) => None)
}
private def initialMap[T](brokerList: String, encoder: Encoder[T], clientIdOpt: Option[String]) = {
Map[String, String](
"metadata.broker.list" -> brokerList,
// defaults
"compression.codec" -> DefaultCompressionCodec.codec.toString,
"client.id" -> clientIdOpt.getOrElse(""),
"message.send.max.retries" -> 3.toString,
"request.required.acks" -> -1.toString,
"producer.type" -> "sync"
)
}
}
case class ProducerProperties[T](
private val params: Map[String, String],
topic: String,
encoder: Encoder[T],
partitionizer: T => Option[Array[Byte]] = (_: T) => None
) {
/**
* Asynchronous Mode
* The number of messages to send in one batch when using async mode.
* The producer will wait until either this number of messages are ready
* to send or bufferMaxMs timeout is reached.
*/
def asynchronous(batchSize: Int = 200, bufferMaxMs: Int = 500): ProducerProperties[T] = {
val p = params + (
"producer.type" -> "async",
"batch.num.messages" -> batchSize.toString,
"queue.buffering.max.ms" -> bufferMaxMs.toString
)
copy(params = p)
}
/**
* No Compression
* Allows you to turn off the compression codec for all data generated by this producer.
*/
def noCompression(): ProducerProperties[T] = {
copy(params = params + ("compression.codec" -> NoCompressionCodec.codec.toString))
}
/**
* Use Snappy Compression instead of the default compression
*/
def useSnappyCompression(): ProducerProperties[T] = {
copy(params = params + ("compression.codec" -> SnappyCompressionCodec.codec.toString))
}
/**
* messageSendMaxRetries
* This property will cause the producer to automatically retry a failed send request.
* This property specifies the number of retries when such failures occur. Note that
* setting a non-zero value here can lead to duplicates in the case of network errors
* that cause a message to be sent but the acknowledgment to be lost.
*/
def messageSendMaxRetries(num: Int): ProducerProperties[T] = {
copy(params = params + ("message.send.max.retries" -> num.toString))
}
/**
* requestRequiredAcks
* 0) which means that the producer never waits for an acknowledgment from the broker (the same behavior as 0.7).
* This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a server fails).
* 1) which means that the producer gets an acknowledgment after the leader replica has received the data. This option provides
* better durability as the client waits until the server acknowledges the request as successful (only messages that were
* written to the now-dead leader but not yet replicated will be lost).
* -1) which means that the producer gets an acknowledgment after all in-sync replicas have received the data. This option
* provides the best durability, we guarantee that no messages will be lost as long as at least one in sync replica remains.
*/
def requestRequiredAcks(value: Int): ProducerProperties[T] = {
copy(params = params + ("request.required.acks" -> value.toString))
}
/**
* Set any additional properties as needed
*/
def setProperty(key: String, value: String): ProducerProperties[T] = copy(params = params + (key -> value))
def setProperties(values: (String, String)*): ProducerProperties[T] = copy(params = params ++ values)
/**
* Generate the Kafka ProducerConfig object
*
*/
def toProducerConfig: ProducerConfig = {
new ProducerConfig(params.foldLeft(new Properties()) { (props, param) => props.put(param._1, param._2); props })
}
/**
* Dump current props for debugging
*/
def dump: String = params.map { e => f"${e._1}%-20s : ${e._2.toString}" }.mkString("\\n")
}
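// Hedged usage sketch (not part of the original file): broker list, topic, client id and encoder
// below are placeholders; `kafka.serializer.StringEncoder` is Kafka's stock string encoder.
//   val props = ProducerProperties("broker1:9092,broker2:9092", "events", "my-client",
//                                  new kafka.serializer.StringEncoder())
//     .asynchronous(batchSize = 100, bufferMaxMs = 250)
//     .useSnappyCompression()
//     .requestRequiredAcks(1)
//   val config = props.toProducerConfig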
|
blawlor/reactive-kafka
|
core/src/main/scala/com/softwaremill/react/kafka/ProducerProperties.scala
|
Scala
|
apache-2.0
| 5,998
|
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
import java.nio.file.Files
/** A class representing command line info for scalac */
class CompilerCommand(arguments: List[String], val settings: Settings) {
def this(arguments: List[String], error: String => Unit) = this(arguments, new Settings(error))
def this(arguments: List[String], settings: Settings, error: String => Unit) = this(arguments, settings withErrorFn error)
type Setting = Settings#Setting
private val processArgumentsResult =
if (shouldProcessArguments) processArguments
else (true, Nil)
def ok = processArgumentsResult._1
def files = processArgumentsResult._2
/** The name of the command. */
def cmdName = "scalac"
/** A descriptive alias for version and help messages. */
def cmdDesc = "compiler"
private def explainAdvanced = """
|-- Note --
|Boolean settings are false unless set: -Xdev -Xcheck-init:true -Xprompt:false
|Multi-valued settings are comma-separated: -Xlint:infer-any,unused,-missing-interpolator
|Phases are a list of names, ids, or ranges of ids: -Vprint:parser,typer,5-10 -Ylog:-4
|Use _ to enable all: -language:_ -Vprint:_
|
""".stripMargin.trim
def shortUsage = "Usage: %s <options> <source files>" format cmdName
/** Creates a help message for a subset of options based on cond */
def optionsMessage(cond: Setting => Boolean): String = {
val iswarning = cond(settings.warnUnused) // sordid check for if we're building -W warning help, to include lint and unused
val baseList = settings.visibleSettings.filter(cond).toList.sortBy(_.name)
val (deprecateds, theRest) = baseList.partition(_.isDeprecated)
def columnOneWidth(s: Setting): Int =
if (iswarning && (s == settings.lint || s == settings.warnUnused))
s.asInstanceOf[settings.MultiChoiceSetting[_]].choices.map(c => s"${s.name}:$c".length).max
else
s.helpSyntax.length
val width = baseList.map(columnOneWidth).max
val columnOneFormat = s"%-${width}s"
def format(s: String) = columnOneFormat.format(s)
def layout(c1: String, c2: String) = s"${format(c1)} ${c2}"
def helpStr(s: Setting) = {
val str = layout(s.helpSyntax, s.helpDescription)
val suffix = s.deprecationMessage match {
case Some(msg) => "\\n" + format("") + " deprecated: " + msg
case _ => ""
}
str + suffix
}
def appendDescriptions(sb: StringBuilder, msg: String, xs: List[Setting]): Unit =
if (!xs.isEmpty) {
val ss = xs.flatMap { s =>
if (iswarning && (s == settings.lint || s == settings.warnUnused)) {
val mcs = s.asInstanceOf[settings.MultiChoiceSetting[_]]
mcs.choices.map(c => s"${s.name}:$c").zipAll(mcs.descriptions, "", "").map {
case (c, d) => layout(c, d)
}
} else
List(helpStr(s))
}
sb.append(msg)
for (each <- ss) sb.append(" ").append(each).append("\\n")
}
val sb = new StringBuilder()
appendDescriptions(sb, "", theRest)
appendDescriptions(sb, "\\nDeprecated settings:\\n", deprecateds)
sb.toString
}
def createUsageMsg(label: String, explain: Boolean = true)(cond: Setting => Boolean): String = {
val explained = if (explain) s"\\n$explainAdvanced" else ""
s"$shortUsage\\n\\n$label options:\\n${optionsMessage(cond)}${explained}\\n"
}
/** Messages explaining usage and options */
def usageMsg = createUsageMsg("Standard", explain = false)(_.isStandard)
def vusageMsg = createUsageMsg("Verbose")(_.isVerbose)
def wusageMsg = createUsageMsg("Warnings")(_.isWarning)
def xusageMsg = createUsageMsg("Available advanced")(_.isAdvanced)
def yusageMsg = createUsageMsg("Available private")(_.isPrivate)
/** For info settings, compiler should just print a message and quit. */
def shouldStopWithInfo = settings.isInfo
def getInfoMessage(global: Global): String = {
import settings._
if (version) Properties.versionFor(cmdDesc)
else if (help) usageMsg + global.pluginOptionsHelp
else if (Vhelp) vusageMsg
else if (Whelp) wusageMsg
else if (Xhelp) xusageMsg
else if (Yhelp) yusageMsg
else if (showPlugins) global.pluginDescriptions
else if (showPhases) global.phaseDescriptions + (
if (debug) "\\n" + global.phaseFlagDescriptions else ""
)
else if (genPhaseGraph.isSetByUser) {
val components = global.phaseNames // global.phaseDescriptors // one initializes
s"Phase graph of ${components.size} components output to ${genPhaseGraph.value}*.dot."
}
else allSettings.valuesIterator.filter(_.isHelping).map(_.help).mkString("\\n\\n")
}
/**
* Expands all arguments starting with @ to the contents of the
* file named like each argument.
*/
def expandArg(arg: String): List[String] = {
def stripComment(s: String) = s takeWhile (_ != '#')
import java.nio.file._
import scala.jdk.CollectionConverters._
val file = Paths.get(arg stripPrefix "@")
if (!Files.exists(file))
throw new java.io.FileNotFoundException("argument file %s could not be found" format file)
settings splitParams (Files.readAllLines(file).asScala map stripComment mkString " ")
}
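// Illustrative example (not from the source): given a file `opts.txt` containing
//   -deprecation   # this trailing comment is stripped
//   -Xlint
// the argument `@opts.txt` expands to the options `-deprecation -Xlint` before
// normal argument processing continues.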
// override this if you don't want arguments processed here
def shouldProcessArguments: Boolean = true
def processArguments: (Boolean, List[String]) = {
// expand out @filename to the contents of that filename
val expandedArguments = arguments flatMap {
case x if x startsWith "@" => expandArg(x)
case x => List(x)
}
settings.processArguments(expandedArguments, processAll = true)
}
}
|
martijnhoekstra/scala
|
src/compiler/scala/tools/nsc/CompilerCommand.scala
|
Scala
|
apache-2.0
| 6,043
|
object MultiArray2 {
def test: Int = {
val b: Array[Array[Int]] = Array.fill(10)(Array.fill(10)(0))
b(0)(0) = 10
b(0)(0)
} ensuring(_ == 10)
}
|
regb/leon
|
src/test/resources/regression/verification/xlang/valid/MultiArray2.scala
|
Scala
|
gpl-3.0
| 164
|
/*
Author:
Kristal Curtis
*/
package siren.test
import org.scalatest._
import siren._
class UnionFindGridTest extends FunSuite with BeforeAndAfter {
val indexRange = (250L, 499L) // maps to internal indices 0 to 249
val scanRange = (750L, 999L) // maps to internal indices 250 to 499
val uf = new UnionFindGrid(indexRange, scanRange)
test("toIndex for pos in index range") {
assert(uf.toIndex(300) == 50)
}
test("toIndex for pos in scan range") {
assert(uf.toIndex(800) == (250 + 50))
}
}
|
fnothaft/siren-release
|
src/test/scala/UnionFindGridTest.scala
|
Scala
|
bsd-2-clause
| 474
|
/* Title: Pure/General/mercurial.scala
Author: Makarius
Support for Mercurial repositories, with local or remote repository clone
and working directory (via ssh connection).
*/
package isabelle
import java.io.{File => JFile}
import scala.annotation.tailrec
import scala.collection.mutable
object Mercurial
{
type Graph = isabelle.Graph[String, Unit]
/* command-line syntax */
def optional(s: String, prefix: String = ""): String =
if (s == "") "" else " " + prefix + " " + Bash.string(s)
def opt_flag(flag: String, b: Boolean): String = if (b) " " + flag else ""
def opt_rev(s: String): String = optional(s, "--rev")
def opt_template(s: String): String = optional(s, "--template")
/* repository access */
def is_repository(root: Path, ssh: SSH.System = SSH.Local): Boolean =
ssh.is_dir(root + Path.explode(".hg")) &&
new Repository(root, ssh).command("root").ok
def repository(root: Path, ssh: SSH.System = SSH.Local): Repository =
{
val hg = new Repository(root, ssh)
hg.command("root").check
hg
}
def find_repository(start: Path, ssh: SSH.System = SSH.Local): Option[Repository] =
{
def find(root: Path): Option[Repository] =
if (is_repository(root, ssh)) Some(repository(root, ssh = ssh))
else if (root.is_root) None
else find(root + Path.parent)
find(ssh.expand_path(start))
}
private def make_repository(root: Path, cmd: String, args: String, ssh: SSH.System = SSH.Local)
: Repository =
{
val hg = new Repository(root, ssh)
ssh.mkdirs(hg.root.dir)
hg.command(cmd, args, repository = false).check
hg
}
def init_repository(root: Path, ssh: SSH.System = SSH.Local): Repository =
make_repository(root, "init", ssh.bash_path(root), ssh = ssh)
def clone_repository(source: String, root: Path,
rev: String = "", options: String = "", ssh: SSH.System = SSH.Local): Repository =
make_repository(root, "clone",
options + " " + Bash.string(source) + " " + ssh.bash_path(root) + opt_rev(rev), ssh = ssh)
def setup_repository(source: String, root: Path, ssh: SSH.System = SSH.Local): Repository =
{
if (ssh.is_dir(root)) { val hg = repository(root, ssh = ssh); hg.pull(remote = source); hg }
else clone_repository(source, root, options = "--noupdate", ssh = ssh)
}
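// Hedged usage sketch (not from the original file): the URL and path below are placeholders.
//   val hg = clone_repository("https://example.org/hg/repo", Path.explode("/tmp/repo"))
//   hg.update(rev = "tip")
//   val short_id = hg.id()        // e.g. "0123456789ab"
//   val files = hg.known_files()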
class Repository private[Mercurial](root_path: Path, ssh: SSH.System = SSH.Local)
{
hg =>
val root = ssh.expand_path(root_path)
def root_url: String = ssh.hg_url + root.implode
override def toString: String = ssh.prefix + root.implode
def command(name: String, args: String = "", options: String = "",
repository: Boolean = true): Process_Result =
{
val cmdline =
"export HGPLAIN=\\n\\"${HG:-hg}\\" --config " + Bash.string("defaults." + name + "=") +
(if (repository) " --repository " + ssh.bash_path(root) else "") +
" --noninteractive " + name + " " + options + " " + args
ssh.execute(cmdline)
}
def add(files: List[Path]): Unit =
hg.command("add", files.map(ssh.bash_path(_)).mkString(" "))
def archive(target: String, rev: String = "", options: String = ""): Unit =
hg.command("archive", opt_rev(rev) + " " + Bash.string(target), options).check
def heads(template: String = "{node|short}\\n", options: String = ""): List[String] =
hg.command("heads", opt_template(template), options).check.out_lines
def identify(rev: String = "tip", options: String = ""): String =
hg.command("id", opt_rev(rev), options).check.out_lines.headOption getOrElse ""
def id(rev: String = "tip"): String = identify(rev, options = "-i")
def manifest(rev: String = "", options: String = ""): List[String] =
hg.command("manifest", opt_rev(rev), options).check.out_lines
def log(rev: String = "", template: String = "", options: String = ""): String =
hg.command("log", opt_rev(rev) + opt_template(template), options).check.out
def parent(): String = log(rev = "p1()", template = "{node|short}")
def push(remote: String = "", rev: String = "", force: Boolean = false, options: String = "")
{
hg.command("push", opt_rev(rev) + opt_flag("--force", force) + optional(remote), options).
check_rc(rc => rc == 0 | rc == 1)
}
def pull(remote: String = "", rev: String = "", options: String = ""): Unit =
hg.command("pull", opt_rev(rev) + optional(remote), options).check
def update(
rev: String = "", clean: Boolean = false, check: Boolean = false, options: String = "")
{
hg.command("update",
opt_rev(rev) + opt_flag("--clean", clean) + opt_flag("--check", check), options).check
}
def known_files(): List[String] =
hg.command("status", options = "--modified --added --clean --no-status").check.out_lines
def graph(): Graph =
{
val Node = """^node: (\\w{12}) (\\w{12}) (\\w{12})""".r
val log_result =
log(template = """node: {node|short} {p1node|short} {p2node|short}\\n""")
(Graph.string[Unit] /: split_lines(log_result)) {
case (graph, Node(x, y, z)) =>
val deps = List(y, z).filterNot(s => s.forall(_ == '0'))
val graph1 = (graph /: (x :: deps))(_.default_node(_, ()))
(graph1 /: deps)({ case (g, dep) => g.add_edge(dep, x) })
case (graph, _) => graph
}
}
}
/* check files */
def check_files(files: List[Path], ssh: SSH.System = SSH.Local): (List[Path], List[Path]) =
{
val outside = new mutable.ListBuffer[Path]
val unknown = new mutable.ListBuffer[Path]
@tailrec def check(paths: List[Path])
{
paths match {
case path :: rest =>
find_repository(path, ssh) match {
case None => outside += path; check(rest)
case Some(hg) =>
val known =
hg.known_files().iterator.map(name =>
(hg.root + Path.explode(name)).canonical_file).toSet
if (!known(path.canonical_file)) unknown += path
check(rest.filterNot(p => known(p.canonical_file)))
}
case Nil =>
}
}
check(files)
(outside.toList, unknown.toList)
}
}
|
larsrh/libisabelle
|
modules/pide/2019-RC4/src/main/scala/General/mercurial.scala
|
Scala
|
apache-2.0
| 6,221
|
package org.jetbrains.plugins.scala
package annotator
import com.intellij.codeInsight.daemon.impl.AnnotationHolderImpl
import com.intellij.codeInsight.intention.IntentionAction
import com.intellij.codeInspection._
import com.intellij.lang.annotation._
import com.intellij.openapi.project.DumbAware
import com.intellij.openapi.roots.ProjectFileIndex
import com.intellij.openapi.util.{Key, TextRange}
import com.intellij.psi._
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.annotator.createFromUsage._
import org.jetbrains.plugins.scala.annotator.importsTracker._
import org.jetbrains.plugins.scala.annotator.intention._
import org.jetbrains.plugins.scala.annotator.modifiers.ModifierChecker
import org.jetbrains.plugins.scala.annotator.quickfix._
import org.jetbrains.plugins.scala.annotator.template._
import org.jetbrains.plugins.scala.codeInspection.caseClassParamInspection.{RemoveValFromEnumeratorIntentionAction, RemoveValFromGeneratorIntentionAction}
import org.jetbrains.plugins.scala.components.HighlightingAdvisor
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.highlighter.{AnnotatorHighlighter, DefaultHighlighter}
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.base._
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.{ScConstructorPattern, ScInfixPattern, ScPattern}
import org.jetbrains.plugins.scala.lang.psi.api.base.types._
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScExpression.ExpressionTypeResult
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScClassParameter, ScParameter, ScParameters, ScTypeParam}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.usages.{ImportUsed, ReadValueUsed, ValueUsed, WriteValueUsed}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.{ScImportExpr, ScImportSelector}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.api.{ScalaElementVisitor, ScalaFile}
import org.jetbrains.plugins.scala.lang.psi.impl.expr.ScInterpolatedStringPartReference
import org.jetbrains.plugins.scala.lang.psi.impl.{ScalaPsiElementFactory, ScalaPsiManager}
import org.jetbrains.plugins.scala.lang.psi.light.scala.isLightScNamedElement
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.result.{Success, TypeResult, TypingContext, TypingContextOwner}
import org.jetbrains.plugins.scala.lang.resolve._
import org.jetbrains.plugins.scala.lang.resolve.processor.MethodResolveProcessor
import org.jetbrains.plugins.scala.lang.scaladoc.psi.api.ScDocResolvableCodeReference
import org.jetbrains.plugins.scala.lang.scaladoc.psi.impl.ScDocResolvableCodeReferenceImpl
import org.jetbrains.plugins.scala.project.{ProjectPsiElementExt, ScalaLanguageLevel}
import org.jetbrains.plugins.scala.util.ScalaUtils
import scala.collection.mutable.ArrayBuffer
import scala.collection.{Seq, Set, mutable}
/**
* User: Alexander Podkhalyuzin
* Date: 23.06.2008
*/
class ScalaAnnotator extends Annotator with FunctionAnnotator with ScopeAnnotator
with ParametersAnnotator with ApplicationAnnotator
with AssignmentAnnotator with VariableDefinitionAnnotator
with TypedStatementAnnotator with PatternDefinitionAnnotator
with PatternAnnotator with ConstructorAnnotator
with OverridingAnnotator with ValueClassAnnotator with DumbAware {
override def annotate(element: PsiElement, holder: AnnotationHolder) {
val typeAware = isAdvancedHighlightingEnabled(element)
val (compiled, isInSources) = element.getContainingFile match {
case file: ScalaFile =>
val isInSources: Boolean = ScalaUtils.isUnderSources(file)
if (isInSources && (element eq file)) {
if (typeAware) Stats.trigger("scala.file.with.type.aware.annotated")
else Stats.trigger("scala.file.without.type.aware.annotated")
}
(file.isCompiled, isInSources)
case _ => (false, false)
}
val visitor = new ScalaElementVisitor {
private def expressionPart(expr: ScExpression) {
if (!compiled) {
checkExpressionType(expr, holder, typeAware)
checkExpressionImplicitParameters(expr, holder)
ByNameParameter.annotate(expr, holder, typeAware)
}
if (isAdvancedHighlightingEnabled(element)) {
expr.getTypeExt(TypingContext.empty) match {
case ExpressionTypeResult(Success(t, _), _, Some(implicitFunction)) =>
highlightImplicitView(expr, implicitFunction, t, expr, holder)
case _ =>
}
}
}
override def visitParameterizedTypeElement(parameterized: ScParameterizedTypeElement) {
val tp = parameterized.typeElement.getTypeNoConstructor(TypingContext.empty)
tp match {
case Success(res, _) =>
ScType.extractDesignated(res, withoutAliases = false) match {
case Some((t: ScTypeParametersOwner, subst)) =>
val typeParametersLength = t.typeParameters.length
val argsLength = parameterized.typeArgList.typeArgs.length
if (typeParametersLength != argsLength) {
val error = "Wrong number of type parameters. Expected: " + typeParametersLength + ", actual: " + argsLength
val leftBracket = parameterized.typeArgList.getNode.findChildByType(ScalaTokenTypes.tLSQBRACKET)
if (leftBracket != null) {
val annotation = holder.createErrorAnnotation(leftBracket, error)
annotation.setHighlightType(ProblemHighlightType.ERROR)
}
val rightBracket = parameterized.typeArgList.getNode.findChildByType(ScalaTokenTypes.tRSQBRACKET)
if (rightBracket != null) {
val annotation = holder.createErrorAnnotation(rightBracket, error)
annotation.setHighlightType(ProblemHighlightType.ERROR)
}
}
case _ =>
}
case _ =>
}
super.visitParameterizedTypeElement(parameterized)
}
override def visitExpression(expr: ScExpression) {
expressionPart(expr)
super.visitExpression(expr)
}
override def visitMacroDefinition(fun: ScMacroDefinition): Unit = {
if (isInSources) Stats.trigger("scala.macro.definition")
super.visitMacroDefinition(fun)
}
override def visitReferenceExpression(ref: ScReferenceExpression) {
referencePart(ref)
visitExpression(ref)
}
override def visitEnumerator(enum: ScEnumerator) {
enum.valKeyword match {
case Some(valKeyword) =>
val annotation = holder.createWarningAnnotation(valKeyword, ScalaBundle.message("enumerator.val.keyword.deprecated"))
annotation.setHighlightType(ProblemHighlightType.LIKE_DEPRECATED)
annotation.registerFix(new RemoveValFromEnumeratorIntentionAction(enum))
case _ =>
}
super.visitEnumerator(enum)
}
override def visitGenerator(gen: ScGenerator) {
gen.valKeyword match {
case Some(valKeyword) =>
val annotation = holder.createWarningAnnotation(valKeyword, ScalaBundle.message("generator.val.keyword.removed"))
annotation.setHighlightType(ProblemHighlightType.LIKE_UNKNOWN_SYMBOL)
annotation.registerFix(new RemoveValFromGeneratorIntentionAction(gen))
case _ =>
}
super.visitGenerator(gen)
}
override def visitGenericCallExpression(call: ScGenericCall) {
//todo: if (typeAware) checkGenericCallExpression(call, holder)
super.visitGenericCallExpression(call)
}
override def visitTypeElement(te: ScTypeElement) {
checkTypeElementForm(te, holder)
super.visitTypeElement(te)
}
override def visitLiteral(l: ScLiteral) {
l match {
case interpolated: ScInterpolatedStringLiteral if l.getFirstChild != null =>
highlightWrongInterpolatedString(interpolated, holder)
case _ if l.getFirstChild.getNode.getElementType == ScalaTokenTypes.tINTEGER => // the literal is a tINTEGER
checkIntegerLiteral(l, holder)
case _ =>
}
super.visitLiteral(l)
}
override def visitAnnotation(annotation: ScAnnotation) {
checkAnnotationType(annotation, holder)
PrivateBeanProperty.annotate(annotation, holder)
super.visitAnnotation(annotation)
}
override def visitForExpression(expr: ScForStatement) {
checkForStmtUsedTypes(expr, holder)
super.visitForExpression(expr)
}
override def visitVariableDefinition(varr: ScVariableDefinition) {
annotateVariableDefinition(varr, holder, typeAware)
super.visitVariableDefinition(varr)
}
override def visitVariableDeclaration(varr: ScVariableDeclaration) {
checkAbstractMemberPrivateModifier(varr, varr.declaredElements.map(_.nameId), holder)
super.visitVariableDeclaration(varr)
}
override def visitTypedStmt(stmt: ScTypedStmt) {
annotateTypedStatement(stmt, holder, typeAware)
super.visitTypedStmt(stmt)
}
override def visitPatternDefinition(pat: ScPatternDefinition) {
if (!compiled) {
annotatePatternDefinition(pat, holder, typeAware)
}
super.visitPatternDefinition(pat)
}
override def visitPattern(pat: ScPattern) {
annotatePattern(pat, holder, typeAware)
super.visitPattern(pat)
}
override def visitMethodCallExpression(call: ScMethodCall) {
checkMethodCallImplicitConversion(call, holder)
if (typeAware) annotateMethodInvocation(call, holder)
super.visitMethodCallExpression(call)
}
override def visitInfixExpression(infix: ScInfixExpr): Unit = {
if (typeAware) annotateMethodInvocation(infix, holder)
super.visitInfixExpression(infix)
}
override def visitSelfInvocation(self: ScSelfInvocation) {
checkSelfInvocation(self, holder)
super.visitSelfInvocation(self)
}
override def visitConstrBlock(constr: ScConstrBlock) {
annotateAuxiliaryConstructor(constr, holder)
super.visitConstrBlock(constr)
}
override def visitParameter(parameter: ScParameter) {
annotateParameter(parameter, holder)
super.visitParameter(parameter)
}
override def visitCatchBlock(c: ScCatchBlock) {
checkCatchBlockGeneralizedRule(c, holder, typeAware)
super.visitCatchBlock(c)
}
override def visitFunctionDefinition(fun: ScFunctionDefinition) {
if (!compiled && !fun.isConstructor)
annotateFunction(fun, holder, typeAware)
super.visitFunctionDefinition(fun)
}
override def visitFunctionDeclaration(fun: ScFunctionDeclaration) {
checkAbstractMemberPrivateModifier(fun, Seq(fun.nameId), holder)
super.visitFunctionDeclaration(fun)
}
override def visitFunction(fun: ScFunction) {
if (typeAware && !compiled && fun.getParent.isInstanceOf[ScTemplateBody]) {
checkOverrideMethods(fun, holder, isInSources)
}
if (!fun.isConstructor) checkFunctionForVariance(fun, holder)
super.visitFunction(fun)
}
override def visitAssignmentStatement(stmt: ScAssignStmt) {
annotateAssignment(stmt, holder, typeAware)
super.visitAssignmentStatement(stmt)
}
override def visitTypeProjection(proj: ScTypeProjection) {
referencePart(proj)
visitTypeElement(proj)
}
override def visitUnderscoreExpression(under: ScUnderscoreSection) {
checkUnboundUnderscore(under, holder)
}
private def referencePart(ref: ScReferenceElement) {
if (typeAware) annotateReference(ref, holder)
ref.qualifier match {
case None => checkNotQualifiedReferenceElement(ref, holder)
case Some(_) => checkQualifiedReferenceElement(ref, holder)
}
}
override def visitReference(ref: ScReferenceElement) {
referencePart(ref)
super.visitReference(ref)
}
override def visitImportExpr(expr: ScImportExpr) {
checkImportExpr(expr, holder)
super.visitImportExpr(expr)
}
override def visitReturnStatement(ret: ScReturnStmt) {
checkExplicitTypeForReturnStatement(ret, holder)
super.visitReturnStatement(ret)
}
override def visitConstructor(constr: ScConstructor) {
if (typeAware) annotateConstructor(constr, holder)
super.visitConstructor(constr)
}
override def visitModifierList(modifierList: ScModifierList) {
ModifierChecker.checkModifiers(modifierList, holder)
super.visitModifierList(modifierList)
}
override def visitParameters(parameters: ScParameters) {
annotateParameters(parameters, holder)
super.visitParameters(parameters)
}
override def visitTypeDefinition(typedef: ScTypeDefinition) {
super.visitTypeDefinition(typedef)
}
override def visitExistentialTypeElement(exist: ScExistentialTypeElement): Unit = {
if (isInSources) Stats.trigger("scala.existential.type")
super.visitExistentialTypeElement(exist)
}
override def visitTypeAlias(alias: ScTypeAlias) {
if (typeAware && !compiled && alias.getParent.isInstanceOf[ScTemplateBody]) {
checkOverrideTypes(alias, holder)
}
if(!compoundType(alias)) checkBoundsVariance(alias, holder, alias.nameId, alias, checkTypeDeclaredSameBracket = false)
super.visitTypeAlias(alias)
}
override def visitVariable(varr: ScVariable) {
if (typeAware && !compiled && (varr.getParent.isInstanceOf[ScTemplateBody] ||
varr.getParent.isInstanceOf[ScEarlyDefinitions])) {
checkOverrideVars(varr, holder, isInSources)
}
varr.typeElement match {
case Some(typ) => checkBoundsVariance(varr, holder, typ, varr, checkTypeDeclaredSameBracket = false)
case _ =>
}
if (!childHasAnnotation(varr.typeElement, "uncheckedVariance")) {
checkValueAndVariableVariance(varr, ScTypeParam.Covariant, varr.declaredElements, holder)
checkValueAndVariableVariance(varr, ScTypeParam.Contravariant, varr.declaredElements, holder)
}
super.visitVariable(varr)
}
override def visitValueDeclaration(v: ScValueDeclaration) {
checkAbstractMemberPrivateModifier(v, v.declaredElements.map(_.nameId), holder)
super.visitValueDeclaration(v)
}
override def visitValue(v: ScValue) {
if (typeAware && !compiled && (v.getParent.isInstanceOf[ScTemplateBody] ||
v.getParent.isInstanceOf[ScEarlyDefinitions])) {
checkOverrideVals(v, holder, isInSources)
}
v.typeElement match {
case Some(typ) => checkBoundsVariance(v, holder, typ, v, checkTypeDeclaredSameBracket = false)
case _ =>
}
if (!childHasAnnotation(v.typeElement, "uncheckedVariance")) {
checkValueAndVariableVariance(v, ScTypeParam.Covariant, v.declaredElements, holder)
}
super.visitValue(v)
}
override def visitClassParameter(parameter: ScClassParameter) {
if (typeAware && !compiled) {
checkOverrideClassParameters(parameter, holder)
}
super.visitClassParameter(parameter)
}
override def visitClass(cl: ScClass): Unit = {
if (typeAware && ValueClassType.isValueClass(cl)) annotateValueClass(cl, holder)
super.visitClass(cl)
}
}
annotateScope(element, holder)
element.accept(visitor)
AnnotatorHighlighter.highlightElement(element, holder)
element match {
case templateDefinition: ScTemplateDefinition =>
checkBoundsVariance(templateDefinition, holder, templateDefinition.nameId, templateDefinition.nameId, ScTypeParam.Covariant)
val tdParts = Seq(AbstractInstantiation, FinalClassInheritance, IllegalInheritance, ObjectCreationImpossible,
MultipleInheritance, NeedsToBeAbstract, NeedsToBeMixin, NeedsToBeTrait, SealedClassInheritance, UndefinedMember,
ValueClassInheritance)
tdParts.foreach(_.annotate(templateDefinition, holder, typeAware))
templateDefinition match {
case cls: ScClass =>
val clsParts = Seq(CaseClassWithoutParamList)
clsParts.foreach(_.annotate(cls, holder, typeAware))
case trt: ScTrait =>
val traitParts = Seq(TraitHasImplicitBound)
traitParts.foreach(_.annotate(trt, holder, typeAware))
case _ =>
}
case _ =>
}
element match {
case sTypeParam: ScTypeBoundsOwner =>
checkTypeParamBounds(sTypeParam, holder)
case _ =>
}
//todo: super[ControlFlowInspections].annotate(element, holder)
}
def isAdvancedHighlightingEnabled(element: PsiElement): Boolean = {
if (!HighlightingAdvisor.getInstance(element.getProject).enabled) return false
element.getContainingFile match {
case file: ScalaFile =>
if (file.isCompiled) return false
val vFile = file.getVirtualFile
if (vFile != null && ProjectFileIndex.SERVICE.getInstance(element.getProject).isInLibrarySource(vFile)) return false
case _ =>
}
val containingFile = element.getContainingFile
def calculate(): mutable.HashSet[TextRange] = {
val text = containingFile.getText
val indexes = new ArrayBuffer[Int]
var lastIndex = 0
while (text.indexOf("/*_*/", lastIndex) >= 0) {
lastIndex = text.indexOf("/*_*/", lastIndex) + 5
indexes += lastIndex
}
if (indexes.isEmpty) return mutable.HashSet.empty
if (indexes.length % 2 != 0) indexes += text.length
val res = new mutable.HashSet[TextRange]
for (i <- indexes.indices by 2) {
res += new TextRange(indexes(i), indexes(i + 1))
}
res
}
var data = containingFile.getUserData(ScalaAnnotator.ignoreHighlightingKey)
val count = containingFile.getManager.getModificationTracker.getModificationCount
if (data == null || data._1 != count) {
data = (count, calculate())
containingFile.putUserData(ScalaAnnotator.ignoreHighlightingKey, data)
}
val offset = element.getTextOffset
data._2.forall(!_.contains(offset))
}
def checkCatchBlockGeneralizedRule(block: ScCatchBlock, holder: AnnotationHolder, typeAware: Boolean) {
block.expression match {
case Some(expr) =>
val tp = expr.getType(TypingContext.empty).getOrAny
val throwable = ScalaPsiManager.instance(expr.getProject).getCachedClass(expr.getResolveScope, "java.lang.Throwable").orNull
if (throwable == null) return
val throwableType = ScDesignatorType(throwable)
def checkMember(memberName: String, checkReturnTypeIsBoolean: Boolean) {
val processor = new MethodResolveProcessor(expr, memberName, List(Seq(new Compatibility.Expression(throwableType))),
Seq.empty, Seq.empty)
processor.processType(tp, expr)
val candidates = processor.candidates
if (candidates.length != 1) {
val error = ScalaBundle.message("method.is.not.member", memberName, ScType.presentableText(tp))
val annotation = holder.createErrorAnnotation(expr, error)
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR)
} else if (checkReturnTypeIsBoolean) {
def error() {
val error = ScalaBundle.message("expected.type.boolean", memberName)
val annotation = holder.createErrorAnnotation(expr, error)
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR)
}
candidates(0) match {
case ScalaResolveResult(fun: ScFunction, subst) =>
if (fun.returnType.isEmpty || !Equivalence.equiv(subst.subst(fun.returnType.get), psi.types.Boolean)) {
error()
}
case _ => error()
}
} else {
block.getContext match {
case t: ScTryStmt =>
t.expectedTypeEx(fromUnderscore = false) match {
case Some((tp: ScType, _)) if tp equiv psi.types.Unit => //do nothing
case Some((tp: ScType, typeElement)) =>
import org.jetbrains.plugins.scala.lang.psi.types._
val returnType = candidates(0) match {
case ScalaResolveResult(fun: ScFunction, subst) => fun.returnType.map(subst.subst)
case _ => return
}
val expectedType = Success(tp, None)
val conformance = ScalaAnnotator.smartCheckConformance(expectedType, returnType)
if (!conformance) {
if (typeAware) {
val (retTypeText, expectedTypeText) = ScTypePresentation.different(returnType.getOrNothing, expectedType.get)
val error = ScalaBundle.message("expr.type.does.not.conform.expected.type", retTypeText, expectedTypeText)
val annotation: Annotation = holder.createErrorAnnotation(expr, error)
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
typeElement match {
//Don't highlight te if it's outside of original file.
case Some(te) if te.containingFile == t.containingFile =>
val fix = new ChangeTypeFix(te, returnType.getOrNothing)
annotation.registerFix(fix)
val teAnnotation = holder.createErrorAnnotation(te, null)
teAnnotation.setHighlightType(ProblemHighlightType.INFORMATION)
teAnnotation.registerFix(fix)
case None =>
}
}
}
case _ => //do nothing
}
case _ =>
}
}
}
checkMember("isDefinedAt", checkReturnTypeIsBoolean = true)
checkMember("apply", checkReturnTypeIsBoolean = false)
case _ =>
}
}
private def checkTypeParamBounds(sTypeParam: ScTypeBoundsOwner, holder: AnnotationHolder) {
for {
lower <- sTypeParam.lowerBound
upper <- sTypeParam.upperBound
if !Conformance.conforms(upper, lower)
annotation = holder.createErrorAnnotation(sTypeParam,
ScalaBundle.message("lower.bound.conform.to.upper", upper, lower))
} annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR)
}
private def registerUsedElement(element: PsiElement, resolveResult: ScalaResolveResult,
checkWrite: Boolean) {
val named = resolveResult.getActualElement match {
case isLightScNamedElement(e) => e
case e => e
}
val file = element.getContainingFile
if (named.isValid && named.getContainingFile == file &&
!PsiTreeUtil.isAncestor(named, element, true)) { //to filter recursive usages
val value: ValueUsed = element match {
case ref: ScReferenceExpression if checkWrite &&
ScalaPsiUtil.isPossiblyAssignment(ref.asInstanceOf[PsiElement]) => WriteValueUsed(named)
case _ => ReadValueUsed(named)
}
val holder = ScalaRefCountHolder.getInstance(file)
holder.registerValueUsed(value)
// For use of unapply method, see SCL-3463
resolveResult.parentElement.foreach(parent => holder.registerValueUsed(ReadValueUsed(parent)))
}
}
def checkBoundsVariance(toCheck: PsiElement, holder: AnnotationHolder, toHighlight: PsiElement, checkParentOf: PsiElement,
varianceOfUpper: Int = ScTypeParam.Covariant, checkTypeDeclaredSameBracket: Boolean = true, insideParameterized: Boolean = false) {
toCheck match {
case boundOwner: ScTypeBoundsOwner =>
checkAndHighlightBounds(boundOwner.upperTypeElement, varianceOfUpper)
checkAndHighlightBounds(boundOwner.lowerTypeElement, varianceOfUpper * -1)
case _ =>
}
toCheck match {
case paramOwner: ScTypeParametersOwner =>
val inParameterized = if (paramOwner.isInstanceOf[ScTemplateDefinition]) false else true
for (param <- paramOwner.typeParameters) {
checkBoundsVariance(param, holder, param.nameId, checkParentOf, varianceOfUpper * -1, insideParameterized = inParameterized)
}
case _ =>
}
def checkAndHighlightBounds(boundOption: Option[ScTypeElement], expectedVariance: Int) {
boundOption match {
case Some(bound) if !childHasAnnotation(Some(bound), "uncheckedVariance") =>
checkVariance(bound.calcType, expectedVariance, toHighlight, checkParentOf, holder, checkTypeDeclaredSameBracket, insideParameterized)
case _ =>
}
}
}
private def checkNotQualifiedReferenceElement(refElement: ScReferenceElement, holder: AnnotationHolder) {
refElement match {
case _: ScInterpolatedStringPartReference =>
return //do not inspect interpolated literals, they are highlighted elsewhere
case _ =>
}
def getFix: Seq[IntentionAction] = {
val classes = ScalaImportTypeFix.getTypesToImport(refElement, refElement.getProject)
if (classes.length == 0) return Seq.empty
Seq[IntentionAction](new ScalaImportTypeFix(classes, refElement))
}
val resolve: Array[ResolveResult] = refElement.multiResolve(false)
def processError(countError: Boolean, fixes: => Seq[IntentionAction]) {
//todo: remove when resolve of unqualified expressions is fully implemented
if (refElement.getManager.isInProject(refElement) && resolve.length == 0 &&
(fixes.length > 0 || countError)) {
val error = ScalaBundle.message("cannot.resolve", refElement.refName)
val annotation = holder.createErrorAnnotation(refElement.nameId, error)
annotation.setHighlightType(ProblemHighlightType.LIKE_UNKNOWN_SYMBOL)
registerAddImportFix(refElement, annotation, fixes: _*)
annotation.registerFix(ReportHighlightingErrorQuickFix)
registerCreateFromUsageFixesFor(refElement, annotation)
}
}
if (refElement.isSoft) {
return
}
val goodDoc = refElement.isInstanceOf[ScDocResolvableCodeReference] && resolve.length > 1
if (resolve.length != 1 && !goodDoc) {
if (resolve.length == 0) { //Let's try to hide dynamic named parameter usage
refElement match {
case e: ScReferenceExpression =>
e.getContext match {
case a: ScAssignStmt if a.getLExpression == e && a.isDynamicNamedAssignment => return
case _ =>
}
case _ =>
}
}
refElement match {
case e: ScReferenceExpression if e.getParent.isInstanceOf[ScPrefixExpr] &&
e.getParent.asInstanceOf[ScPrefixExpr].operation == e => //todo: this hides unresolved prefix operators, e.g. !(Not Boolean)
case e: ScReferenceExpression if e.getParent.isInstanceOf[ScInfixExpr] &&
e.getParent.asInstanceOf[ScInfixExpr].operation == e => //todo: this hides unresolved infix operators, e.g. A op B
case e: ScReferenceExpression => processError(countError = false, fixes = getFix)
case e: ScStableCodeReferenceElement if e.getParent.isInstanceOf[ScInfixPattern] &&
e.getParent.asInstanceOf[ScInfixPattern].reference == e => //todo: this hides unresolved A op B in patterns
case _ => refElement.getParent match {
case s: ScImportSelector if resolve.length > 0 =>
case _ => processError(countError = true, fixes = getFix)
}
}
} else {
AnnotatorHighlighter.highlightReferenceElement(refElement, holder)
def showError(): Unit = {
val error = ScalaBundle.message("forward.reference.detected")
val annotation = holder.createErrorAnnotation(refElement.nameId, error)
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
}
resolve(0) match {
case r: ScalaResolveResult if r.isForwardReference =>
ScalaPsiUtil.nameContext(r.getActualElement) match {
case v: ScValue if !v.hasModifierProperty("lazy") => showError()
case _: ScVariable | _: ScObject => showError()
case _ => //todo: check forward references for functions, classes, lazy values
}
case _ =>
}
}
for {
result <- resolve if result.isInstanceOf[ScalaResolveResult]
scalaResult = result.asInstanceOf[ScalaResolveResult]
} {
registerUsedImports(refElement, scalaResult)
registerUsedElement(refElement, scalaResult, checkWrite = true)
}
checkAccessForReference(resolve, refElement, holder)
checkForwardReference(resolve, refElement, holder)
if (resolve.length == 1) {
val resolveResult = resolve(0).asInstanceOf[ScalaResolveResult]
refElement match {
case e: ScReferenceExpression if e.getParent.isInstanceOf[ScPrefixExpr] &&
e.getParent.asInstanceOf[ScPrefixExpr].operation == e =>
resolveResult.implicitFunction match {
case Some(fun) =>
val pref = e.getParent.asInstanceOf[ScPrefixExpr]
val expr = pref.operand
highlightImplicitMethod(expr, resolveResult, refElement, fun, holder)
case _ =>
}
case e: ScReferenceExpression if e.getParent.isInstanceOf[ScInfixExpr] &&
e.getParent.asInstanceOf[ScInfixExpr].operation == e =>
resolveResult.implicitFunction match {
case Some(fun) =>
val inf = e.getParent.asInstanceOf[ScInfixExpr]
val expr = if (inf.isLeftAssoc) inf.rOp else inf.lOp
highlightImplicitMethod(expr, resolveResult, refElement, fun, holder)
case _ =>
}
case _ =>
}
}
if (isAdvancedHighlightingEnabled(refElement) && resolve.length != 1 && !goodDoc) {
val parent = refElement.getParent
def addCreateApplyOrUnapplyFix(messageKey: String, fix: ScTypeDefinition => IntentionAction): Boolean = {
val refWithoutArgs = ScalaPsiElementFactory.createReferenceFromText(refElement.getText, parent.getContext, parent)
if (refWithoutArgs.multiResolve(false).exists(!_.getElement.isInstanceOf[PsiPackage])) {
// We can't resolve the method call A(arg1, arg2), but we can resolve A. Highlight this differently.
val error = ScalaBundle.message(messageKey, refElement.refName)
val annotation = holder.createErrorAnnotation(refElement.nameId, error)
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR)
annotation.registerFix(ReportHighlightingErrorQuickFix)
refWithoutArgs match {
case ResolvesTo(obj: ScObject) => annotation.registerFix(fix(obj))
case InstanceOfClass(td: ScTypeDefinition) => annotation.registerFix(fix(td))
case _ =>
}
true
}
else false
}
refElement.getParent match {
case s: ScImportSelector if resolve.length > 0 => return
case mc: ScMethodCall =>
val messageKey = "cannot.resolve.apply.method"
if (addCreateApplyOrUnapplyFix(messageKey, td => new CreateApplyQuickFix(td, mc))) return
case Both(p: ScPattern, (_: ScConstructorPattern | _: ScInfixPattern)) =>
val messageKey = "cannot.resolve.unapply.method"
if (addCreateApplyOrUnapplyFix(messageKey, td => new CreateUnapplyQuickFix(td, p))) return
case _ =>
}
val error = ScalaBundle.message("cannot.resolve", refElement.refName)
val annotation = holder.createErrorAnnotation(refElement.nameId, error)
annotation.setHighlightType(ProblemHighlightType.LIKE_UNKNOWN_SYMBOL)
annotation.registerFix(ReportHighlightingErrorQuickFix)
registerCreateFromUsageFixesFor(refElement, annotation)
}
}
private def highlightImplicitMethod(expr: ScExpression, resolveResult: ScalaResolveResult, refElement: ScReferenceElement,
fun: PsiNamedElement, holder: AnnotationHolder) {
val typeTo = resolveResult.implicitType match {
case Some(tp) => tp
case _ => psi.types.Any
}
highlightImplicitView(expr, fun, typeTo, refElement.nameId, holder)
}
private def highlightImplicitView(expr: ScExpression, fun: PsiNamedElement, typeTo: ScType,
elementToHighlight: PsiElement, holder: AnnotationHolder) {
val range = elementToHighlight.getTextRange
val annotation: Annotation = holder.createInfoAnnotation(range, null)
annotation.setTextAttributes(DefaultHighlighter.IMPLICIT_CONVERSIONS)
annotation.setAfterEndOfLine(false)
}
private def checkSelfInvocation(self: ScSelfInvocation, holder: AnnotationHolder) {
self.bind match {
case Some(elem) =>
case None =>
if (isAdvancedHighlightingEnabled(self)) {
val annotation: Annotation = holder.createErrorAnnotation(self.thisElement,
"Cannot find constructor for this call")
annotation.setHighlightType(ProblemHighlightType.LIKE_UNKNOWN_SYMBOL)
}
}
}
private def checkQualifiedReferenceElement(refElement: ScReferenceElement, holder: AnnotationHolder) {
AnnotatorHighlighter.highlightReferenceElement(refElement, holder)
val resolve: Array[ResolveResult] = refElement.multiResolve(false)
for (result <- resolve if result.isInstanceOf[ScalaResolveResult];
scalaResult = result.asInstanceOf[ScalaResolveResult]) {
registerUsedImports(refElement, scalaResult)
registerUsedElement(refElement, scalaResult, checkWrite = true)
}
checkAccessForReference(resolve, refElement, holder)
if (refElement.isInstanceOf[ScExpression] &&
resolve.length == 1) {
val resolveResult = resolve(0).asInstanceOf[ScalaResolveResult]
resolveResult.implicitFunction match {
case Some(fun) =>
val qualifier = refElement.qualifier.get
val expr = qualifier.asInstanceOf[ScExpression]
highlightImplicitMethod(expr, resolveResult, refElement, fun, holder)
case _ =>
}
}
if (refElement.isInstanceOf[ScDocResolvableCodeReference] && resolve.length > 0 || refElement.isSoft) return
if (isAdvancedHighlightingEnabled(refElement) && resolve.length != 1) {
refElement.getParent match {
case _: ScImportSelector | _: ScImportExpr if resolve.length > 0 => return
case _ =>
}
val error = ScalaBundle.message("cannot.resolve", refElement.refName)
val annotation = holder.createErrorAnnotation(refElement.nameId, error)
annotation.setHighlightType(ProblemHighlightType.LIKE_UNKNOWN_SYMBOL)
annotation.registerFix(ReportHighlightingErrorQuickFix)
registerCreateFromUsageFixesFor(refElement, annotation)
}
}
private def checkForwardReference(resolve: Array[ResolveResult], refElement: ScReferenceElement, holder: AnnotationHolder) {
//todo: add check if it's legal to use forward reference
}
private def checkAccessForReference(resolve: Array[ResolveResult], refElement: ScReferenceElement, holder: AnnotationHolder) {
if (resolve.length != 1 || refElement.isSoft || refElement.isInstanceOf[ScDocResolvableCodeReferenceImpl]) return
resolve(0) match {
case r: ScalaResolveResult if !r.isAccessible =>
val error = "Symbol %s is inaccessible from this place".format(r.element.name)
val annotation = holder.createErrorAnnotation(refElement.nameId, error)
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
//todo: add fixes
case _ =>
}
}
private def highlightWrongInterpolatedString(l: ScInterpolatedStringLiteral, holder: AnnotationHolder) {
val ref = l.findReferenceAt(0)
val prefix = l.getFirstChild
val injections = l.getInjections
ref match {
case _: ScInterpolatedStringPartReference =>
case _ => return
}
def annotateBadPrefix(key: String) {
val annotation = holder.createErrorAnnotation(prefix.getTextRange,
ScalaBundle.message(key, prefix.getText))
annotation.setHighlightType(ProblemHighlightType.LIKE_UNKNOWN_SYMBOL)
}
ref.resolve() match {
case r: ScFunction =>
val elementsMap = mutable.HashMap[Int, PsiElement]()
val params = new mutable.StringBuilder("(")
injections.foreach { i =>
elementsMap += params.length -> i
params.append(i.getText).append(",")
}
if (injections.length > 0) params.setCharAt(params.length - 1, ')') else params.append(')')
val expr = l.getStringContextExpression.get
val shift = expr match {
case ScMethodCall(invoked, _) => invoked.getTextRange.getEndOffset
case _ => return
}
val fakeAnnotator = new AnnotationHolderImpl(Option(holder.getCurrentAnnotationSession)
.getOrElse(new AnnotationSession(l.getContainingFile))) {
override def createErrorAnnotation(elt: PsiElement, message: String): Annotation =
createErrorAnnotation(elt.getTextRange, message)
override def createErrorAnnotation(range: TextRange, message: String): Annotation = {
holder.createErrorAnnotation(elementsMap.getOrElse(range.getStartOffset - shift, prefix), message)
}
}
annotateReference(expr.asInstanceOf[ScMethodCall].getEffectiveInvokedExpr.
asInstanceOf[ScReferenceElement], fakeAnnotator)
case _ => annotateBadPrefix("cannot.resolve.in.StringContext")
}
}
private def registerAddImportFix(refElement: ScReferenceElement, annotation: Annotation, actions: IntentionAction*) {
for (action <- actions) {
annotation.registerFix(action)
}
}
private def registerUsedImports(element: PsiElement, result: ScalaResolveResult) {
ImportTracker.getInstance(element.getProject).
registerUsedImports(element.getContainingFile.asInstanceOf[ScalaFile], result.importsUsed)
}
private def checkMethodCallImplicitConversion(call: ScMethodCall, holder: AnnotationHolder) {
val importUsed = call.getImportsUsed
ImportTracker.getInstance(call.getProject).
registerUsedImports(call.getContainingFile.asInstanceOf[ScalaFile], importUsed)
}
private def checkExpressionType(expr: ScExpression, holder: AnnotationHolder, typeAware: Boolean) {
def checkExpressionTypeInner(fromUnderscore: Boolean) {
val ExpressionTypeResult(exprType, importUsed, implicitFunction) =
expr.getTypeAfterImplicitConversion(expectedOption = expr.smartExpectedType(fromUnderscore),
fromUnderscore = fromUnderscore)
ImportTracker.getInstance(expr.getProject).
registerUsedImports(expr.getContainingFile.asInstanceOf[ScalaFile], importUsed)
expr match {
case m: ScMatchStmt =>
case bl: ScBlock if bl.lastStatement != None =>
case i: ScIfStmt if i.elseBranch != None =>
case fun: ScFunctionExpr =>
case tr: ScTryStmt =>
case _ =>
expr.getParent match {
case a: ScAssignStmt if a.getRExpression == Some(expr) && a.isDynamicNamedAssignment => return
case args: ScArgumentExprList => return
case inf: ScInfixExpr if inf.getArgExpr == expr => return
case tuple: ScTuple if tuple.getContext.isInstanceOf[ScInfixExpr] &&
tuple.getContext.asInstanceOf[ScInfixExpr].getArgExpr == tuple => return
case e: ScParenthesisedExpr if e.getContext.isInstanceOf[ScInfixExpr] &&
e.getContext.asInstanceOf[ScInfixExpr].getArgExpr == e => return
case t: ScTypedStmt if t.isSequenceArg => return
case parent@(_: ScTuple | _: ScParenthesisedExpr) =>
parent.getParent match {
case inf: ScInfixExpr if inf.getArgExpr == parent => return
case _ =>
}
case param: ScParameter =>
if (!param.isDefaultParam) return //performance optimization
param.getRealParameterType() match {
case Success(paramType, _) if !paramType.isGenericType(Option(expr.getProject)) =>
//do not check generic types. See SCL-3508
case _ => return
}
case ass: ScAssignStmt if ass.isNamedParameter => return //that's checked in application annotator
case _ =>
}
expr.expectedTypeEx(fromUnderscore) match {
case Some((tp: ScType, _)) if tp equiv psi.types.Unit => //do nothing
case Some((tp: ScType, typeElement)) =>
import org.jetbrains.plugins.scala.lang.psi.types._
val expectedType = Success(tp, None)
implicitFunction match {
case Some(fun) =>
//todo:
/*val typeFrom = expr.getType(TypingContext.empty).getOrElse(Any)
val typeTo = exprType.getOrElse(Any)
val exprText = expr.getText
val range = expr.getTextRange
showImplicitUsageAnnotation(exprText, typeFrom, typeTo, fun, range, holder,
EffectType.LINE_UNDERSCORE, Color.LIGHT_GRAY)*/
case None => //do nothing
}
val conformance = ScalaAnnotator.smartCheckConformance(expectedType, exprType)
if (!conformance) {
if (typeAware) {
val markedPsi = (expr, expr.getParent) match {
case (b: ScBlockExpr, _) => b.getRBrace.map(_.getPsi).getOrElse(expr)
case (_, b: ScBlockExpr) => b.getRBrace.map(_.getPsi).getOrElse(expr)
case _ => expr
}
val (exprTypeText, expectedTypeText) = ScTypePresentation.different(exprType.getOrNothing, expectedType.get)
val error = ScalaBundle.message("expr.type.does.not.conform.expected.type", exprTypeText, expectedTypeText)
val annotation: Annotation = holder.createErrorAnnotation(markedPsi, error)
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
if (WrapInOptionQuickFix.isAvailable(expr, expectedType, exprType)) {
val wrapInOptionFix = new WrapInOptionQuickFix(expr, expectedType, exprType)
annotation.registerFix(wrapInOptionFix)
}
if (AddBreakoutQuickFix.isAvailable(expr)) {
annotation.registerFix(new AddBreakoutQuickFix(expr))
}
typeElement match {
case Some(te) if te.getContainingFile == expr.getContainingFile =>
val fix = new ChangeTypeFix(te, exprType.getOrNothing)
annotation.registerFix(fix)
val teAnnotation = holder.createErrorAnnotation(te, null)
teAnnotation.setHighlightType(ProblemHighlightType.INFORMATION)
teAnnotation.registerFix(fix)
case _ =>
}
}
}
case _ => //do nothing
}
}
}
if (ScUnderScoreSectionUtil.isUnderscoreFunction(expr)) {
checkExpressionTypeInner(fromUnderscore = true)
}
checkExpressionTypeInner(fromUnderscore = false)
}
private def checkExpressionImplicitParameters(expr: ScExpression, holder: AnnotationHolder) {
expr.findImplicitParameters match {
case Some(seq) =>
for (resolveResult <- seq) {
if (resolveResult != null) {
ImportTracker.getInstance(expr.getProject).registerUsedImports(
expr.getContainingFile.asInstanceOf[ScalaFile], resolveResult.importsUsed)
registerUsedElement(expr, resolveResult, checkWrite = false)
}
}
case _ =>
}
}
private def checkUnboundUnderscore(under: ScUnderscoreSection, holder: AnnotationHolder) {
if (under.getText == "_") {
ScalaPsiUtil.getParentOfType(under, classOf[ScVariableDefinition]) match {
case varDef @ ScVariableDefinition.expr(expr) if varDef.expr == Some(under) =>
if (varDef.containingClass == null) {
val error = ScalaBundle.message("local.variables.must.be.initialized")
val annotation: Annotation = holder.createErrorAnnotation(under, error)
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
} else if (varDef.typeElement.isEmpty) {
val error = ScalaBundle.message("unbound.placeholder.parameter")
val annotation: Annotation = holder.createErrorAnnotation(under, error)
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
}
case _ =>
// TODO SCL-2610 properly detect unbound placeholders, e.g. ( { _; (_: Int) } ) and report them.
// val error = ScalaBundle.message("unbound.placeholder.parameter")
// val annotation: Annotation = holder.createErrorAnnotation(under, error)
// annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
}
}
}
private def checkExplicitTypeForReturnStatement(ret: ScReturnStmt, holder: AnnotationHolder) {
val fun: ScFunction = PsiTreeUtil.getParentOfType(ret, classOf[ScFunction])
fun match {
case null =>
val error = ScalaBundle.message("return.outside.method.definition")
val annotation: Annotation = holder.createErrorAnnotation(ret.returnKeyword, error)
annotation.setHighlightType(ProblemHighlightType.LIKE_UNKNOWN_SYMBOL)
case _ if !fun.hasAssign || fun.returnType.exists(_ == psi.types.Unit) =>
return
case _ => fun.returnTypeElement match {
case Some(x: ScTypeElement) =>
import org.jetbrains.plugins.scala.lang.psi.types._
val funType = fun.returnType
funType match {
case Success(tp: ScType, _) if tp equiv psi.types.Unit => return //nothing to check
case _ =>
}
val ExpressionTypeResult(_, importUsed, _) = ret.expr match {
case Some(e: ScExpression) => e.getTypeAfterImplicitConversion()
case None => ExpressionTypeResult(Success(psi.types.Unit, None), Set.empty, None)
}
ImportTracker.getInstance(ret.getProject).registerUsedImports(ret.getContainingFile.asInstanceOf[ScalaFile], importUsed)
case _ =>
}
}
}
private def checkForStmtUsedTypes(f: ScForStatement, holder: AnnotationHolder) {
ImportTracker.getInstance(f.getProject).registerUsedImports(f.getContainingFile.asInstanceOf[ScalaFile],
ScalaPsiUtil.getExprImports(f))
}
private def checkImportExpr(impExpr: ScImportExpr, holder: AnnotationHolder) {
if (impExpr.qualifier == null) {
val annotation: Annotation = holder.createErrorAnnotation(impExpr.getTextRange,
ScalaBundle.message("import.expr.should.be.qualified"))
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR)
}
}
private def checkTypeElementForm(typeElement: ScTypeElement, holder: AnnotationHolder) {
//todo: check bounds conformance for parameterized type
typeElement match {
case simpleTypeElement: ScSimpleTypeElement =>
simpleTypeElement.findImplicitParameters match {
case Some(parameters) =>
parameters.foreach {
case r: ScalaResolveResult =>
registerUsedImports(typeElement, r)
case null =>
}
case _ =>
}
case _ =>
}
}
private def checkAnnotationType(annotation: ScAnnotation, holder: AnnotationHolder) {
//todo: check annotation is inheritor for class scala.Annotation
}
def childHasAnnotation(teOption: Option[ScTypeElement], annotation: String): Boolean = teOption match {
case Some(te) => te.breadthFirst.exists {
case annot: ScAnnotationExpr =>
annot.constr.reference match {
case Some(ref) => Option(ref.resolve()) match {
case Some(res: PsiNamedElement) => res.getName == annotation
case _ => false
}
case _ => false
}
case _ => false
}
case _ => false
}
private def checkFunctionForVariance(fun: ScFunction, holder: AnnotationHolder) {
if (!modifierIsThis(fun) && !compoundType(fun)) { //if modifier contains [this] or if it is a compound type we do not highlight it
checkBoundsVariance(fun, holder, fun.nameId, fun.getParent)
if (!childHasAnnotation(fun.returnTypeElement, "uncheckedVariance")) {
fun.returnType match {
case Success(returnType, _) =>
checkVariance(ScType.expandAliases(returnType).getOrType(returnType), ScTypeParam.Covariant, fun.nameId,
fun.getParent, holder)
case _ =>
}
}
for (parameter <- fun.parameters) {
parameter.typeElement match {
case Some(te) if !childHasAnnotation(Some(te), "uncheckedVariance") =>
checkVariance(ScType.expandAliases(te.calcType).getOrType(te.calcType), ScTypeParam.Contravariant,
parameter.nameId, fun.getParent, holder)
case _ =>
}
}
}
}
def checkValueAndVariableVariance(toCheck: ScDeclaredElementsHolder, variance: Int,
declaredElements: Seq[TypingContextOwner with ScNamedElement], holder: AnnotationHolder) {
if (!modifierIsThis(toCheck)) { //skip members qualified with private[this]/protected[this]
for (element <- declaredElements) {
element.getType() match {
case Success(tp, _) =>
ScType.expandAliases(tp) match { //so type alias is highlighted
case Success(newTp, _) => checkVariance(newTp, variance, element.nameId, toCheck, holder)
case _ => checkVariance(tp, variance, element.nameId, toCheck, holder)
}
case _ =>
}
}
}
}
def modifierIsThis(toCheck: PsiElement): Boolean = {
toCheck match {
case modifierOwner: ScModifierListOwner => modifierOwner.getModifierList.accessModifier.exists(_.isThis)
case _ => false
}
}
def compoundType(toCheck: PsiElement): Boolean = {
toCheck.getParent.getParent match {
case _: ScCompoundTypeElement => true
case _ => false
}
}
//fix for SCL-807
private def checkVariance(typeParam: ScType, variance: Int, toHighlight: PsiElement, checkParentOf: PsiElement,
holder: AnnotationHolder, checkIfTypeIsInSameBrackets: Boolean = false, insideParameterized: Boolean = false) = {
def highlightVarianceError(varianceOfElement: Int, varianceOfPosition: Int, name: String) = {
if (varianceOfPosition != varianceOfElement && varianceOfElement != ScTypeParam.Invariant) {
val pos =
if (toHighlight.isInstanceOf[ScVariable]) toHighlight.getText + "_="
else toHighlight.getText
val place = if (toHighlight.isInstanceOf[ScFunction]) "method" else "value"
val elementVariance =
if (varianceOfElement == 1) "covariant"
else "contravariant"
val posVariance =
if (varianceOfPosition == 1) "covariant"
else if (varianceOfPosition == -1) "contravariant"
else "invariant"
val annotation = holder.createErrorAnnotation(toHighlight,
ScalaBundle.message(s"$elementVariance.type.$posVariance.position.of.$place", name, typeParam.toString, pos))
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR)
}
}
def functionToSendIn(tp: ScType, i: Int) = {
tp match {
case paramType: ScTypeParameterType =>
paramType.param match {
case scTypeParam: ScTypeParam =>
val compareTo = scTypeParam.owner
val parentIt = checkParentOf.parents
//if it's a function inside function we do not highlight it unless trait or class is defined inside this function
parentIt.find(e => e == compareTo || e.isInstanceOf[ScFunction]) match {
case Some(_: ScFunction) =>
case _ =>
def findVariance: Int = {
if (!checkIfTypeIsInSameBrackets) return i
if (PsiTreeUtil.isAncestor(scTypeParam.getParent, toHighlight, false))
//we do not highlight element if it was declared inside parameterized type.
if (!scTypeParam.getParent.getParent.isInstanceOf[ScTemplateDefinition]) return scTypeParam.variance
else return i * -1
if (toHighlight.getParent == scTypeParam.getParent.getParent) return i * -1
i
}
highlightVarianceError(scTypeParam.variance, findVariance, paramType.name)
}
case _ =>
}
case _ =>
}
(false, tp)
}
typeParam.recursiveVarianceUpdate(functionToSendIn, variance)
}
//fix for SCL-7176
private def checkAbstractMemberPrivateModifier(element: PsiElement, toHighlight: Seq[PsiElement], holder: AnnotationHolder) {
element match {
case modOwner: ScModifierListOwner =>
modOwner.getModifierList.accessModifier match {
case Some(am) if am.isUnqualifiedPrivateOrThis =>
for (e <- toHighlight) {
val annotation = holder.createErrorAnnotation(e, ScalaBundle.message("abstract.member.not.have.private.modifier"))
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR)
}
case _ =>
}
case _ =>
}
}
private def checkIntegerLiteral(literal: ScLiteral, holder: AnnotationHolder) {
val child = literal.getFirstChild.getNode
val text = literal.getText
val endsWithL = child.getText.endsWith('l') || child.getText.endsWith('L')
val textWithoutL = if (endsWithL) text.substring(0, text.length - 1) else text
val parent = literal.getParent
val scalaVersion = literal.scalaLanguageLevel
val isNegative = parent match {
// only "-1234" is negative, "- 1234" should be considered as positive 1234
case prefixExpr: ScPrefixExpr if prefixExpr.getChildren.size == 2 && prefixExpr.getFirstChild.getText == "-" => true
case _ => false
}
val (number, base) = textWithoutL match {
case t if t.startsWith("0x") || t.startsWith("0X") => (t.substring(2), 16)
case t if t.startsWith("0") && t.length >= 2 => (t.substring(1), 8)
case t => (t, 10)
}
// Parses the integer literal. The result is (Option(value), statusCode).
// Option(value) holds the actual integer represented by the literal; it is None if the value does not fit in a Long.
// statusCode takes one of three values:
// 0 -> the literal fits in an Int
// 1 -> the literal fits in a Long
// 2 -> the literal does not fit in a Long
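// For illustration, assuming the enclosing `number` and `base` fields hold the inputs shown below
// (the `text` parameter is not read inside the parser itself), the expected results are roughly:
// number = "42", base = 10, isNegative = false -> (Some(42L), 0) // fits in an Int
// number = "3000000000", base = 10, isNegative = false -> (Some(3000000000L), 1) // fits only in a Long
// number = "18446744073709551616", base = 10, isNegative = false -> (None, 2) // 2^64 does not fit even in a Long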
def parseIntegerNumber(text: String, isNegative: Boolean): (Option[Long], Byte) = {
var value = 0L
val divider = if (base == 10) 1 else 2
var statusCode: Byte = 0
val limit = java.lang.Long.MAX_VALUE
val intLimit = java.lang.Integer.MAX_VALUE
var i = 0
for (d <- number.map(_.asDigit)) {
if (value > intLimit ||
intLimit / (base / divider) < value ||
intLimit - (d / divider) < value * (base / divider) &&
// This checks for -2147483648, value is 214748364, base is 10, d is 8. This check returns false.
// bases 8 and 16 do not need this check because the divider is 2.
!(isNegative && intLimit == value * base - 1 + d)) {
statusCode = 1
}
if (value < 0 ||
limit / (base / divider) < value ||
limit - (d / divider) < value * (base / divider) &&
// This checks for Long.MinValue, same as the previous Int.MinValue check.
!(isNegative && limit == value * base - 1 + d)) {
return (None, 2)
}
value = value * base + d
i += 1
}
value = if (isNegative) -value else value
if (statusCode == 0) (Some(value.toInt), 0) else (Some(value), statusCode)
}
if (base == 8) {
val convertFix = new ConvertOctalToHexFix(literal)
scalaVersion match {
case Some(ScalaLanguageLevel.Scala_2_10) =>
val deprecatedMessage = "Octal number is deprecated in Scala-2.10 and will be removed in Scala-2.11"
val annotation = holder.createWarningAnnotation(literal, deprecatedMessage)
annotation.setHighlightType(ProblemHighlightType.LIKE_DEPRECATED)
annotation.registerFix(convertFix)
case Some(version) if version >= ScalaLanguageLevel.Scala_2_11 =>
val error = "Octal number is removed in Scala-2.11 and after"
val annotation = holder.createErrorAnnotation(literal, error)
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR)
annotation.registerFix(convertFix)
return
case _ =>
}
}
val (_, status) = parseIntegerNumber(number, isNegative)
if (status == 2) { // the Integer number is out of range even for Long
val error = "Integer number is out of range even for type Long"
val annotation = holder.createErrorAnnotation(literal, error)
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
} else {
if (status == 1 && !endsWithL) {
val error = "Integer number is out of range for type Int"
val annotation = if (isNegative) holder.createErrorAnnotation(parent, error) else holder.createErrorAnnotation(literal, error)
annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
val bigIntType = ScalaPsiElementFactory.createTypeFromText("_root_.scala.math.BigInt", literal.getContext, literal)
val conformsToTypeList = List(Long, bigIntType)
val shouldRegisterFix = if (isNegative)
parent.asInstanceOf[ScPrefixExpr].expectedType().map(x => conformsToTypeList.exists(_.weakConforms(x))).getOrElse(true)
else literal.expectedType().map(x => conformsToTypeList.exists(_.weakConforms(x))).getOrElse(true)
if (shouldRegisterFix) {
val addLtoLongFix: AddLToLongLiteralFix = new AddLToLongLiteralFix(literal)
annotation.registerFix(addLtoLongFix)
}
}
}
}
}
object ScalaAnnotator {
val ignoreHighlightingKey: Key[(Long, mutable.HashSet[TextRange])] = Key.create("ignore.highlighting.key")
val usedImportsKey: Key[mutable.HashSet[ImportUsed]] = Key.create("used.imports.key")
/**
* Returns the result of the conformance check when it is possible to perform it.
* Otherwise it returns true, to avoid false "red code" highlighting.
* Conformance is checked as for an assignment l = r.
*/
def smartCheckConformance(l: TypeResult[ScType], r: TypeResult[ScType]): Boolean = {
val leftType = l match {
case Success(res, _) => res
case _ => return true
}
val rightType = r match {
case Success(res, _) => res
case _ => return true
}
Conformance.conforms(leftType, rightType)
}
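// For illustration, assuming psi.types.Int and psi.types.AnyVal denote the usual standard types and
// that Conformance.conforms(l, r) checks that r conforms to l (as it is used above):
// smartCheckConformance(Success(psi.types.AnyVal, None), Success(psi.types.Int, None)) -> true, since Int conforms to AnyVal
// smartCheckConformance(Success(psi.types.Int, None), Success(psi.types.AnyVal, None)) -> false, so the caller may report a mismatch
// smartCheckConformance(<any non-Success result>, Success(psi.types.Int, None)) -> true, because an unknown side is never flagged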
}
|
JetBrains/intellij-scala-historical
|
src/org/jetbrains/plugins/scala/annotator/ScalaAnnotator.scala
|
Scala
|
apache-2.0
| 60,381
|
package com.sageserpent.americium.java
import cats.implicits._
import cats.{Functor, Semigroupal}
import com.sageserpent.americium.java.{
Trials => JavaTrials,
TrialsScaffolding => JavaTrialsScaffolding
}
import cyclops.data.tuple.{
Tuple2 => JavaTuple2,
Tuple3 => JavaTuple3,
Tuple4 => JavaTuple4
}
import cyclops.function.{Consumer3, Consumer4}
import _root_.java.util.{Iterator => JavaIterator}
import java.util.function.{BiConsumer, Consumer}
object tupleTrials {
implicit val functorInstance: Functor[JavaTrials] = new Functor[JavaTrials] {
override def map[A, B](fa: JavaTrials[A])(
f: A => B
): JavaTrials[B] = fa.map(f.apply _)
}
implicit val semigroupalInstance: Semigroupal[JavaTrials] =
new Semigroupal[JavaTrials] {
override def product[A, B](
fa: JavaTrials[A],
fb: JavaTrials[B]
): JavaTrials[(A, B)] = for {
a <- fa
b <- fb
} yield a -> b
}
// TODO: something clever with Magnolia, `HList`, something else in Shapeless
// or just raw macros that saves the bother of churning out lots of copied
// boilerplate...
// TODO: something equally as clever where these implementations use ByteBuddy
// to delegate to the trials of tuple instances, and when that is done, we can
// hoist *most* of the orphaned methods in `Trials` up to `TrialsScaffolding`
// - well, all but the `and` combinator. I'm not going near that last one.
class Tuple2Trials[Case1, Case2](
firstTrials: JavaTrials[Case1],
secondTrials: JavaTrials[Case2]
) extends JavaTrialsScaffolding.Tuple2Trials[Case1, Case2] {
private def trialsOfPairs: JavaTrials[JavaTuple2[Case1, Case2]] =
(firstTrials, secondTrials).mapN(JavaTuple2.of[Case1, Case2])
override def reproduce(
recipe: String
): JavaTuple2[Case1, Case2] = trialsOfPairs.reproduce(recipe)
override def and[Case3](
thirdTrials: JavaTrials[Case3]
): JavaTrialsScaffolding.Tuple3Trials[Case1, Case2, Case3] =
new Tuple3Trials(firstTrials, secondTrials, thirdTrials)
trait SupplyToSyntaxTuple2
extends JavaTrialsScaffolding.Tuple2Trials.SupplyToSyntaxTuple2[
Case1,
Case2
] {
def supplyTo(biConsumer: BiConsumer[Case1, Case2]): Unit = {
supplyTo((pair: JavaTuple2[Case1, Case2]) =>
biConsumer.accept(pair._1, pair._2)
)
}
override def supplyTo(
consumer: Consumer[JavaTuple2[Case1, Case2]]
): Unit = {
supplyToSyntax.supplyTo(consumer)
}
override def asIterator: JavaIterator[JavaTuple2[Case1, Case2]] =
supplyToSyntax.asIterator
protected val supplyToSyntax: TrialsScaffolding.SupplyToSyntax[
JavaTuple2[Case1, Case2]
]
}
override def withLimit(
limit: Int
): SupplyToSyntaxTuple2 = new SupplyToSyntaxTuple2 {
val supplyToSyntax
: JavaTrialsScaffolding.SupplyToSyntax[JavaTuple2[Case1, Case2]] =
trialsOfPairs.withLimit(limit)
}
override def withLimit(
limit: Int,
complexityLimit: Int
): SupplyToSyntaxTuple2 = new SupplyToSyntaxTuple2 {
val supplyToSyntax
: JavaTrialsScaffolding.SupplyToSyntax[JavaTuple2[Case1, Case2]] =
trialsOfPairs.withLimit(limit, complexityLimit)
}
override def withLimits(
casesLimit: Int,
optionalLimits: TrialsScaffolding.OptionalLimits
): SupplyToSyntaxTuple2 = new SupplyToSyntaxTuple2 {
val supplyToSyntax
: JavaTrialsScaffolding.SupplyToSyntax[JavaTuple2[Case1, Case2]] =
trialsOfPairs.withLimits(casesLimit, optionalLimits)
}
override def withLimits(
casesLimit: Int,
optionalLimits: TrialsScaffolding.OptionalLimits,
shrinkageStop: TrialsScaffolding.ShrinkageStop[
_ >: JavaTuple2[Case1, Case2]
]
): SupplyToSyntaxTuple2 = new SupplyToSyntaxTuple2 {
val supplyToSyntax
: JavaTrialsScaffolding.SupplyToSyntax[JavaTuple2[Case1, Case2]] =
trialsOfPairs.withLimits(casesLimit, optionalLimits, shrinkageStop)
}
override def withRecipe(
recipe: String
): SupplyToSyntaxTuple2 = new SupplyToSyntaxTuple2 {
val supplyToSyntax
: JavaTrialsScaffolding.SupplyToSyntax[JavaTuple2[Case1, Case2]] =
trialsOfPairs.withRecipe(recipe)
}
}
class Tuple3Trials[Case1, Case2, Case3](
firstTrials: JavaTrials[Case1],
secondTrials: JavaTrials[Case2],
thirdTrials: JavaTrials[Case3]
) extends JavaTrialsScaffolding.Tuple3Trials[Case1, Case2, Case3] {
private def trialsOfTriples: JavaTrials[JavaTuple3[Case1, Case2, Case3]] =
(firstTrials, secondTrials, thirdTrials).mapN(
JavaTuple3.of[Case1, Case2, Case3]
)
override def reproduce(
recipe: String
): JavaTuple3[Case1, Case2, Case3] = trialsOfTriples.reproduce(recipe)
override def and[Case4](
fourthTrials: JavaTrials[Case4]
): JavaTrialsScaffolding.Tuple4Trials[Case1, Case2, Case3, Case4] =
new Tuple4Trials(firstTrials, secondTrials, thirdTrials, fourthTrials)
trait SupplyToSyntaxTuple3
extends JavaTrialsScaffolding.Tuple3Trials.SupplyToSyntaxTuple3[
Case1,
Case2,
Case3
] {
def supplyTo(triConsumer: Consumer3[Case1, Case2, Case3]): Unit = {
supplyTo((triple: JavaTuple3[Case1, Case2, Case3]) =>
triConsumer.accept(triple._1, triple._2, triple._3)
)
}
override def supplyTo(
consumer: Consumer[JavaTuple3[Case1, Case2, Case3]]
): Unit = { supplyToSyntax.supplyTo(consumer) }
override def asIterator: JavaIterator[JavaTuple3[Case1, Case2, Case3]] =
supplyToSyntax.asIterator
protected val supplyToSyntax: TrialsScaffolding.SupplyToSyntax[
JavaTuple3[Case1, Case2, Case3]
]
}
override def withLimit(
limit: Int
): SupplyToSyntaxTuple3 = new SupplyToSyntaxTuple3 {
val supplyToSyntax: JavaTrialsScaffolding.SupplyToSyntax[
JavaTuple3[Case1, Case2, Case3]
] =
trialsOfTriples.withLimit(limit)
}
override def withLimit(
limit: Int,
complexityLimit: Int
): SupplyToSyntaxTuple3 = new SupplyToSyntaxTuple3 {
val supplyToSyntax: JavaTrialsScaffolding.SupplyToSyntax[
JavaTuple3[Case1, Case2, Case3]
] =
trialsOfTriples.withLimit(limit, complexityLimit)
}
override def withLimits(
casesLimit: Int,
optionalLimits: TrialsScaffolding.OptionalLimits
): SupplyToSyntaxTuple3 = new SupplyToSyntaxTuple3 {
val supplyToSyntax: JavaTrialsScaffolding.SupplyToSyntax[
JavaTuple3[Case1, Case2, Case3]
] =
trialsOfTriples.withLimits(casesLimit, optionalLimits)
}
override def withLimits(
casesLimit: Int,
optionalLimits: TrialsScaffolding.OptionalLimits,
shrinkageStop: TrialsScaffolding.ShrinkageStop[
_ >: JavaTuple3[Case1, Case2, Case3]
]
): SupplyToSyntaxTuple3 = new SupplyToSyntaxTuple3 {
val supplyToSyntax: JavaTrialsScaffolding.SupplyToSyntax[
JavaTuple3[Case1, Case2, Case3]
] =
trialsOfTriples.withLimits(casesLimit, optionalLimits, shrinkageStop)
}
override def withRecipe(
recipe: String
): JavaTrialsScaffolding.Tuple3Trials.SupplyToSyntaxTuple3[
Case1,
Case2,
Case3
] =
new SupplyToSyntaxTuple3 {
val supplyToSyntax: JavaTrialsScaffolding.SupplyToSyntax[
JavaTuple3[Case1, Case2, Case3]
] =
trialsOfTriples.withRecipe(recipe)
}
}
class Tuple4Trials[Case1, Case2, Case3, Case4](
firstTrials: JavaTrials[Case1],
secondTrials: JavaTrials[Case2],
thirdTrials: JavaTrials[Case3],
fourthTrials: JavaTrials[Case4]
) extends JavaTrialsScaffolding.Tuple4Trials[Case1, Case2, Case3, Case4] {
private def trialsOfQuadruples: JavaTrials[
JavaTuple4[Case1, Case2, Case3, Case4]
] = (firstTrials, secondTrials, thirdTrials, fourthTrials).mapN(
JavaTuple4.of[Case1, Case2, Case3, Case4]
)
override def reproduce(
recipe: String
): JavaTuple4[Case1, Case2, Case3, Case4] =
trialsOfQuadruples.reproduce(recipe)
trait SupplyToSyntaxTuple4
extends JavaTrialsScaffolding.Tuple4Trials.SupplyToSyntaxTuple4[
Case1,
Case2,
Case3,
Case4
] {
def supplyTo(
quadConsumer: Consumer4[Case1, Case2, Case3, Case4]
): Unit = {
supplyTo((quadruple: JavaTuple4[Case1, Case2, Case3, Case4]) =>
quadConsumer.accept(
quadruple._1,
quadruple._2,
quadruple._3,
quadruple._4
)
)
}
override def supplyTo(
consumer: Consumer[JavaTuple4[Case1, Case2, Case3, Case4]]
): Unit = { supplyToSyntax.supplyTo(consumer) }
override def asIterator
: JavaIterator[JavaTuple4[Case1, Case2, Case3, Case4]] =
supplyToSyntax.asIterator
protected val supplyToSyntax: TrialsScaffolding.SupplyToSyntax[
JavaTuple4[Case1, Case2, Case3, Case4]
]
}
override def withLimit(
limit: Int
): SupplyToSyntaxTuple4 = new SupplyToSyntaxTuple4 {
val supplyToSyntax: JavaTrialsScaffolding.SupplyToSyntax[
JavaTuple4[Case1, Case2, Case3, Case4]
] =
trialsOfQuadruples.withLimit(limit)
}
override def withLimit(
limit: Int,
complexityLimit: Int
): SupplyToSyntaxTuple4 = new SupplyToSyntaxTuple4 {
val supplyToSyntax: JavaTrialsScaffolding.SupplyToSyntax[
JavaTuple4[Case1, Case2, Case3, Case4]
] =
trialsOfQuadruples.withLimit(limit, complexityLimit)
}
override def withLimits(
casesLimit: Int,
optionalLimits: TrialsScaffolding.OptionalLimits
): SupplyToSyntaxTuple4 = new SupplyToSyntaxTuple4 {
val supplyToSyntax: JavaTrialsScaffolding.SupplyToSyntax[
JavaTuple4[Case1, Case2, Case3, Case4]
] =
trialsOfQuadruples.withLimits(casesLimit, optionalLimits)
}
override def withLimits(
casesLimit: Int,
optionalLimits: TrialsScaffolding.OptionalLimits,
shrinkageStop: TrialsScaffolding.ShrinkageStop[
_ >: JavaTuple4[Case1, Case2, Case3, Case4]
]
): SupplyToSyntaxTuple4 = new SupplyToSyntaxTuple4 {
val supplyToSyntax: JavaTrialsScaffolding.SupplyToSyntax[
JavaTuple4[Case1, Case2, Case3, Case4]
] =
trialsOfQuadruples.withLimits(casesLimit, optionalLimits, shrinkageStop)
}
override def withRecipe(
recipe: String
): SupplyToSyntaxTuple4 = new SupplyToSyntaxTuple4 {
val supplyToSyntax: JavaTrialsScaffolding.SupplyToSyntax[
JavaTuple4[Case1, Case2, Case3, Case4]
] =
trialsOfQuadruples.withRecipe(recipe)
}
}
}
|
sageserpent-open/americium
|
src/main/scala/com/sageserpent/americium/java/tupleTrials.scala
|
Scala
|
mit
| 11,038
|
package score.discord.canti.jdamocks
import java.util
import java.util.concurrent.locks.ReentrantReadWriteLock
import net.dv8tion.jda.api.utils.cache.CacheView
import net.dv8tion.jda.api.utils.{ClosableIterator, LockIterator}
import scala.jdk.CollectionConverters.*
open class ScalaCacheView[T](cache: Iterable[T], getName: T => String) extends CacheView[T]:
val lock = ReentrantReadWriteLock()
override def asList(): util.List[T] = util.ArrayList[T](cache.asJavaCollection)
override def asSet(): util.NavigableSet[T] = util.TreeSet[T](cache.asJavaCollection)
override def size(): Long = cache.size
export cache.isEmpty
override def getElementsByName(name: String, ignoreCase: Boolean): util.List[T] =
val eq = if ignoreCase then name.equalsIgnoreCase else name.equals
util.ArrayList[T](cache.filter(el => eq(getName(el))).asJavaCollection)
override def stream(): util.stream.Stream[T] = cache.asJavaCollection.stream().nn
override def parallelStream(): util.stream.Stream[T] = cache.asJavaCollection.parallelStream().nn
override def iterator(): util.Iterator[T] = cache.iterator.asJava
override def lockedIterator(): ClosableIterator[T] =
val readLock = lock.readLock.nn
readLock.lock()
try LockIterator[T](cache.iterator.asJava, readLock)
catch
case t: Throwable =>
readLock.unlock()
throw t
|
ScoreUnder/canti-bot
|
src/test/scala/score/discord/canti/jdamocks/ScalaCacheView.scala
|
Scala
|
agpl-3.0
| 1,373
|
package com.avsystem.commons.misc
import com.avsystem.commons.SharedExtensions._
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
class OptArgTest extends AnyFunSuite with Matchers {
test("nonempty") {
val opt = OptArg(23)
opt match {
case OptArg(num) => assert(num == 23)
}
}
test("empty") {
val str: String = null
val opt = OptArg(str)
opt match {
case OptArg.Empty =>
}
}
test("null some") {
intercept[NullPointerException](OptArg.some[String](null))
}
def takeMaybeString(str: OptArg[String] = OptArg.Empty): Opt[String] = str.toOpt
test("argument passing") {
takeMaybeString() shouldEqual Opt.Empty
takeMaybeString("stringzor") shouldEqual "stringzor".opt
}
}
|
AVSystem/scala-commons
|
commons-core/src/test/scala/com/avsystem/commons/misc/OptArgTest.scala
|
Scala
|
mit
| 780
|
package org.scalaide.ui.internal.editor
import org.eclipse.core.runtime.Status
import org.eclipse.core.runtime.jobs.Job
import org.eclipse.jdt.core.dom.CompilationUnit
import org.eclipse.jface.text.IDocumentExtension4
import org.eclipse.jface.text.ITextSelection
import org.eclipse.jface.text.Position
import org.eclipse.jface.text.source.Annotation
import org.eclipse.jface.text.source.IAnnotationModel
import org.eclipse.jface.viewers.ISelection
import org.eclipse.ui.ISelectionListener
import org.eclipse.ui.IWorkbenchPart
import org.scalaide.core.internal.decorators.markoccurrences.Occurrences
import org.scalaide.core.internal.decorators.markoccurrences.ScalaOccurrencesFinder
import org.scalaide.util.Utils
import org.scalaide.util.eclipse.EclipseUtils
import org.scalaide.util.eclipse.EditorUtils
import org.scalaide.util.internal.eclipse.AnnotationUtils._
/**
* Contains functionality to enable the mark occurrences feature for Scala
* compilation unit editors.
*/
trait MarkOccurrencesEditorExtension extends ScalaCompilationUnitEditor {
// needs to be lazy because [[getInteractiveCompilationUnit]] succeeds only after the editor is fully loaded
private lazy val occurrencesFinder = new ScalaOccurrencesFinder(getInteractiveCompilationUnit)
private var occurrenceAnnotations: Set[Annotation] = Set()
private var occurencesFinderInstalled = false
private var runningJob: Job = _
private lazy val selectionListener = new ISelectionListener() {
override def selectionChanged(part: IWorkbenchPart, selection: ISelection) = {
selection match {
case textSel: ITextSelection => requireOccurrencesUpdate(textSel)
case _ =>
}
}
}
override def updateOccurrenceAnnotations(selection: ITextSelection, astRoot: CompilationUnit): Unit = {
requireOccurrencesUpdate(selection)
}
override def installOccurrencesFinder(forceUpdate: Boolean): Unit = {
if (!occurencesFinderInstalled) {
getEditorSite.getPage.addPostSelectionListener(selectionListener)
occurencesFinderInstalled = true
}
}
override def uninstallOccurrencesFinder(): Unit = {
occurencesFinderInstalled = false
getEditorSite.getPage.removePostSelectionListener(selectionListener)
removeScalaOccurrenceAnnotations()
}
/** Clear the existing Mark Occurrences annotations.
*/
private def removeScalaOccurrenceAnnotations() = {
for (annotationModel <- getAnnotationModelOpt) annotationModel.withLock {
annotationModel.replaceAnnotations(occurrenceAnnotations, Map())
occurrenceAnnotations = Set()
}
}
private def requireOccurrencesUpdate(selection: ITextSelection): Unit = {
def spawnNewJob(lastModified: Long) = {
runningJob = EclipseUtils.scheduleJob("Updating occurrence annotations", priority = Job.DECORATE) { monitor =>
Option(getInteractiveCompilationUnit) foreach { cu =>
val fileName = cu.file.name
Utils.debugTimed(s"""Time elapsed for "updateOccurrences" in source $fileName""") {
performOccurrencesUpdate(selection, lastModified)
}
}
Status.OK_STATUS
}
}
if (selection.getLength >= 0
&& selection.getOffset >= 0
&& getDocumentProvider != null
&& EditorUtils.isActiveEditor(this))
sourceViewer.getDocument match {
// don't spawn a new job when another one is already running
case document: IDocumentExtension4 if runningJob == null || runningJob.getState == Job.NONE =>
spawnNewJob(document.getModificationStamp)
case _ =>
}
}
private def performOccurrencesUpdate(selection: ITextSelection, documentLastModified: Long) = {
val annotations = getAnnotations(selection, documentLastModified)
for(annotationModel <- getAnnotationModelOpt) annotationModel.withLock {
annotationModel.replaceAnnotations(occurrenceAnnotations, annotations)
occurrenceAnnotations = annotations.keySet
}
}
private def getAnnotations(selection: ITextSelection, documentLastModified: Long): Map[Annotation, Position] = {
val region = EditorUtils.textSelection2region(selection)
val occurrences = occurrencesFinder.findOccurrences(region, documentLastModified)
for {
Occurrences(name, locations) <- occurrences.toList
location <- locations
annotation = new Annotation(ScalaSourceFileEditor.OCCURRENCE_ANNOTATION, false, "Occurrence of '" + name + "'")
position = new Position(location.getOffset, location.getLength)
} yield annotation -> position
}.toMap
/** Returns the annotation model of the current document provider.
*/
private def getAnnotationModelOpt: Option[IAnnotationModel] = {
for {
documentProvider <- Option(getDocumentProvider)
annotationModel <- Option(documentProvider.getAnnotationModel(getEditorInput))
} yield annotationModel
}
}
|
stephenh/scala-ide
|
org.scala-ide.sdt.core/src/org/scalaide/ui/internal/editor/MarkOccurrencesEditorExtension.scala
|
Scala
|
bsd-3-clause
| 4,894
|
/*
* Copyright (c) 2018 by Andrew Charneski.
*
* The author licenses this file to you under the
* Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance
* with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
///*
// * Copyright (c) 2017 by Andrew Charneski.
// *
// * The author licenses this file to you under the
// * Apache License, Version 2.0 (the "License");
// * you may not use this file except in compliance
// * with the License. You may obtain a copy
// * of the License at
// *
// * http://www.apache.org/licenses/LICENSE-2.0
// *
// * Unless required by applicable law or agreed to in writing,
// * software distributed under the License is distributed on an
// * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// * KIND, either express or implied. See the License for the
// * specific language governing permissions and limitations
// * under the License.
// */
//
//package interactive.superres
//
//import java.awt.image.BufferedImage
//import java.awt.{Graphics2D, RenderingHints}
//import java.io._
//import java.lang
//import java.util.concurrent.TimeUnit
//import java.util.function.{DoubleSupplier, IntToDoubleFunction}
//
//import _root_.util.Java8Util.cvt
//import _root_.util._
//import com.simiacryptus.mindseye.eval._
//import com.simiacryptus.mindseye.lang.{Coordinate, NNExecutionContext, LayerBase, Tensor}
//import com.simiacryptus.mindseye.layers.aparapi.ConvolutionLayer
//import com.simiacryptus.mindseye.layers.java._
//import com.simiacryptus.mindseye.network.{DAGNode, PipelineNetwork, SimpleLossNetwork, SupervisedNetwork}
//import com.simiacryptus.mindseye.opt._
//import com.simiacryptus.mindseye.opt.line._
//import com.simiacryptus.mindseye.opt.orient._
//import com.simiacryptus.mindseye.eval.data.ImageTiles.ImageTensorLoader
//import com.simiacryptus.util.io.HtmlNotebookOutput
//import com.simiacryptus.util.{MonitoredObject, StreamNanoHTTPD, TableOutput, Util}
//import util.NNLayerUtil._
//
//import scala.collection.JavaConverters._
//import scala.util.Random
//
//case class DeepNetworkUpsample(
// weight1 : Double,
// weight2 : Double,
// weight3 : Double,
// weight4 : Double
// ) {
//
// def getNetwork(monitor: TrainingMonitor,
// monitoringRoot : MonitoredObject,
// fitness : Boolean = false) : LayerBase = {
// val parameters = this
// var network: PipelineNetwork = if(fitness) {
// new PipelineNetwork(2)
// } else {
// new PipelineNetwork(1)
// }
//    val zeroSeed : IntToDoubleFunction = Java8Util.cvt(_ ⇒ 0.0)
// def buildLayer(from: Int,
// to: Int,
// layerNumber: String,
// weights: Double,
// layerRadius: Int = 3,
// activationLayer: LayerBase = new ReLuActivationLayer()) = {
//      def weightSeed : DoubleSupplier = Java8Util.cvt(() ⇒ {
// val r = Util.R.get.nextDouble() * 2 - 1
// r * weights
// })
// network.add(new ImgBandBiasLayer(from).setWeights(zeroSeed).setName("bias_" + layerNumber).addTo(monitoringRoot))
// if (null != activationLayer) {
// network.add(activationLayer.setName("activation_" + layerNumber).freeze.addTo(monitoringRoot))
// }
// network.add(new ConvolutionLayer(layerRadius, layerRadius, from * to).setWeights(weightSeed).setName("conv_" + layerNumber).addTo(monitoringRoot));
// //network.fn(new MonitoringSynapse().addTo(monitoringRoot).setName("output_" + layerNumber))
// }
//
// val l1 = buildLayer(3, 64, "0", weights = Math.pow(10, parameters.weight1), activationLayer = null)
// val l2 = buildLayer(64, 64, "1", weights = Math.pow(10, parameters.weight2), activationLayer = new HyperbolicActivationLayer().setScale(5).freeze())
// network.add(new ImgReshapeLayer(2,2,true))
// val l3 = buildLayer(16, 12, "2", weights = Math.pow(10, parameters.weight3), activationLayer = new HyperbolicActivationLayer().setScale(5).freeze())
// buildLayer(12, 12, "3", weights = Math.pow(10, parameters.weight4), activationLayer = new HyperbolicActivationLayer().setScale(5).freeze())
// network.add(new ImgReshapeLayer(2,2,true))
//
// if(fitness) {
// val output = network.getHead
// def normalizeStdDev(layer:DAGNode, target:Double) = network.add(new AbsActivationLayer(), network.add(new SumInputsLayer(),
// network.add(new AvgReducerLayer(), network.add(new StdDevMetaLayer(), layer)),
// network.add(new ConstLayer(new Tensor(1).setBytes(0,-target)))
// ))
// network.add(new ProductInputsLayer(), network.add(new MeanSqLossLayer(), output, network.getInput(1)), network.add(new SumInputsLayer(),
// network.add(new ConstLayer(new Tensor(1).setBytes(0,1))),
// normalizeStdDev(l1,16),
// normalizeStdDev(l2,16),
// normalizeStdDev(l3,16)
// ))
// }
//
// network
// }
//
// def fitness(monitor: TrainingMonitor, monitoringRoot : MonitoredObject, data: Array[Array[Tensor]], n: Int = 2) : Double = {
//    val values = (1 to n).map(i ⇒ {
// val network = getNetwork(monitor, monitoringRoot, fitness = true)
// val measure = new ArrayTrainable(data, network).measure(monitor)
// measure.sum
// }).toList
// val avg = values.sum / n
// monitor.log(s"Numeric Opt: $this => $avg ($values)")
// avg
// }
//
//}
//
//
//class UpsamplingModel(source: String, server: StreamNanoHTTPD, out: HtmlNotebookOutput with ScalaNotebookOutput) extends MindsEyeNotebook(server, out) {
//
// val modelName = System.getProperty("modelName","upsample_1")
// val tileSize = 128
// val fitnessBorderPadding = 8
// val scaleFactor: Double = (64 * 64.0) / (tileSize * tileSize)
//
// def eval(awaitExit:Boolean=true): Unit = {
// defineHeader()
// defineTestHandler()
// out.out("<hr/>")
// if(findFile(modelName).isEmpty || System.getProperties.containsKey("rebuild")) step_Generate()
// //step_SGD((100 * scaleFactor).toInt, 30, reshufflePeriod = 5)
//    def rates = step_diagnostics_layerRates().map(e⇒e._1.getName→e._2.rate)
// step_LBFGS((100 * scaleFactor).toInt, 30, 50)
// step_SGD((100 * scaleFactor).toInt, 30, reshufflePeriod = 5) // , rates = rates)
// step_LBFGS((500 * scaleFactor).toInt, 60, 50)
// step_SGD((500 * scaleFactor).toInt, 60, reshufflePeriod = 5) // , rates = rates)
// out.out("<hr/>")
// if(awaitExit) waitForExit()
// }
//
// def resize(source: BufferedImage, size: Int) = {
// val image = new BufferedImage(size, size, BufferedImage.TYPE_INT_ARGB)
// val graphics = image.getGraphics.asInstanceOf[Graphics2D]
// graphics.asInstanceOf[Graphics2D].setRenderingHints(new RenderingHints(RenderingHints.KEY_INTERPOLATION, RenderingHints.VALUE_INTERPOLATION_BICUBIC))
// graphics.drawImage(source, 0, 0, size, size, null)
// image
// }
//
// lazy val data : List[Array[Tensor]] = {
// out.p("Loading data from " + source)
// val rawList: List[Tensor] = rawData
// System.gc()
//    val data: List[Array[Tensor]] = rawList.map(tile ⇒ Array(Tensor.fromRGB(resize(tile.toRgbImage, tileSize/4)), tile))
// out.eval {
//      TableOutput.create(Random.shuffle(data).take(100).map(testObj ⇒ Map[String, AnyRef](
//        "Source" → out.image(testObj(1).toRgbImage(), ""),
//        "Resized" → out.image(testObj(0).toRgbImage(), "")
// ).asJava): _*)
// }
// out.p("Loading data complete")
// data
// }
//
// private def rawData() = {
// val loader = new ImageTensorLoader(new File(source), tileSize, tileSize, tileSize, tileSize, 10, 10)
// val rawList = loader.stream().iterator().asScala.take((10000 * scaleFactor).toInt).toList
// loader.stop()
// rawList
// }
//
//
// def step_Generate() = {
// phase({
// val optTraining: Array[Array[Tensor]] = Random.shuffle(data).take((10 * scaleFactor).ceil.toInt).toArray
// SimplexOptimizer[DeepNetworkUpsample](
// DeepNetworkUpsample(-0.19628151652396514,-1.120332072478063,-1.5337950986957058,-1.5337950986957058),
//        x ⇒ x.fitness(monitor, monitoringRoot, optTraining, n=2), relativeTolerance=0.15
//      ).getNetwork(monitor, monitoringRoot)
//    }, (model: LayerBase) ⇒ {
// out.h1("Step 1")
// monitor.clear()
// val trainer = out.eval {
// val trainingNetwork: SupervisedNetwork = new SimpleLossNetwork(model, lossNetwork)
// val dataArray = data.toArray
// var heapCopy: Trainable = new SampledArrayTrainable(dataArray, trainingNetwork, (50 * scaleFactor).toInt)
// //heapCopy = new ConstL12Normalizer(heapCopy).setFactor_L1(0.001)
// val trainer = new IterativeTrainer(heapCopy)
// trainer.setMonitor(monitor)
// trainer.setTimeout(15, TimeUnit.MINUTES)
// trainer.setIterationsPerSample(1)
// val momentum = new GradientDescent()
// trainer.setOrientation(momentum)
//      trainer.setLineSearchFactory(Java8Util.cvt((s) ⇒ new QuadraticSearch))
// trainer.setTerminateThreshold(2500.0)
// trainer
// }
// trainer.eval()
// }: Unit, modelName)
// }
//
//  def step_diagnostics_layerRates(sampleSize : Int = (100 * scaleFactor).toInt) = phase[Map[LayerBase, LayerRateDiagnosticTrainer.LayerStats]](modelName, (model: LayerBase) ⇒ {
// monitor.clear()
// val trainingNetwork: SupervisedNetwork = new SimpleLossNetwork(model, lossNetwork)
// val dataArray = data.toArray
// out.h1("Diagnostics - LayerBase Rates")
// val result = out.eval {
// var heapCopy: Trainable = new SampledArrayTrainable(dataArray, trainingNetwork, sampleSize)
// val trainer = new LayerRateDiagnosticTrainer(heapCopy).setStrict(true).setMaxIterations(1)
// trainer.setMonitor(monitor)
// trainer.eval()
// trainer.getLayerRates().asScala.toMap
// }
// result
// }, modelName)
//
//  def step_SGD(sampleSize: Int, timeoutMin: Int, termValue: Double = 0.0, momentum: Double = 0.2, maxIterations: Int = Integer.MAX_VALUE, reshufflePeriod: Int = 1,rates: Map[String, Double] = Map.empty) = phase(modelName, (model: LayerBase) ⇒ {
// monitor.clear()
// out.h1(s"SGD(sampleSize=$sampleSize,timeoutMin=$timeoutMin)")
// val trainer = out.eval {
// val trainingNetwork: SupervisedNetwork = new SimpleLossNetwork(model, lossNetwork)
// val dataArray = data.toArray
// var heapCopy: Trainable = new SampledArrayTrainable(dataArray, trainingNetwork, sampleSize)
// //heapCopy = new ConstL12Normalizer(heapCopy).setFactor_L1(0.001)
// val trainer = new IterativeTrainer(heapCopy)
// trainer.setMonitor(monitor)
// trainer.setTimeout(timeoutMin, TimeUnit.MINUTES)
// trainer.setIterationsPerSample(reshufflePeriod)
// val momentumStrategy = new MomentumStrategy(new GradientDescent()).setCarryOver(momentum)
// val reweight = new LayerReweightingStrategy(momentumStrategy) {
// override def getRegionPolicy(layer: LayerBase): lang.Double = layer.getName match {
//          case key if rates.contains(key) ⇒ rates(key)
//          case _ ⇒ 1.0
// }
// override def reset(): Unit = {}
// }
// trainer.setOrientation(reweight)
//      trainer.setLineSearchFactory(Java8Util.cvt((s)⇒new ArmijoWolfeSearch().setAlpha(1e-12).setC1(0).setC2(1)))
//      //trainer.setLineSearchFactory(Java8Util.cvt((s)⇒new BisectionSearch()))
// trainer.setTerminateThreshold(termValue)
// trainer.setMaxIterations(maxIterations)
// trainer
// }
// val result = trainer.eval()
// result
// }, modelName)
//
//  def step_LBFGS(sampleSize: Int, timeoutMin: Int, iterationSize: Int): Unit = phase(modelName, (model: LayerBase) ⇒ {
// monitor.clear()
// out.h1(s"LBFGS(sampleSize=$sampleSize,timeoutMin=$timeoutMin)")
// val trainer = out.eval {
// val trainingNetwork: SupervisedNetwork = new SimpleLossNetwork(model, lossNetwork)
// val heapCopy = new SampledArrayTrainable(data.toArray, trainingNetwork, sampleSize)
// val trainer = new com.simiacryptus.mindseye.opt.IterativeTrainer(heapCopy)
// trainer.setMonitor(monitor)
// trainer.setTimeout(timeoutMin, TimeUnit.MINUTES)
// trainer.setIterationsPerSample(iterationSize)
// val lbfgs = new LBFGS().setMaxHistory(35).setMinHistory(4)
// trainer.setOrientation(lbfgs)
//      trainer.setLineSearchFactory(Java8Util.cvt((s:String)⇒(s match {
//        case s if s.contains("LBFGS") ⇒ new StaticLearningRate().setRate(1.0)
//        case _ ⇒ new ArmijoWolfeSearch().setAlpha(1e-5)
// })))
// trainer.setTerminateThreshold(0.0)
// trainer
// }
// trainer.eval()
// }, modelName)
//
// def lossNetwork = {
//    val mask: Tensor = new Tensor(tileSize, tileSize, 3).mapCoords(Java8Util.cvt((c: Coordinate) ⇒ {
// if (c.coords(0) < fitnessBorderPadding || c.coords(0) >= (tileSize - fitnessBorderPadding)) {
// 0.0
// } else if (c.coords(1) < fitnessBorderPadding || c.coords(1) >= (tileSize - fitnessBorderPadding)) {
// 0.0
// } else {
// 1.0
// }
// }))
// val lossNetwork = new PipelineNetwork(2)
// val maskNode = lossNetwork.add(new ConstLayer(mask).freeze())
// lossNetwork.add(new MeanSqLossLayer(), lossNetwork.add(new ProductInputsLayer(), lossNetwork.getInput(0), maskNode), lossNetwork.add(new ProductInputsLayer(), lossNetwork.getInput(1), maskNode))
// lossNetwork
// }
//
// def defineTestHandler() = {
// out.p("<a href='eval.html'>Test Reconstruction</a>")
//    server.addSyncHandler("eval.html", "text/html", cvt(o ⇒ {
//      Option(new HtmlNotebookOutput(out.workingDir, o) with ScalaNotebookOutput).foreach(out ⇒ {
// try {
// out.eval {
//            TableOutput.create(Random.shuffle(data).take(100).map(testObj ⇒ Map[String, AnyRef](
//              "Source Truth" → out.image(testObj(1).toRgbImage(), ""),
//              "Corrupted" → out.image(testObj(0).toRgbImage(), ""),
//              "Reconstruction" → out.image(getModelCheckpoint.eval(new NNExecutionContext() {}, testObj(0)).getData.get(0).toRgbImage(), "")
// ).asJava): _*)
// }
// } catch {
//          case e: Throwable ⇒ e.printStackTrace()
// }
// })
// }), false)
// }
//
//}
//
//object UpsamplingModel extends Report {
//
// def main(args: Array[String]): Unit = {
//
//    report((server, out) ⇒ args match {
//      case Array(source) ⇒ new UpsamplingModel(source, server, out).eval()
//      case _ ⇒ new UpsamplingModel("E:\\\\testImages\\\\256_ObjectCategories", server, out).eval()
// })
//
// }
//
//}
|
acharneski/ImageLabs
|
src/main/scala/interactive/superres/UpsamplingModel.scala
|
Scala
|
apache-2.0
| 15,324
|
package org.joda.time.format
import java.io.Writer
import java.util.Locale
import org.joda.time.MutablePeriod
import org.joda.time.Period
import org.joda.time.PeriodType
import org.joda.time.ReadWritablePeriod
import org.joda.time.ReadablePeriod
class PeriodFormatter {
private var iLocale: Locale = null
private var iParseType: PeriodType = null
private var iPrinter: PeriodPrinter = null
private var iParser: PeriodParser = null
def this(printer: PeriodPrinter, parser: PeriodParser) {
this()
iPrinter = printer
iParser = parser
iLocale = null
iParseType = null
}
def this(printer: PeriodPrinter,
parser: PeriodParser,
locale: Locale,
`type`: PeriodType) {
this()
iPrinter = printer
iParser = parser
iLocale = locale
iParseType = `type`
}
def isPrinter(): Boolean = iPrinter != null
def getPrinter(): PeriodPrinter = iPrinter
def isParser(): Boolean = iParser != null
def getParser(): PeriodParser = iParser
def withLocale(locale: Locale): PeriodFormatter = {
if ((locale eq getLocale) || (locale != null && locale == getLocale)) {
return this
}
new PeriodFormatter(iPrinter, iParser, locale, iParseType)
}
def getLocale(): Locale = iLocale
def withParseType(`type`: PeriodType): PeriodFormatter = {
if (`type` == iParseType) {
return this
}
new PeriodFormatter(iPrinter, iParser, iLocale, `type`)
}
def getParseType(): PeriodType = iParseType
def printTo(buf: StringBuffer, period: ReadablePeriod) {
checkPrinter()
checkPeriod(period)
getPrinter.printTo(buf, period, iLocale)
}
def printTo(out: Writer, period: ReadablePeriod) {
checkPrinter()
checkPeriod(period)
getPrinter.printTo(out, period, iLocale)
}
def print(period: ReadablePeriod): String = {
checkPrinter()
checkPeriod(period)
val printer = getPrinter
val buf = new StringBuffer(printer.calculatePrintedLength(period, iLocale))
printer.printTo(buf, period, iLocale)
buf.toString
}
private def checkPrinter() {
if (iPrinter == null) {
throw new UnsupportedOperationException("Printing not supported")
}
}
private def checkPeriod(period: ReadablePeriod) {
if (period == null) {
throw new IllegalArgumentException("Period must not be null")
}
}
def parseInto(period: ReadWritablePeriod, text: String, position: Int): Int = {
checkParser()
checkPeriod(period)
getParser.parseInto(period, text, position, iLocale)
}
def parsePeriod(text: String): Period = {
checkParser()
parseMutablePeriod(text).toPeriod()
}
def parseMutablePeriod(text: String): MutablePeriod = {
checkParser()
val period = new MutablePeriod(0, iParseType)
var newPos = getParser.parseInto(period, text, 0, iLocale)
if (newPos >= 0) {
if (newPos >= text.length) {
return period
}
} else {
newPos = ~newPos
}
throw new IllegalArgumentException(
FormatUtils.createErrorMessage(text, newPos))
}
private def checkParser() {
if (iParser == null) {
throw new UnsupportedOperationException("Parsing not supported")
}
}
}
|
mdedetrich/soda-time
|
shared/src/main/scala/org/joda/time/format/PeriodFormatter.scala
|
Scala
|
bsd-2-clause
| 3,213
|
package fr.lium
package actor
import fr.lium.model.{Converted, FailedConversion, MediaFile}
import fr.lium.tables.MediaFiles
import java.io.File
import akka.actor.{Actor, ActorRef}
import akka.event.Logging
import sys.process._
import scala.slick.session.Database
import scala.slick.driver.SQLiteDriver.simple._
import Database.threadLocalSession
object Convertor {
case object Convert
case object Done
}
case class SoundConvertor(inputFile: MediaFile, fullPath: String, workingDir: String)
class SoundConvertorActor(ffmpegBin: File, database: Database, diarizationActor: ActorRef) extends Actor {
val log = Logging(context.system, this)
def receive = {
case Convertor.Convert => log.info("I'm converting baby!")
case SoundConvertor(mediaFile, fullPath, workingDir) =>
{
val ffmpegBinPath = ffmpegBin.getPath()
val commandLine: String = s"$ffmpegBinPath -y -i $fullPath -vn -acodec pcm_s16le -ac 1 -ar 16000 $fullPath.wav"
log.info("Converting file: " + fullPath)
log.info("Full command: " + commandLine)
//wave 16bits PCM (pcm_s16le), 16kHz (-ar 16000), mono (-ac 1)
val result: Int = commandLine.!
log.info("result: " + result)
database.withSession {
//Update status in database
mediaFile.id.foreach { id =>
MediaFiles.updateStatus(id, (if (result == 0) Converted else FailedConversion).toString)
diarizationActor ! ComputeDiarization(mediaFile, fullPath + ".wav", workingDir)
}
}
}
}
}
|
bsalimi/speech-recognition-api
|
app/actors/SoundConvertorActor.scala
|
Scala
|
mit
| 1,529
|
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.tracing.rewriters
import org.neo4j.cypher.internal.frontend.v2_3.Rewriter
sealed trait RewriterTask
final case class RunConditions(previousName: Option[String], conditions: Set[RewriterCondition]) extends RewriterTask
final case class RunRewriter(name: String, rewriter: Rewriter) extends RewriterTask
|
HuangLS/neo4j
|
community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/tracing/rewriters/RewriterTask.scala
|
Scala
|
apache-2.0
| 1,148
|
class Racional(n: Int, d: Int) {
val numerador = n
val denominador = d
def Multiplicacion (b: Racional): Racional = {
new Racional(
numerador * b.numerador ,
denominador * b.denominador
)
}
}
print("Numerador de la primera fraccion: ")
val n1 = readInt()
print("Denominador de la primera fraccion: ")
val d1 = readInt()
val fraccion1 = new Racional(n1, d1)
print("Numerador de la primera fraccion: ")
val n2 = readInt()
print("Denominador de la primera fraccion: ")
val d2 = readInt()
val fraccion2 = new Racional(n2, d2)
val Multiplicacion = fraccion1.Multiplicacion (fraccion2)
println ("Resultado: " + Multiplicacion.numerador + "/" + Multiplicacion.denominador)
|
diana170309/poo1-1
|
Racionales-Multiplicacion.scala
|
Scala
|
mit
| 699
|
/*
* Copyright 2014 Databricks
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.databricks.spark.csv.util
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types._
import scala.util.control.Exception._
private[csv] object InferSchema {
/**
* Similar to the JSON schema inference. [[org.apache.spark.sql.json.InferSchema]]
* 1. Infer type of each row
* 2. Merge row types to find common type
* 3. Replace any null types with string type
*/
def apply(tokenRdd: RDD[Array[String]], header: Array[String]): StructType = {
val startType = Array.fill[DataType](header.length)(NullType)
val rootTypes = tokenRdd.aggregate(startType)(inferRowType, mergeRowTypes)
val structFields = header.zip(rootTypes).map { case (thisHeader, rootType) =>
StructField(thisHeader, rootType, nullable = true)
}
StructType(structFields)
}
private[csv] def inferRowType(rowSoFar: Array[DataType], next: Array[String]) = {
var i = 0
while(i < math.min(rowSoFar.length, next.length)){ // May have columns on right missing.
rowSoFar(i) = inferField(rowSoFar(i), next(i))
i+=1
}
rowSoFar
}
private[csv] def mergeRowTypes(first: Array[DataType], second: Array[DataType]) = {
first.zipAll(second, NullType, NullType).map { case ((a, b)) =>
val tpe = findTightestCommonType(a, b).getOrElse(StringType)
tpe match {
case _: NullType => StringType
case other => other
}
}
}
/**
* Infer type of string field. Given known type Double, and a string "1", there is no
* point checking if it is an Int, as the final type must be Double or higher.
*/
private[csv] def inferField(typeSoFar: DataType, field: String): DataType = {
if (field == null || field.isEmpty){
typeSoFar
} else {
typeSoFar match {
case NullType => tryParseInteger(field)
case IntegerType => tryParseInteger(field)
case LongType => tryParseLong(field)
case DoubleType => tryParseDouble(field)
case StringType => StringType
}
}
}
def tryParseDouble(field: String) = if ((allCatch opt field.toDouble).isDefined){
DoubleType
} else {
StringType
}
def tryParseLong(field: String) = if ((allCatch opt field.toLong).isDefined){
LongType
}else {
tryParseDouble(field)
}
def tryParseInteger(field: String) = if((allCatch opt field.toInt).isDefined){
IntegerType
} else {
tryParseLong(field)
}
/**
* Copied from internal Spark api
* [[org.apache.spark.sql.catalyst.analysis.HiveTypeCoercion]]
*/
private val numericPrecedence =
IndexedSeq(
ByteType,
ShortType,
IntegerType,
LongType,
FloatType,
DoubleType,
DecimalType.Unlimited)
/**
* Copied from internal Spark api
* [[org.apache.spark.sql.catalyst.analysis.HiveTypeCoercion]]
*/
val findTightestCommonType: (DataType, DataType) => Option[DataType] = {
case (t1, t2) if t1 == t2 => Some(t1)
case (NullType, t1) => Some(t1)
case (t1, NullType) => Some(t1)
// Promote numeric types to the highest of the two and all numeric types to unlimited decimal
case (t1, t2) if Seq(t1, t2).forall(numericPrecedence.contains) =>
val index = numericPrecedence.lastIndexWhere(t => t == t1 || t == t2)
Some(numericPrecedence(index))
case _ => None
}
}
|
mohitjaggi/spark-csv
|
src/main/scala/com/databricks/spark/csv/util/InferSchema.scala
|
Scala
|
apache-2.0
| 3,933
|