code
stringlengths
5
1M
repo_name
stringlengths
5
109
path
stringlengths
6
208
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
5
1M
/*
 * Copyright (c) 2014 Snowplow Analytics Ltd. All rights reserved.
 *
 * This program is licensed to you under the Apache License Version 2.0,
 * and you may not use this file except in compliance with the Apache
 * License Version 2.0.
 * You may obtain a copy of the Apache License Version 2.0 at
 * http://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the Apache License Version 2.0 is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
 * either express or implied.
 *
 * See the Apache License Version 2.0 for the specific language
 * governing permissions and limitations there under.
 */
package com.snowplowanalytics.snowplow.storage.kinesis.elasticsearch

// Scalaz
import scalaz._
import Scalaz._

// json4s
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.json4s.JsonDSL._

// Scala
import scala.util.matching.Regex
import scala.annotation.tailrec

/**
 * Converts unstructured events and custom contexts to a format which the
 * Elasticsearch mapper can understand.
 */
object Shredder {

  // Captures (vendor, name, SchemaVer) from an Iglu schema URI,
  // e.g. "iglu:com.acme/PascalCase/jsonschema/13-0-0".
  private[elasticsearch] val schemaPattern =
    """^iglu:([a-zA-Z0-9-_.]+)/([a-zA-Z0-9-_]+)/[a-zA-Z0-9-_]+/([0-9]+-[0-9]+-[0-9]+)$""".r

  /**
   * Create an Elasticsearch field name from a schema.
   *
   * "iglu:com.acme/PascalCase/jsonschema/13-0-0" -> "context_com_acme_pascal_case_13"
   *
   * @param prefix "context" or "unstruct_event"
   * @param schema Schema field from an incoming JSON
   * @return Elasticsearch field name, or a failure when the schema URI is malformed
   */
  // TODO: move this to shared storage/shredding utils
  // See https://github.com/snowplow/snowplow/issues/1189
  def fixSchema(prefix: String, schema: String): ValidationNel[String, String] =
    schema match {
      case schemaPattern(vendor, name, schemaVer) =>
        // Underscore-separate the vendor's reversed domain name instead of dots
        val vendorSnake = vendor.replaceAll("""[-.]""", "_").toLowerCase
        // PascalCase -> snake_case, hyphens become underscores
        val nameSnake = name.replaceAll("([^A-Z_])([A-Z])", "$1_$2").toLowerCase
        // Only the SchemaVer MODEL component participates in the field name
        val model = schemaVer.split("-")(0)
        s"${prefix}_${vendorSnake}_${nameSnake}_${model}".successNel
      case _ =>
        "Schema %s does not conform to regular expression %s"
          .format(schema, schemaPattern.toString)
          .failNel
    }

  /**
   * Convert a contexts JSON to an Elasticsearch-compatible JObject.
   *
   * Contexts that share a schema are grouped into one array-valued field, e.g.
   * {"context_com_acme_duplicated_1": [{"value": 1}, {"value": 2}]}.
   *
   * @param contexts Contexts JSON
   * @return Contexts JSON in an Elasticsearch-compatible format
   */
  def parseContexts(contexts: String): ValidationNel[String, JObject] = {

    /**
     * Validates and pairs up the schema and data fields without grouping
     * identical schemas together; grouping happens afterwards.
     *
     * @param remaining Inner custom context JSONs still to be processed
     * @param acc Custom contexts which have already been parsed
     * @return Validated tuples of (fixed schema name, original data JObject)
     */
    @tailrec
    def pairUp(
      remaining: List[JValue],
      acc: List[ValidationNel[String, (String, JValue)]]
    ): List[ValidationNel[String, (String, JValue)]] =
      remaining match {
        case Nil => acc
        case context :: rest =>
          val innerData = context \\ "data" match {
            case JNothing =>
              "Could not extract inner data field from custom context".failNel
            // TODO: decide whether to enforce object type of data
            case d => d.successNel
          }
          val fixedSchema: ValidationNel[String, String] = context \\ "schema" match {
            case JString(schema) => fixSchema("contexts", schema)
            case _ =>
              "Context JSON did not contain a stringly typed schema field".failNel
          }
          // Applicative combination: both failures accumulate into the Nel
          val pair = (fixedSchema |@| innerData) { _ -> _ }
          pairUp(rest, pair :: acc)
      }

    val json = parse(contexts)
    val data = json \\ "data"

    data match {
      case JArray(Nil) => "Custom contexts data array is empty".failNel
      case JArray(ls) =>
        val pairs: ValidationNel[String, List[(String, JValue)]] =
          pairUp(ls, Nil).sequenceU
        // Group contexts with the same schema together
        pairs.map(_.groupBy(_._1).map(pair => (pair._1, pair._2.map(_._2))))
      case _ => "Could not extract contexts data field as an array".failNel
    }
  }

  /**
   * Convert an unstructured event JSON to an Elasticsearch-compatible JObject,
   * e.g. {"unstruct_event_com_snowplowanalytics_snowplow_link_click_1": {"key": "value"}}.
   *
   * @param unstruct Unstructured event JSON
   * @return Unstructured event JSON in an Elasticsearch-compatible format
   */
  def parseUnstruct(unstruct: String): ValidationNel[String, JObject] = {
    val json = parse(unstruct)
    val data = json \\ "data"
    val schema = data \\ "schema"
    val innerData = data \\ "data" match {
      case JNothing =>
        "Could not extract inner data field from unstructured event".failNel
      // TODO: decide whether to enforce object type of data
      case d => d.successNel
    }
    val fixedSchema = schema match {
      case JString(s) => fixSchema("unstruct_event", s)
      case _ =>
        "Unstructured event JSON did not contain a stringly typed schema field".failNel
    }
    (fixedSchema |@| innerData) { _ -> _ }
  }
}
jramos/snowplow
4-storage/kinesis-elasticsearch-sink/src/main/scala/com.snowplowanalytics.snowplow.storage.kinesis/elasticsearch/Shredder.scala
Scala
apache-2.0
7,432
/*
 * Copyright (c) 2002-2018 "Neo Technology,"
 * Network Engine for Objects in Lund AB [http://neotechnology.com]
 *
 * This file is part of Neo4j.
 *
 * Neo4j is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
package org.neo4j.cypher.internal.frontend.v2_3.ast

import org.neo4j.cypher.internal.frontend.v2_3.ast.Expression.SemanticContext
import org.neo4j.cypher.internal.frontend.v2_3.symbols._
import org.neo4j.cypher.internal.frontend.v2_3.{InputPosition, SemanticCheck}

case class CaseExpression(
  expression: Option[Expression],
  alternatives: Seq[(Expression, Expression)],
  default: Option[Expression]
)(val position: InputPosition) extends Expression {

  // Every expression this CASE may evaluate to: all THEN results plus the ELSE default.
  lazy val possibleExpressions = alternatives.map(_._2) ++ default

  def semanticCheck(ctx: SemanticContext): SemanticCheck = {
    val possibleTypes = possibleExpressions.unionOfTypes

    expression.semanticCheck(ctx) chain
      alternatives.flatMap { case (condition, result) => Seq(condition, result) }.semanticCheck(ctx) chain
      default.semanticCheck(ctx) chain
      when(expression.isEmpty) {
        // Generic form (no test expression): each WHEN clause must be a boolean predicate.
        alternatives.map(_._1).expectType(CTBoolean.covariant)
      } chain
      this.specifyType(possibleTypes)
  }
}
HuangLS/neo4j
community/cypher/frontend-2.3/src/main/scala/org/neo4j/cypher/internal/frontend/v2_3/ast/CaseExpression.scala
Scala
apache-2.0
1,727
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.flink.table.planner.plan.stream.table

import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.planner.expressions.utils.Func13
import org.apache.flink.table.planner.plan.optimize.program.FlinkStreamProgram
import org.apache.flink.table.planner.utils._

import org.apache.calcite.rel.rules.CoreRules
import org.apache.calcite.tools.RuleSets
import org.junit.Test

/**
 * Plan tests for table-function correlates on streaming tables: joinLateral,
 * leftOuterJoinLateral and flatMap. Each test builds a Table API query and
 * hands it to util.verifyPlan (plan verification helper from TableTestBase;
 * exact comparison mechanism is defined elsewhere).
 */
class CorrelateTest extends TableTestBase {

  @Test
  def testCrossJoin(): Unit = {
    val util = streamTestUtil()
    val table = util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
    val function = new TableFunc1
    util.addFunction("func1", function)

    val result1 = table.joinLateral(function('c) as 's).select('c, 's)
    util.verifyPlan(result1)
  }

  @Test
  def testCrossJoin2(): Unit = {
    val util = streamTestUtil()
    val table = util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
    val function = new TableFunc1
    util.addFunction("func1", function)

    // test overloading
    val result2 = table.joinLateral(function('c, "$") as 's).select('c, 's)
    util.verifyPlan(result2)
  }

  @Test
  def testLeftOuterJoinWithLiteralTrue(): Unit = {
    val util = streamTestUtil()
    val table = util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
    val function = new TableFunc1
    util.addFunction("func1", function)

    // Outer join with a literal TRUE join predicate.
    val result = table.leftOuterJoinLateral(function('c) as 's, true).select('c, 's)
    util.verifyPlan(result)
  }

  @Test
  def testCustomType(): Unit = {
    val util = streamTestUtil()
    val table = util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
    val function = new TableFunc2
    util.addFunction("func2", function)
    val scalarFunc = new Func13("pre")

    // Scalar function result fed into the table function argument.
    val result = table.joinLateral(
      function(scalarFunc('c)) as ('name, 'len)).select('c, 'name, 'len)
    util.verifyPlan(result)
  }

  @Test
  def testHierarchyType(): Unit = {
    val util = streamTestUtil()
    val table = util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
    val function = new HierarchyTableFunction
    util.addFunction("hierarchy", function)

    val result = table.joinLateral(function('c) as ('name, 'adult, 'len))
    util.verifyPlan(result)
  }

  @Test
  def testPojoType(): Unit = {
    val util = streamTestUtil()
    val table = util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
    val function = new PojoTableFunc
    util.addFunction("pojo", function)

    // No explicit alias: field names come from the POJO.
    val result = table.joinLateral(function('c))
    util.verifyPlan(result)
  }

  @Test
  def testFilter(): Unit = {
    val util = streamTestUtil()
    val table = util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
    val function = new TableFunc2
    util.addFunction("func2", function)

    val result = table
      .joinLateral(function('c) as ('name, 'len))
      .select('c, 'name, 'len)
      .filter('len > 2)
    util.verifyPlan(result)
  }

  @Test
  def testScalarFunction(): Unit = {
    val util = streamTestUtil()
    val table = util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
    val function = new TableFunc1
    util.addFunction("func1", function)

    // Built-in scalar expression applied inside the correlate argument.
    val result = table.joinLateral(function('c.substring(2)) as 's)
    util.verifyPlan(result)
  }

  @Test
  def testCorrelateWithMultiFilter(): Unit = {
    val util = streamTestUtil()
    val sourceTable = util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
    val function = new TableFunc0
    util.addFunction("func1", function)

    val result = sourceTable.select('a, 'b, 'c)
      .joinLateral(function('c) as('d, 'e))
      .select('c, 'd, 'e)
      .where('e > 10)
      .where('e > 20)
      .select('c, 'd)
    util.verifyPlan(result)
  }

  @Test
  def testCorrelateWithMultiFilterAndWithoutCalcMergeRules(): Unit = {
    val util = streamTestUtil()
    // Same query as testCorrelateWithMultiFilter, but with the calc-merge
    // rules stripped from the logical optimization program.
    val programs = util.getStreamProgram()
    programs.getFlinkRuleSetProgram(FlinkStreamProgram.LOGICAL)
      .get.remove(
      RuleSets.ofList(
        CoreRules.CALC_MERGE,
        CoreRules.FILTER_CALC_MERGE,
        CoreRules.PROJECT_CALC_MERGE)) // removing
    util.replaceStreamProgram(programs)

    val sourceTable = util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
    val function = new TableFunc0
    util.addFunction("func1", function)

    val result = sourceTable.select('a, 'b, 'c)
      .joinLateral(function('c) as('d, 'e))
      .select('c, 'd, 'e)
      .where('e > 10)
      .where('e > 20)
      .select('c, 'd)
    util.verifyPlan(result)
  }

  @Test
  def testFlatMap(): Unit = {
    val util = streamTestUtil()
    val func2 = new TableFunc2
    val sourceTable = util.addTableSource[(Int, Long, String)]("MyTable", 'f1, 'f2, 'f3)
    val resultTable = sourceTable
      .flatMap(func2('f3))
    util.verifyPlan(resultTable)
  }

  @Test
  def testCorrelatePythonTableFunction(): Unit = {
    val util = streamTestUtil()
    val sourceTable = util.addTableSource[(Int, Int, String)]("MyTable", 'a, 'b, 'c)
    val func = new MockPythonTableFunction
    val result = sourceTable.joinLateral(func('a, 'b) as('x, 'y))

    util.verifyPlan(result)
  }
}
greghogan/flink
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/stream/table/CorrelateTest.scala
Scala
apache-2.0
6,008
package com.twitter.scalding.estimation.memory

import org.apache.hadoop.mapred.JobConf

/**
 * Configuration keys and accessors for scalding's memory estimator.
 * Keys are read from the Hadoop JobConf; each getter supplies a default.
 */
object MemoryEstimatorConfig {

  /** Output param: what the original job map memory was. */
  val originalMapMemory = "scalding.map.memory.estimator.original"

  /** Output param: what the original job reduce memory was. */
  val originalReduceMemory = "scalding.reduce.memory.estimator.original"

  /**
   * Value of alpha for exponential smoothing.
   * Lower values ensure more smoothing and less importance to newer data
   * Higher values provide lesser smoothing and more importance to newer data
   */
  val alphaKey = "scalding.memory.estimator.alpha"

  /** Indicates how much to scale the memory estimate after it's calculated */
  val memoryScaleFactor = "scalding.memory.estimator.scale.factor"

  /** Scale factor from an Xmx value to a memory estimate (default 1.25 below). */
  val XmxToMemoryScaleFactorKey = "scalding.memory.estimator.xmx.scale.factor"

  /** Upper bound for an estimated container ask (presumably MB — matches the yarn increment key; TODO confirm). */
  val maxContainerMemoryKey = "scalding.memory.estimator.container.max"

  /** Lower bound for an estimated container ask (presumably MB; TODO confirm). */
  val minContainerMemoryKey = "scalding.memory.estimator.container.min"

  /** yarn allocates in increments. So we might as well round up our container ask **/
  val yarnSchedulerIncrementAllocationMB = "yarn.scheduler.increment-allocation-mb"

  /** Maximum number of history items to use for memory estimation. */
  val maxHistoryKey = "scalding.memory.estimator.max.history"

  // Default: 8 * 1024 (8 GB if the unit is MB).
  def getMaxContainerMemory(conf: JobConf): Long =
    conf.getLong(maxContainerMemoryKey, 8 * 1024)

  // Default: 1 * 1024 (1 GB if the unit is MB).
  def getMinContainerMemory(conf: JobConf): Long =
    conf.getLong(minContainerMemoryKey, 1 * 1024)

  // Alpha of 1.0 means no smoothing: only the newest observation counts.
  def getAlpha(conf: JobConf): Double =
    conf.getDouble(alphaKey, 1.0)

  def getScaleFactor(conf: JobConf): Double =
    conf.getDouble(memoryScaleFactor, 1.2)

  def getXmxScaleFactor(conf: JobConf): Double =
    conf.getDouble(XmxToMemoryScaleFactorKey, 1.25)

  def getYarnSchedulerIncrement(conf: JobConf): Int =
    conf.getInt(yarnSchedulerIncrementAllocationMB, 512)

  def getMaxHistory(conf: JobConf): Int =
    conf.getInt(maxHistoryKey, 5)
}
tdyas/scalding
scalding-core/src/main/scala/com/twitter/scalding/estimation/memory/MemoryEstimatorConfig.scala
Scala
apache-2.0
1,962
/*
 * Copyright 2015 Heiko Seeberger
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package de.heikoseeberger.akkahttpjsoniterscala

import akka.http.javadsl.common.JsonEntityStreamingSupport
import akka.http.scaladsl.common.EntityStreamingSupport
import akka.http.scaladsl.marshalling._
import akka.http.scaladsl.model.{ ContentType, ContentTypeRange, HttpEntity, MediaType, MessageEntity }
import akka.http.scaladsl.model.MediaTypes.`application/json`
import akka.http.scaladsl.unmarshalling.{ FromEntityUnmarshaller, Unmarshal, Unmarshaller }
import akka.http.scaladsl.util.FastFuture
import akka.stream.scaladsl.{ Flow, Source }
import akka.util.ByteString
import com.github.plokhotnyuk.jsoniter_scala.core._
import scala.collection.immutable.Seq
import scala.concurrent.Future
import scala.util.Try
import scala.util.control.NonFatal

/**
 * Automatic to and from JSON marshalling/unmarshalling using an in-scope instance of
 * JsonValueCodec
 */
object JsoniterScalaSupport extends JsoniterScalaSupport {
  // Default jsoniter-scala buffer configuration used when the caller supplies none.
  val defaultReaderConfig: ReaderConfig =
    ReaderConfig.withPreferredBufSize(100 * 1024).withPreferredCharBufSize(10 * 1024)
  val defaultWriterConfig: WriterConfig = WriterConfig.withPreferredBufSize(100 * 1024)
}

/**
 * JSON marshalling/unmarshalling using an in-scope instance of JsonValueCodec
 */
trait JsoniterScalaSupport {
  type SourceOf[A] = Source[A, _]

  import JsoniterScalaSupport._

  // Overridable via mediaTypes / unmarshallerContentTypes below.
  private val defaultMediaTypes: Seq[MediaType.WithFixedCharset] = List(`application/json`)
  private val defaultContentTypes: Seq[ContentTypeRange] =
    defaultMediaTypes.map(ContentTypeRange.apply)

  // Restricts byte-array extraction to the accepted content types.
  private val byteArrayUnmarshaller: FromEntityUnmarshaller[Array[Byte]] =
    Unmarshaller.byteArrayUnmarshaller.forContentTypes(unmarshallerContentTypes: _*)

  // Wraps an already-rendered ByteString source into a streamed entity with a
  // fixed content type; marshalling failures surface as failed futures.
  private def sourceByteStringMarshaller(
      mediaType: MediaType.WithFixedCharset
  ): Marshaller[SourceOf[ByteString], MessageEntity] =
    Marshaller[SourceOf[ByteString], MessageEntity] { implicit ec => value =>
      try
        FastFuture.successful {
          Marshalling.WithFixedContentType(
            mediaType,
            () => HttpEntity(contentType = mediaType, data = value)
          ) :: Nil
        }
      catch {
        case NonFatal(e) => FastFuture.failed(e)
      }
    }

  private val jsonSourceStringMarshaller =
    Marshaller.oneOf(mediaTypes: _*)(sourceByteStringMarshaller)

  // Renders each element to JSON bytes and applies the streaming support's framing.
  private def jsonSource[A](entitySource: SourceOf[A])(implicit
      codec: JsonValueCodec[A],
      config: WriterConfig = defaultWriterConfig,
      support: JsonEntityStreamingSupport
  ): SourceOf[ByteString] =
    entitySource
      .map(writeToArray(_, config))
      .map(ByteString(_))
      .via(support.framingRenderer)

  /** Content types accepted on unmarshalling; override to widen/narrow. */
  def unmarshallerContentTypes: Seq[ContentTypeRange] = defaultContentTypes

  /** Media types emitted on marshalling; the head is used for entities. */
  def mediaTypes: Seq[MediaType.WithFixedCharset] = defaultMediaTypes

  /**
   * HTTP entity => `A`
   */
  implicit def unmarshaller[A](implicit
      codec: JsonValueCodec[A],
      config: ReaderConfig = defaultReaderConfig
  ): FromEntityUnmarshaller[A] =
    byteArrayUnmarshaller.map { bytes =>
      // Empty body is reported as NoContentException rather than a parse error.
      if (bytes.length == 0) throw Unmarshaller.NoContentException
      readFromArray[A](bytes, config)
    }

  /**
   * `A` => HTTP entity
   */
  implicit def marshaller[A](implicit
      codec: JsonValueCodec[A],
      config: WriterConfig = defaultWriterConfig
  ): ToEntityMarshaller[A] = {
    val mediaType = mediaTypes.head
    val contentType = ContentType.WithFixedCharset(mediaType)
    Marshaller.withFixedContentType(contentType) { obj =>
      // fromArrayUnsafe avoids a copy; the array is freshly allocated by writeToArray.
      HttpEntity.Strict(contentType, ByteString.fromArrayUnsafe(writeToArray(obj, config)))
    }
  }

  /**
   * `ByteString` => `A`
   *
   * @tparam A
   *   type to decode
   * @return
   *   unmarshaller for any `A` value
   */
  implicit def fromByteStringUnmarshaller[A](implicit
      codec: JsonValueCodec[A],
      config: ReaderConfig = defaultReaderConfig
  ): Unmarshaller[ByteString, A] =
    Unmarshaller(_ => bs => Future.fromTry(Try(readFromArray(bs.toArray, config))))

  /**
   * HTTP entity => `Source[A, _]`
   *
   * @tparam A
   *   type to decode
   * @return
   *   unmarshaller for `Source[A, _]`
   */
  implicit def sourceUnmarshaller[A: JsonValueCodec](implicit
      support: JsonEntityStreamingSupport = EntityStreamingSupport.json(),
      config: ReaderConfig = defaultReaderConfig
  ): FromEntityUnmarshaller[SourceOf[A]] =
    Unmarshaller
      .withMaterializer[HttpEntity, SourceOf[A]] { implicit ec => implicit mat => entity =>
        def asyncParse(bs: ByteString) = Unmarshal(bs).to[A]

        def ordered = Flow[ByteString].mapAsync(support.parallelism)(asyncParse)

        def unordered = Flow[ByteString].mapAsyncUnordered(support.parallelism)(asyncParse)

        Future.successful {
          entity.dataBytes
            .via(support.framingDecoder)
            .via(if (support.unordered) unordered else ordered)
        }
      }
      .forContentTypes(unmarshallerContentTypes: _*)

  /**
   * `SourceOf[A]` => HTTP entity
   *
   * @tparam A
   *   type to encode
   * @return
   *   marshaller for any `SourceOf[A]` value
   */
  implicit def sourceMarshaller[A](implicit
      codec: JsonValueCodec[A],
      config: WriterConfig = defaultWriterConfig,
      support: JsonEntityStreamingSupport = EntityStreamingSupport.json()
  ): ToEntityMarshaller[SourceOf[A]] =
    jsonSourceStringMarshaller.compose(jsonSource[A])
}
hseeberger/akka-http-json
akka-http-jsoniter-scala/src/main/scala/de/heikoseeberger/akkahttpjsoniterscala/JsoniterScalaSupport.scala
Scala
apache-2.0
6,019
package rpgboss.editor

import scala.swing._
import rpgboss.lib._
import rpgboss.model._
import rpgboss.model.resource._
import rpgboss.editor.imageset.selector._
import rpgboss.editor.misc._
import scalaswingcontrib.event._
import scalaswingcontrib.tree._
import scalaswingcontrib.tree.Tree._
import scala.swing.event._
import rpgboss.editor.uibase._
import rpgboss.editor.dialog.MapPropertiesDialog
import java.awt.event.MouseEvent
import java.awt.datatransfer.DataFlavor
import javax.swing.DropMode
import javax.swing.TransferHandler
import javax.swing.JTree
import javax.swing.JComponent
import java.awt.datatransfer.Transferable
import java.awt.datatransfer.StringSelection
import javax.activation.DataHandler
import javax.swing.tree.DefaultTreeModel
import javax.swing.tree.TreeNode
import rpgboss.editor.Internationalized._
import rpgboss.editor.util.MouseUtil

/**
 * Map-tree selector for the project panel. Adds drag-and-drop re-parenting of
 * maps and a right-click popup menu (properties / delete / duplicate / new map)
 * on top of the base MapSelector.
 */
class ProjectPanelMapSelector(sm: StateMaster, projPanel: ProjectPanel)
  extends MapSelector(sm) {

  /**
   * Preserves ordering based on map name: returns the child index at which a
   * map named newMapName should be inserted among siblings.
   */
  def getDropIdx(newMapName: String, siblings: Seq[Node]) = {
    val largerMapIdx = siblings.indexWhere(_.mapName >= newMapName)
    if (largerMapIdx == -1) siblings.length else largerMapIdx
  }

  /*
   * Enable dragging and set drag handler.
   */
  tree.dragEnabled = true
  tree.peer.setDropMode(DropMode.ON)
  tree.peer.setTransferHandler(new javax.swing.TransferHandler {

    override def canImport(support: TransferHandler.TransferSupport): Boolean = {
      // Only string-flavored drops originating from this very tree are accepted.
      if (!support.isDataFlavorSupported(DataFlavor.stringFlavor) ||
          !support.isDrop()) {
        return false
      }
      if (support.getComponent() != tree.peer) {
        return false
      }
      val dropLocation =
        support.getDropLocation().asInstanceOf[JTree.DropLocation]
      return dropLocation.getPath() != null
    }

    override def getSourceActions(c: JComponent): Int = TransferHandler.MOVE

    // The transferable payload is just the dragged map's name.
    override def createTransferable(c: JComponent): Transferable = {
      tree.selection.paths.headOption.map { path =>
        if (path.last.mapName.isEmpty())
          return null
        return new StringSelection(path.last.mapName)
      }
      return null
    }

    override def importData(support: TransferHandler.TransferSupport): Boolean = {
      if (!canImport(support)) {
        println("Can't import")
        return false;
      }

      val dropLocation =
        support.getDropLocation().asInstanceOf[JTree.DropLocation]
      val dropPathJava = dropLocation.getPath()
      val dropPath = tree.treePathToPath(dropPathJava)
      val dropNode = dropPath.last
      val dropMapName = dropNode.mapName

      val transferable = support.getTransferable();
      val sourceMapName =
        transferable.getTransferData(DataFlavor.stringFlavor).toString()
      val sourceMap = sm.getMap(sourceMapName).get

      assert(allNodes.contains(sourceMapName))
      val sourceNode = allNodes.get(sourceMapName).get
      val sourceOldPath = sourceNode.getPath()

      val canDrop =
        // Don't allow drop on itself
        sourceMapName != dropMapName &&
        // Don't allow drop on existing parent
        sourceMap.metadata.parent != dropMapName &&
        // Don't allow drop on any of its descendants
        sourceOldPath != dropPath.take(sourceOldPath.length)

      if (canDrop) {
        // Modify the actual metadata
        sourceMap.metadata.parent = dropNode.mapName
        sm.setMap(sourceMapName, sourceMap, markDirty = true)

        val sourceNewPath = sourceNode.getPath()

        // Update the tree structure to reflect it: copy the whole subtree to
        // its new location (preserving expansion state), then remove the old one.
        def recursiveCopy(sourcePath: Path[Node], destParentPath: Path[Node]): Unit = {
          val sourceNode = sourcePath.last
          val sourceWasExpanded = tree.isExpanded(sourcePath)
          tree.model.insertUnder(
            destParentPath,
            sourceNode,
            getDropIdx(sourceNode.mapName, tree.model.getChildrenOf(destParentPath)))
          val destPath = destParentPath :+ sourceNode

          for (childNode <- tree.model.getChildrenOf(sourcePath)) {
            val childPath = sourcePath :+ childNode
            recursiveCopy(childPath, destPath)
          }

          if (sourceWasExpanded)
            tree.expandPath(destPath)
          else
            tree.collapsePath(destPath)
        }

        recursiveCopy(sourceOldPath, sourceNewPath.dropRight(1))
        tree.model.remove(sourceOldPath)
        highlightWithoutEvent(sourceNode)
      }

      return true;
    }
  })

  /*
   * Popup actions
   */
  def popupMenuFor(node: Node) = {
    new RpgPopupMenu {
      if (node != projectRoot) {
        contents += new MenuItem(Action(getMessage("Map_Properties") + "...") {
          val origMap = sm.getMap(node.mapName).get
          val origMapData = sm.getMapData(node.mapName)
          val d = new MapPropertiesDialog(
            projPanel.mainP.topWin,
            sm,
            "New Map",
            origMap,
            origMapData,
            (updatedMap, updatedMapData) => {
              sm.setMap(origMap.name, updatedMap)
              sm.setMapData(origMap.name, updatedMapData)

              // Update tree view in case title changed.
              val oldNode = allNodes.apply(updatedMap.name)
              val newNode = Node.apply(updatedMap)
              tree.model.update(oldNode.getPath(), newNode)

              // Select map again to refresh the map view and tileset selector
              projPanel.selectMap(Some(updatedMap))
            })
          d.open()
        })
        contents += new MenuItem(Action(getMessage("Delete")) {
          val msg =
            "Are you sure you want to delete map '%s'?".format(node.mapName)
          val answer = Dialog.showConfirmation(this, msg, "Delete")
          if (answer == Dialog.Result.Yes) {
            // BUGFIX: look the map up BEFORE removing it from the state
            // master. The original code called sm.getMap(node.mapName).get
            // after sm.removeMap(node.mapName), where the lookup runs against
            // removed state and .get would fail — we still need the map's
            // parent to decide which node to select next.
            val map = sm.getMap(node.mapName).get
            removeNode(node)
            sm.removeMap(node.mapName)
            allNodes
              .get(map.metadata.parent)
              .map(selectNode(_))
              .getOrElse(selectNode(projectRoot))
          }
        })
        contents += new MenuItem(Action(getMessage("Duplicate_Map") + "...") {
          val oldMap = sm.getMap(node.mapName).get
          val newName =
            RpgMap.generateName(sm.getProj.data.lastCreatedMapId + 1)
          val mapMetadataCopy = Utils.deepCopy(oldMap.metadata)
          mapMetadataCopy.title = "%s (%s)".format(
            mapMetadataCopy.title, getMessage("Copy"))
          val mapCopy = RpgMap(sm.getProj, newName, mapMetadataCopy)
          val mapDataCopy = sm.getMapData(node.mapName).deepcopy()
          onNewMap(mapCopy, mapDataCopy)
        })
        contents += new Separator
      }
      contents += new MenuItem(Action(getMessage("New_Map") + "...") {
        // Generate a new map with an incremented map id name
        val newMap = RpgMap.defaultInstance(
          sm.getProj,
          RpgMap.generateName(sm.getProj.data.lastCreatedMapId + 1))
        newMap.metadata.parent = node.mapName

        val d = new MapPropertiesDialog(
          projPanel.mainP.topWin,
          sm,
          "New Map",
          newMap,
          RpgMap.defaultMapData,
          onNewMap)
        d.open()
      })
    }
  }

  /** Registers a freshly created map with the state master and the tree view. */
  def onNewMap(newMap: RpgMap, newMapData: RpgMapData) = {
    val p = sm.getProj
    sm.setProjData(p.data.copy(
      lastCreatedMapId = p.data.lastCreatedMapId + 1))

    val parentNode = allNodes.get(newMap.metadata.parent).get
    val newNode = Node(newMap)
    val parentPath = parentNode.getPath()
    val parentsChildren = tree.model.getChildrenOf(parentPath)
    tree.model.insertUnder(parentPath, newNode,
      getDropIdx(newMap.name, parentsChildren))

    // Add to the state master. Don't actually write it ourselves
    sm.addMap(newMap, Some(newMapData), Dirtiness.Dirty)

    highlightWithoutEvent(newNode)

    // select the new map
    projPanel.selectMap(Some(newMap))
  }

  override def onSelectMap(map: Option[RpgMap]) = {
    projPanel.selectMap(map)
  }

  listenTo(tree.mouse.clicks)

  reactions += {
    case e: MouseClicked if e.source == tree => {
      val (x0, y0) = (e.point.getX().toInt, e.point.getY().toInt)

      if (MouseUtil.isRightClick(e)) {
        val clickRow = tree.getRowForLocation(x0, y0)

        // Temporarily disable selection events while popup in action
        deafTo(tree.selection)
        deafTo(tree.mouse.clicks)

        // The previously selected path
        val origRow = tree.selection.rows.headOption

        if (clickRow != -1)
          tree.selectRows(clickRow)

        val clickNode =
          if (clickRow == -1)
            projectRoot
          else
            tree.selection.paths.head.last

        val menu = popupMenuFor(clickNode)
        menu.showWithCallback(tree, x0, y0, onHide = () => {
          origRow.map(p => tree.selectRows(p))

          // Renable all eventns
          listenTo(tree.selection)
          listenTo(tree.mouse.clicks)
        })
      }
    }
  }
}
toastythought/rpgboss
editor/src/main/scala/rpgboss/editor/ProjectPanelMapSelector.scala
Scala
agpl-3.0
9,194
// Copyright (c) 2013-2020 Rob Norris and Contributors
// This software is licensed under the MIT License (MIT).
// For more information see LICENSE or https://opensource.org/licenses/MIT

package doobie.util.meta

import doobie.util.{Get, Put}

/** Checks that Meta instances resolve for primitives and imply Get/Put. */
class MetaSuite extends munit.FunSuite {

  test("Meta should exist for primitive types") {
    Meta[Int]
    Meta[String]
  }

  test("Meta should imply Get") {
    // Compile-time check only: a Meta context bound must yield a Get instance.
    def getViaMeta[A: Meta] = Get[A]
  }

  test("Meta should imply Put") {
    // Compile-time check only: a Meta context bound must yield a Put instance.
    def putViaMeta[A: Meta] = Put[A]
  }
}
tpolecat/doobie
modules/core/src/test/scala/doobie/util/meta/MetaSuite.scala
Scala
mit
514
package org.jetbrains.plugins.scala.util import junit.framework.TestCase import org.jetbrains.plugins.scala.AssertionMatchers import org.jetbrains.plugins.scala.project.ScalaLanguageLevel import scala.collection.compat.immutable.ArraySeq import scala.util.Random class BitMaskTest extends TestCase with AssertionMatchers { //noinspection TypeAnnotation object ExampleMask extends BitMaskStorage { val b1 = bool("b1") val n1 = nat(max = 5, "n1") val b2 = bool("b2") val i1 = int(min = -4, max = 22, "i1") val l1 = jEnum[ScalaLanguageLevel]("l1") override val version: Int = finishAndMakeVersion() } def testExampleMask(): Unit = { import ExampleMask._ b1.pos shouldBe 0 b1.chunkSize shouldBe 1 n1.pos shouldBe 1 n1.chunkSize shouldBe 3 b2.pos shouldBe 4 b2.chunkSize shouldBe 1 i1.pos shouldBe 5 i1.chunkSize shouldBe 5 i1.shiftedMax shouldBe 26 l1.pos shouldBe 10 } def testRandom(): Unit = { val rand = new Random(123) def makeRandomMaskStorage(): BitMaskStorage = new BitMaskStorage { for (i <- 0 to rand.nextInt(8)) { try rand.nextInt(3) match { case 0 => bool(s"b$i") case 1 => nat(max = math.abs(rand.nextInt()) min 1, s"n$i") case 2 => val Seq(a, b) = Seq(rand.nextInt(), rand.nextInt()).sorted int(a min b, a max b, s"i$i") case _ => ??? } catch { case a: AssertionError if a.getMessage.contains("Do not have space") => // cheap hack so we don't have to test if there is still space left } } override val version: Int = finishAndMakeVersion() } def makeRandomValueFor(mask: BitMask): mask.T = { val v = mask match { case BitMask.Bool(_) => rand.nextBoolean() case BitMask.Nat(_, max) => rand.nextInt(max + 1) case BitMask.Integer(_, min, max) => rand.between(min, max + 1) case _ => ??? 
} v.asInstanceOf[mask.T] } for (_ <- 0 to 10000) { val storage = makeRandomMaskStorage() val masks = storage.members.values.to(ArraySeq) var current = 0 for (_ <- 0 to 1000) { val oldValues = masks.map(_.read(current)) val i = rand.nextInt(masks.length) val mask = masks(i) val newValue = makeRandomValueFor(mask) val newCurrent = mask.write(current, newValue) // test if write was correct mask.read(newCurrent) shouldBe newValue // test if other values are still the same val newValues = masks.map(_.read(newCurrent)) newValues.patch(i, Nil, 1) shouldBe oldValues.patch(i, Nil, 1) current = newCurrent } } } }
JetBrains/intellij-scala
scala/scala-impl/test/org/jetbrains/plugins/scala/util/BitMaskTest.scala
Scala
apache-2.0
2,741
package com.crobox.clickhouse.dsl.column import com.crobox.clickhouse.DslIntegrationSpec import com.crobox.clickhouse.dsl._ class IPFunctionsTest extends DslIntegrationSpec { "Tokenization" should "succeed for IPFunctions" in { val num = toUInt32(1) r(iPv4NumToString(num)) shouldBe "0.0.0.1" r(iPv4StringToNum("0.0.0.1")) shouldBe "1" r(iPv4NumToStringClassC(num)) shouldBe "0.0.0.xxx" r(iPv6NumToString(toFixedString("0", 16))) shouldBe "3000::" r(iPv6StringToNum("3000::")) shouldBe "0\\\\0\\\\0\\\\0\\\\0\\\\0\\\\0\\\\0\\\\0\\\\0\\\\0\\\\0\\\\0\\\\0\\\\0\\\\0" } }
crobox/clickhouse-scala-client
dsl/src/test/scala/com/crobox/clickhouse/dsl/column/IPFunctionsTest.scala
Scala
lgpl-3.0
569
package de.htwg.zeta.common.format.project.gdsl import de.htwg.zeta.common.format.project.gdsl.diagram.DiagramFormat import de.htwg.zeta.common.models.project.gdsl.diagram.Diagram import play.api.libs.json.JsObject import play.api.libs.json.JsResult import play.api.libs.json.JsValue import play.api.libs.json.Json import play.api.libs.json.OFormat import play.api.libs.json.Reads import play.api.libs.json.Writes class DiagramsFormat( diagramFormat: DiagramFormat, sDiagrams: String ) extends OFormat[List[Diagram]] { override def writes(clazz: List[Diagram]): JsObject = Json.obj( sDiagrams -> Writes.list(diagramFormat).writes(clazz) ) override def reads(json: JsValue): JsResult[List[Diagram]] = (json \\ sDiagrams).validate(Reads.list(diagramFormat)) } object DiagramsFormat { def apply(): DiagramsFormat = new DiagramsFormat(DiagramFormat(), "diagrams") }
Zeta-Project/zeta
api/common/src/main/scala/de/htwg/zeta/common/format/project/gdsl/DiagramsFormat.scala
Scala
bsd-2-clause
890
package mesosphere.marathon package core.storage.store.impl.cache import java.util.UUID import akka.Done import akka.http.scaladsl.marshalling.Marshaller import akka.http.scaladsl.unmarshalling.Unmarshaller import akka.stream.scaladsl.Sink import com.mesosphere.utils.zookeeper.ZookeeperServerTest import mesosphere.AkkaUnitTest import mesosphere.marathon.core.base.JvmExitsCrashStrategy import mesosphere.marathon.core.storage.store.impl.InMemoryTestClass1Serialization import mesosphere.marathon.core.storage.store.impl.memory.InMemoryPersistenceStore import mesosphere.marathon.core.storage.store.impl.zk.{RichCuratorFramework, ZkPersistenceStore, ZkTestClass1Serialization} import mesosphere.marathon.core.storage.store.{IdResolver, PersistenceStoreTest, TestClass1} import mesosphere.marathon.metrics.dummy.DummyMetrics import mesosphere.marathon.storage.store.InMemoryStoreSerialization import mesosphere.marathon.test.SettableClock import scala.concurrent.duration._ class LazyCachingPersistenceStoreTest extends AkkaUnitTest with PersistenceStoreTest with ZkTestClass1Serialization with ZookeeperServerTest with InMemoryStoreSerialization with InMemoryTestClass1Serialization { private val metrics = DummyMetrics private def cachedInMemory = { val store = LazyCachingPersistenceStore(metrics, new InMemoryPersistenceStore(metrics)) store.markOpen() store } private def withLazyVersionCaching = { val store = LazyVersionCachingPersistentStore(metrics, new InMemoryPersistenceStore(metrics)) store.markOpen() store } private def cachedZk = { val root = UUID.randomUUID().toString val client = RichCuratorFramework(zkClient(namespace = Some(root)), JvmExitsCrashStrategy) val store = LazyCachingPersistenceStore(metrics, new ZkPersistenceStore(metrics, client)) store.markOpen() store } behave like basicPersistenceStore("LazyCache(InMemory)", cachedInMemory) behave like basicPersistenceStore("LazyCache(Zk)", cachedZk) behave like basicPersistenceStore("LazyVersionedCache(Zk)", 
withLazyVersionCaching) // TODO: Mock out the backing store. behave like cachingPersistenceStore("cache internals(InMemory)", withLazyVersionCaching) def cachingPersistenceStore[K, C, Serialized](name: String, newStore: => LazyVersionCachingPersistentStore[K, C, Serialized])(implicit ir: IdResolver[String, TestClass1, C, K], m: Marshaller[TestClass1, Serialized], um: Unmarshaller[Serialized, TestClass1] ): Unit = { name should { "purge the cache appropriately" in { implicit val clock = new SettableClock() val store = newStore 1.to(100).foreach { i => val obj = TestClass1("abc", i) clock.advanceBy(1.second) store.store("task-1", obj).futureValue should be(Done) } store.versionedValueCache.size should be(100) // sanity store.maybePurgeCachedVersions(maxEntries = 50, purgeCount = 10) store.versionedValueCache.size > 40 should be(true) store.versionedValueCache.size <= 50 should be(true) } "caches versions independently" in { implicit val clock = new SettableClock() val store = newStore val original = TestClass1("abc", 1) clock.advanceBy(1.minute) val updated = TestClass1("def", 2) store.store("task-1", original).futureValue should be(Done) store.store("task-1", updated).futureValue should be(Done) store.store("task-1", updated).futureValue should be(Done) // redundant store should not lead to dup data val storageId = ir.toStorageId("task-1", None) store.versionedValueCache.size should be(2) store.versionedValueCache((storageId, original.version)) should be(Some(original)) store.versionedValueCache((storageId, updated.version)) should be(Some(updated)) } "invalidates all cached versions upon deletion" in { implicit val clock = new SettableClock() val store = newStore val original = TestClass1("abc", 1) clock.advanceBy(1.minute) val updated = TestClass1("def", 2) store.store("task-1", original).futureValue should be(Done) store.store("task-1", updated).futureValue should be(Done) store.deleteVersion("task-1", original.version).futureValue should be(Done) 
ir.toStorageId("task-1", None) store.versionCache.size should be(0) store.versionedValueCache.size should be(0) } "reload versionCache upon versions request" in { implicit val clock = new SettableClock() val store = newStore val original = TestClass1("abc", 1) clock.advanceBy(1.minute) val updated = TestClass1("def", 2) store.store("task-1", original).futureValue should be(Done) store.store("task-1", updated).futureValue should be(Done) store.versionCache.clear() store.versionedValueCache.clear() store.versions("task-1").runWith(Sink.seq).futureValue should contain theSameElementsAs(Seq(original.version, updated.version)) store.versionedValueCache.size should be(0) } "reload versionedValueCache upon versioned get requests" in { implicit val clock = new SettableClock() val store = newStore val original = TestClass1("abc", 1) clock.advanceBy(1.minute) val updated = TestClass1("def", 2) store.store("task-1", original).futureValue should be(Done) store.store("task-1", updated).futureValue should be(Done) store.versionCache.clear() store.versionedValueCache.clear() store.get("task-1", original.version).futureValue should be(Some(original)) // sanity check val storageId = ir.toStorageId("task-1", None) store.versionedValueCache.size should be(1) store.versionedValueCache.contains((storageId, original.version)) should be(true) store.versionCache.size should be(0) } "reload versionedValueCache upon unversioned get requests" in { implicit val clock = new SettableClock() val store = newStore val original = TestClass1("abc", 1) clock.advanceBy(1.minute) val updated = TestClass1("def", 2) store.store("task-1", original).futureValue should be(Done) store.store("task-1", updated).futureValue should be(Done) store.versionCache.clear() store.versionedValueCache.clear() store.get("task-1").futureValue should be(Some(updated)) // sanity check val storageId = ir.toStorageId("task-1", None) store.versionedValueCache.size should be(1) store.versionedValueCache.contains((storageId, 
updated.version)) should be(true) } "versions available in the persistence store are cached correctly" in { implicit val clock = new SettableClock() val store = newStore val underlying = store.store // 1 version available in the cache and 2 in the underlying store store.store("test", TestClass1("abc", 1)).futureValue should be(Done) clock.advanceBy(1.minute) underlying.store("test", TestClass1("abc", 2)).futureValue should be(Done) clock.advanceBy(1.minute) underlying.store("test", TestClass1("abc", 3)).futureValue should be(Done) store.versionCache.size should be(0) // a call to versions will update the cache store.versions("test").runWith(Sink.seq).futureValue should have size 3 store.versionCache should have size 1 store.versionCache((ir.category, ir.toStorageId("test", None))) should have size 3 } } } }
mesosphere/marathon
src/test/scala/mesosphere/marathon/core/storage/store/impl/cache/LazyCachingPersistenceStoreTest.scala
Scala
apache-2.0
7,731
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.visor.commands.cache import org.apache.ignite.cluster.{ClusterGroupEmptyException, ClusterNode} import org.apache.ignite.visor.visor._ import org.apache.ignite.internal.visor.cache.VisorCacheStopTask import org.apache.ignite.internal.visor.util.VisorTaskUtils._ /** * ==Overview== * Visor 'stop' command implementation. * * ====Specification==== * {{{ * cache -c=<cache name> -stop * }}} * * ====Arguments==== * {{{ * <cache-name> * Name of the cache. * }}} * * ====Examples==== * {{{ * cache -c=@c0 -stop * Stops cache with name taken from 'c0' memory variable. * }}} */ class VisorCacheStopCommand { /** * Prints error message and advise. * * @param errMsgs Error messages. */ private def scold(errMsgs: Any*) { assert(errMsgs != null) warn(errMsgs: _*) warn("Type 'help cache' to see how to use this command.") } private def error(e: Exception) { var cause: Throwable = e while (cause.getCause != null) cause = cause.getCause scold(cause.getMessage) } /** * ===Command=== * Stop cache with specified name. * * ===Examples=== * <ex>cache -c=cache -stop</ex> * Stop cache with name 'cache'. * * @param argLst Command arguments. 
*/ def stop(argLst: ArgList, node: Option[ClusterNode]) { val cacheArg = argValue("c", argLst) val cacheName = cacheArg match { case None => null // default cache. case Some(s) if s.startsWith("@") => warn("Can't find cache variable with specified name: " + s, "Type 'cache' to see available cache variables." ) return case Some(name) => name } val grp = try { groupForDataNode(node, cacheName) } catch { case _: ClusterGroupEmptyException => scold(messageNodeNotFound(node, cacheName)) return } val dflt = if (batchMode) "y" else "n" ask(s"Are you sure you want to stop cache: ${escapeName(cacheName)}? (y/n) [$dflt]: ", dflt) match { case "y" | "Y" => try { executeRandom(grp, classOf[VisorCacheStopTask], cacheName) println("Visor successfully stop cache: " + escapeName(cacheName)) } catch { case _: ClusterGroupEmptyException => scold(messageNodeNotFound(node, cacheName)) case e: Exception => error(e) } case "n" | "N" => case x => nl() warn("Invalid answer: " + x) } } } object VisorCacheStopCommand { /** Singleton command. */ private val cmd = new VisorCacheStopCommand /** * Singleton. */ def apply() = cmd }
tkpanther/ignite
modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheStopCommand.scala
Scala
apache-2.0
3,784
package com.github.jw3.di import java.util.UUID import akka.actor.Actor import com.github.jw3.di.SimpleInjectectionSpec._ import com.github.jw3.di.test.InjectSpec import net.codingwell.scalaguice.ScalaModule import org.scalatest.Matchers import scala.util.Random class SimpleInjectionSpec extends InjectSpec with Matchers { "simple injection" should { injectTest("throw if trying to inject actor, regardless of binding presence") { implicit sys => intercept[Exception] {inject[Actor].required} } injectTest("throw for boxed when no bindings are present") { implicit sys => intercept[Exception] {inject[Int].required} } injectTest("throw for String when no bindings are present") { implicit sys => intercept[Exception] {inject[String].required} } injectTest("throw for trait when no bindings are present") { implicit sys => intercept[Exception] {inject[IDontExist].required} } injectTest("inject boxed when bindings are present", Seq(SimpleBindings)) { implicit sys => inject[Int].required shouldBe intVal } injectTest("inject String when bindings are present", Seq(SimpleBindings)) { implicit sys => inject[String].required shouldBe stringVal } injectTest("inject trait when bindings are present", Seq(SimpleBindings)) { implicit sys => inject[IDoExist].required shouldBe DoesExists } } } object SimpleInjectectionSpec { trait IDontExist trait IDoExist object DoesExists extends IDoExist val stringVal = UUID.randomUUID.toString val intVal = Random.nextInt() object SimpleBindings extends ScalaModule { override def configure(): Unit = { bind[IDoExist].toInstance(DoesExists) bind[String].toInstance(stringVal) bind[Int].toInstance(intVal) } } }
jw3/akka-injects
src/test/scala/com/github/jw3/di/SimpleInjectionSpec.scala
Scala
apache-2.0
1,803
package gatsbyexamples import io.gatling.core.Predef._ import com.themillhousegroup.gatsby.GatsbySimulation import com.themillhousegroup.gatsby.GatsbyHttpActionBuilder._ import io.gatling.http.Predef._ /** * For this to pass, you'll need some device/tool configured as follows: * * GATLING => localhost:8888 => <THING> => localhost:9999 => STUBBY * * There are two scenarios here. * The positive one checks that <THING> is doing port-forwarding for /public * The negative one asserts that access to /secret gets blocked. */ class FilteringSimulation extends GatsbySimulation(9999) { val httpConf = http.baseURL("http://localhost:8888") val scn1 = scenario("AllowedPage") .exec(withStubby(http("allowed-req-1").get("/public"))) .pause(1) val scn2 = scenario("BlockedPage") .exec(withStubby(http("blocked-req-1").get("/secret").check(status.is(403)))) .pause(1) setUp( scn1.inject(atOnceUsers(1)), scn2.inject(atOnceUsers(1)) ) .protocols(httpConf) .assertions( global.successfulRequests.count.is(2), stubby.requestsSeen.is(1), stubby.requestsSeenFor("/secret").is(0)) }
themillhousegroup/gatsby
user-files/simulations/gatsbyexamples/FilteringGatsbySimulation.scala
Scala
mit
1,146
package org.jetbrains.plugins.scala package lang package psi package impl package base package patterns import com.intellij.lang.ASTNode import com.intellij.psi._ import org.jetbrains.plugins.scala.lang.psi.api.ScalaElementVisitor import org.jetbrains.plugins.scala.lang.psi.api.base.patterns._ import org.jetbrains.plugins.scala.lang.psi.types.ScType /** * @author Alexander Podkhalyuzin * Date: 28.02.2008 */ class ScWildcardPatternImpl(node: ASTNode) extends ScalaPsiElementImpl (node) with ScWildcardPattern { override def accept(visitor: PsiElementVisitor): Unit = { visitor match { case visitor: ScalaElementVisitor => super.accept(visitor) case _ => super.accept(visitor) } } override def isIrrefutableFor(t: Option[ScType]): Boolean = true override def toString: String = "WildcardPattern" }
triggerNZ/intellij-scala
src/org/jetbrains/plugins/scala/lang/psi/impl/base/patterns/ScWildcardPatternImpl.scala
Scala
apache-2.0
833
/* Copyright 2012 Twitter, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.twitter.algebird import java.util.PriorityQueue import com.twitter.algebird.mutable.PriorityQueueMonoid import org.scalacheck.Arbitrary import org.scalacheck.Prop._ import scala.collection.JavaConverters._ class TopKTests extends CheckProperties { import com.twitter.algebird.BaseProperties._ val SIZE = 10 implicit def qmonoid = new PriorityQueueMonoid[Int](SIZE) implicit def queueArb = Arbitrary { implicitly[Arbitrary[List[Int]]].arbitrary.map { qmonoid.build(_) } } def q2l(q1: PriorityQueue[Int]): List[Int] = q1.iterator.asScala.toList.sorted def eqFn(q1: PriorityQueue[Int], q2: PriorityQueue[Int]): Boolean = { q2l(q1) == q2l(q2) } def pqIsCorrect(items: List[List[Int]]): Boolean = { val correct = items.flatten.sorted.take(SIZE) // Have to do this last since this monoid is mutating inputs q2l(Monoid.sum(items.map { l => qmonoid.build(l) })) == correct } property("PriorityQueueMonoid works") { forAll { (items: List[List[Int]]) => pqIsCorrect(items) } } /** * The following were specific bugs that we failed some prior * scalacheck (yay for randomized testing) */ val pqPriorBugs = Seq(List(List(1, 1, 1, 2), List(0, 0, 0, 0, 0, 0, 0))) property("Specific regressions are handled") { pqPriorBugs.forall(pqIsCorrect(_)) } property("PriorityQueueMonoid is a Monoid") { monoidLawsEq[PriorityQueue[Int]](eqFn) } implicit def tkmonoid = new TopKMonoid[Int](SIZE) implicit def topkArb = Arbitrary { 
implicitly[Arbitrary[List[Int]]].arbitrary.map { tkmonoid.build(_) } } property("TopKMonoid works") { forAll { (its: List[List[Int]]) => val correct = its.flatten.sorted.take(SIZE) Equiv[List[Int]].equiv(Monoid.sum(its.map { l => tkmonoid.build(l) }).items, correct) } } property("TopKMonoid is a Monoid") { monoidLawsEq[PriorityQueue[Int]](eqFn) } }
nvoron23/algebird
algebird-test/src/test/scala/com/twitter/algebird/TopKTests.scala
Scala
apache-2.0
2,470
package modules import com.google.inject.AbstractModule import models.daos.{ AuthTokenDAO, AuthTokenDAOImpl } import models.services.{ AuthTokenService, AuthTokenServiceImpl } import net.codingwell.scalaguice.ScalaModule /** * The base Guice module. */ class BaseModule extends AbstractModule with ScalaModule { /** * Configures the module. */ def configure(): Unit = { bind[AuthTokenDAO].to[AuthTokenDAOImpl] bind[AuthTokenService].to[AuthTokenServiceImpl] } }
raisercostin/play-silhouette-slick-seed
modules/silhouette/app/modules/BaseModule.scala
Scala
apache-2.0
487
/* * Copyright 2017 PayPal * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.squbs.env import akka.actor.ActorSystem import akka.testkit.TestKit import org.scalatest.BeforeAndAfterEach import org.scalatest.flatspec.AnyFlatSpecLike import org.scalatest.matchers.should.Matchers class EnvironmentSpec extends TestKit(ActorSystem("EnvironmentSpec")) with AnyFlatSpecLike with Matchers with BeforeAndAfterEach{ override def beforeEach(): Unit = { EnvironmentResolverRegistry(system).register(DummyProdEnvironmentResolver) } override def afterEach(): Unit = { EnvironmentResolverRegistry(system).environmentResolvers = List[EnvironmentResolver]() } "EnvironmentResolverRegistry" should "contain DummyProdEnvironmentResolver" in { EnvironmentResolverRegistry(system).environmentResolvers should have size 1 EnvironmentResolverRegistry(system).environmentResolvers.head should be (DummyProdEnvironmentResolver) } it should "resolve the environment" in { EnvironmentResolverRegistry(system).resolve should be (PROD) } it should "give priority to resolvers in the reverse order of registration" in { EnvironmentResolverRegistry(system).register(DummyQAEnvironmentResolver) EnvironmentResolverRegistry(system).resolve should be (QA) } it should "try the chain of resolvers till the environment can be resolved" in { EnvironmentResolverRegistry(system).register(DummyNotResolveEnvironmentResolver) EnvironmentResolverRegistry(system).resolve should be (PROD) } it should "unregister a resolver" in { 
EnvironmentResolverRegistry(system).register(DummyQAEnvironmentResolver) EnvironmentResolverRegistry(system).resolve should be (QA) EnvironmentResolverRegistry(system).unregister("DummyQAEnvironmentResolver") EnvironmentResolverRegistry(system).resolve should be (PROD) } it should "not throw exceptions when unregister non-existing resolver" in { val resolverCount = EnvironmentResolverRegistry(system).environmentResolvers.size noException shouldBe thrownBy { EnvironmentResolverRegistry(system).unregister("DummyQAEnvironmentResolver") } EnvironmentResolverRegistry(system).environmentResolvers.size shouldBe resolverCount } it should "represent the correct lowercase name" in { PROD.lowercaseName shouldBe "prod" QA.lowercaseName shouldBe "qa" DEV.lowercaseName shouldBe "dev" } }
akara/squbs
squbs-ext/src/test/scala/org/squbs/env/EnvironmentSpec.scala
Scala
apache-2.0
2,930
/*§ =========================================================================== EighthBridge =========================================================================== Copyright (C) 2016 Gianluca Costa =========================================================================== Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =========================================================================== */ package info.gianlucacosta.eighthbridge.fx.canvas import info.gianlucacosta.eighthbridge.graphs.point2point.visual.{VisualGraph, VisualLink, VisualVertex} /** * JavaFX node rendering the graph background and the selection rectangle */ trait BackgroundNode[ V <: VisualVertex[V], L <: VisualLink[L], G <: VisualGraph[V, L, G] ] extends GraphCanvasNode[V, L, G]
giancosta86/EighthBridge
src/main/scala/info/gianlucacosta/eighthbridge/fx/canvas/BackgroundNode.scala
Scala
apache-2.0
1,280
package org.automanlang.core.logging import org.scalatest._ import java.util.UUID import org.automanlang.test._ import org.automanlang.adapters.mturk.DSL._ import org.automanlang.adapters.mturk.mock.MockSetup class RadioDistribMemoTest extends FlatSpec with Matchers { "A radio button distribution program" should "correctly recall answers at no cost" in { val confidence = 0.95 implicit val mt = mturk ( access_key_id = UUID.randomUUID().toString, secret_access_key = UUID.randomUUID().toString, use_mock = MockSetup(balance = 8.00), logging = LogConfig.TRACE_MEMOIZE_VERBOSE, log_verbosity = LogLevelDebug() ) // test params val sample_size = 30 val mock_answers = genAnswers( Array('oscar, 'kermit, 'spongebob, 'cookie, 'count), Array("0.02", "0.14", "0.78", "0.05", "0.01"), sample_size ).toList def which_one(text: String) = radios ( sample_size = sample_size, budget = 8.00, text = text, options = List( choice('oscar, "Oscar the Grouch"), choice('kermit, "Kermit the Frog"), choice('spongebob, "Spongebob Squarepants"), choice('cookie, "Cookie Monster"), choice('count, "The Count") ), mock_answers = makeMocksAt(mock_answers.toList, 0) ) def which_one2(text: String) = radios ( sample_size = sample_size, budget = 8.00, text = text, options = List( choice('oscar, "Oscar the Grouch"), choice('kermit, "Kermit the Frog"), choice('spongebob, "Spongebob Squarepants"), choice('cookie, "Cookie Monster"), choice('count, "The Count") ), mock_answers = List() ) automan(mt, test_mode = true, in_mem_db = true) { which_one("Which one of these does not belong?").answer match { case Answers(values, cost, _, _) => println("Answer: '" + value + "', cost: '" + cost + "'") compareDistributions(mock_answers, values) should be (true) cost should be (BigDecimal(0.06) * sample_size) case _ => fail() } which_one2("Which one of these does not belong?").answer match { case Answers(values, cost, _, _) => println("Answer: '" + value + "', cost: '" + cost + "'") compareDistributions(mock_answers, values) 
should be (true) (cost == BigDecimal(0.00)) should be (true) case _ => fail() } } } }
dbarowy/AutoMan
libautoman/src/test/scala/org/automanlang/core/logging/RadioDistribMemoTest.scala
Scala
gpl-2.0
2,486
package xyz.hyperreal.cramsite.dao

import slick.driver.H2Driver.api._
import com.github.tototoshi.slick.H2JodaSupport._
import org.joda.time.Instant
import spray.json.DefaultJsonProtocol._

import concurrent._
import concurrent.ExecutionContext.Implicits.global

import xyz.hyperreal.cramsite._

// A site user; `id` is None until the row has been persisted.
// NOTE(review): `status` is an application-defined code — meaning not visible here.
case class User( name: Option[String], email: Option[String], password: Option[String], pid: Int, registered: Instant, status: Byte, id: Option[Int] = None )

object User {
  // spray-json format for serializing User (resolved implicitly by type).
  implicit val user = jsonFormat7(User.apply)
}

/** Slick mapping for the `users` table. */
class UsersTable(tag: Tag) extends Table[User](tag, "users") {
  def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
  def name = column[Option[String]]("name")
  def email = column[Option[String]]("email")
  def password = column[Option[String]]("password")
  def pid = column[Int]("pid")
  def registered = column[Instant]("registered")
  def status = column[Byte]("status")

  def * = (name, email, password, pid, registered, status, id.?) <> (User.apply _ tupled, User.unapply)

  def idx_users_name = index("idx_users_name", name, unique = true)
  def idx_users_email = index("idx_users_email", email, unique = true)
  def idx_users_password = index("idx_users_password", password)
}

/** Data-access operations for users. */
object Users extends TableQuery(new UsersTable(_)) {
  // Look up a user by primary key.
  def find(id: Int): Future[Option[User]] = db.run( filter(_.id === id).result ) map (_.headOption)

  def findByName( name: String ) = db.run( filter(_.name === name).result ) map (_.headOption)

  def findByEmail( email: String ) = db.run( filter(_.email === email).result ) map (_.headOption)

  // Credentials lookup: email and password must both match.
  def find( email: String, password: String ) = db.run( filter(r => r.email === email && r.password === password).result ) map (_.headOption)

  // Insert a new user, returning a copy that carries the generated id.
  def create( name: Option[String], email: Option[String], password: Option[String], pid: Int, status: Int ) = db.run( (this returning map(_.id) into ((user, id) => user.copy(id = Some(id)))) += User(name, email, password, pid, Instant.now, status.asInstanceOf[Byte]) )

  // Update the mutable profile fields; plain-string arguments are wrapped in Some.
  def update( id: Int, name: String, email: String, password: String, status: Int ) = db.run( filter(_.id === id) map (u => (u.name, u.email, u.password, u.status)) update (Some(name), Some(email), Some(password), status.toByte) )

//  def update( id: Int, pid: Int ) =
//    db.run( filter(_.id === id) map (u => (u.pid)) update Some(pid) )

  def delete(id: Int): Future[Int] = { db.run(filter(_.id === id).delete) }

  def list: Future[Seq[User]] = db.run(this.result)

  // `dbrun` is a helper defined elsewhere in the package — presumably wraps db.run; verify.
  def count = dbrun( this.length.result )
}

// A flash-card pair (front/back) belonging to a file.
case class Pair( fileid: Int, front: String, back: String, id: Option[Int] = None )

object Pair {
  implicit val pair = jsonFormat4(Pair.apply)
}

/** Slick mapping for the `pairs` table. */
class PairsTable(tag: Tag) extends Table[Pair](tag, "pairs") {
  def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
  def fileid = column[Int]("fileid")
  def front = column[String]("front")
  def back = column[String]("back")

  def * = (fileid, front, back, id.?) <> (Pair.apply _ tupled, Pair.unapply)

  // Deleting a file cascades to its pairs.
  def file_fk = foreignKey("pairs_file_fk", fileid, Files)(_.id, onDelete=ForeignKeyAction.Cascade)
}

/** Data-access operations for pairs. */
object Pairs extends TableQuery(new PairsTable(_)) {
  // All pairs belonging to a file.
  def find(fileid: Int): Future[Seq[Pair]] = db.run( filter(_.fileid === fileid) result )

  def create( fileid: Int, front: String, back: String ) = db.run( this += Pair(fileid, front, back) )

  def delete( id: Int ): Future[Int] = db.run( filter (_.id === id) delete )

  def deleteByFileid(fileid: Int): Future[Int] = { db.run(filter(_.fileid === fileid).delete) }

  def update( id: Int, front: String, back: String ) = db.run( filter(_.id === id) map (p => (p.front, p.back)) update (front, back) )

  def list: Future[Seq[Pair]] = db.run(this.result)
}

// Per-user, per-pair answer counters. Note: `foreward` spelling matches the DB columns.
case class Tally( userid: Int, pairid: Int, fileid: Int, foreward: Int, backward: Int )

object Tally {
  // NOTE(review): named `pair` (looks copy/pasted from Pair) — implicits resolve by
  // type so this works, but consider renaming to `tally` for consistency.
  implicit val pair = jsonFormat5(Tally.apply)
}

/** Slick mapping for the `tallies` table; composite key (userid, pairid). */
class TalliesTable(tag: Tag) extends Table[Tally](tag, "tallies") {
  def userid = column[Int]("userid")
  def pairid = column[Int]("pairid")
  def fileid = column[Int]("fileid")
  def foreward = column[Int]("foreward")
  def backward = column[Int]("backward")

  def * = (userid, pairid, fileid, foreward, backward) <> (Tally.apply _ tupled, Tally.unapply)

  def pk = primaryKey("pk_tallies", (userid, pairid))
  def idx_user = index("idx_tallies_user", userid)
  def idx_file = index("idx_tallies_file", fileid)
  def user_fk = foreignKey("tallies_user_fk", userid, Users)(_.id, onDelete=ForeignKeyAction.Cascade)
  def pair_fk = foreignKey("tallies_pair_fk", pairid, Pairs)(_.id, onDelete=ForeignKeyAction.Cascade)
  def file_fk = foreignKey("tallies_file_fk", fileid, Files)(_.id, onDelete=ForeignKeyAction.Cascade)
}

/** Data-access operations for tallies. */
object Tallies extends TableQuery(new TalliesTable(_)) {
  def findByFileid( fileid: Int, userid: Int ): Future[Seq[Tally]] = db.run( filter(t => t.fileid === fileid && t.userid === userid) result )

  def findByPairid( pairid: Int, userid: Int ) = db.run( filter(t => t.pairid === pairid && t.userid === userid) result ) map (_.headOption)

  // New tallies start with both counters at zero.
  def create( userid: Int, pairid: Int, fileid: Int ) = db.run( this += Tally(userid, pairid, fileid, 0, 0) )

  def delete( fileid: Int, userid: Int ): Future[Int] = db.run( filter(t => t.fileid === fileid && t.userid === userid).delete )

  def update( userid: Int, pairid: Int, foreward: Int, backward: Int ) = db.run( filter(t => t.pairid === pairid && t.userid === userid) map (p => (p.foreward, p.backward)) update (foreward, backward) )

  def list: Future[Seq[Tally]] = db.run(this.result)
}

// A node in the site's file tree: `parentid` of None marks the root.
case class File( name: String, description: String, created: Instant, parentid: Option[Int], visible: Boolean, contents: Option[String], imageid: Option[Int], id: Option[Int] = None )

object File {
  implicit val file = jsonFormat8(File.apply)
}

/** Slick mapping for the `files` table. */
class FilesTable(tag: Tag) extends Table[File](tag, "files") {
  def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
  def name = column[String]("name")
  def description = column[String]("description")
  // NOTE(review): the `created` field maps to a column named "date".
  def created = column[Instant]("date")
  def parentid = column[Option[Int]]("parentid")
  def visible = column[Boolean]("visible")
  def contents = column[Option[String]]("contents")
  def imageid = column[Option[Int]]("imageid")

  def * = (name, description, created, parentid, visible, contents, imageid, id.?) <> (File.apply _ tupled, File.unapply)

  // Deleting a parent removes its subtree; deleting an image just nulls the reference.
  def parent_fk = foreignKey("files_parent_fk", parentid, Files)(_.id.?, onDelete=ForeignKeyAction.Cascade)
  def image_fk = foreignKey("files_image_fk", imageid, Medias)(_.id.?, onDelete=ForeignKeyAction.SetNull)
  def idx_files_name = index("idx_files_name", name)
  def idx_files_name_parent = index("idx_files_name_parent", (name, parentid), unique = true )
}

/** Data-access operations for files. */
object Files extends TableQuery(new FilesTable(_)) {
  def find(id: Int): Future[Option[File]] = db.run( filter(_.id === id) result ) map (_.headOption)

  // Visible children of a folder, sorted by name.
  def findUnder(parentid: Int) = db.run( filter (f => f.visible && f.parentid === parentid) sortBy (_.name.asc) result )

  def find(parentid: Int, name: String) = db.run( filter (f => f.parentid.isDefined && f.name === name && f.parentid === parentid) result ) map (_.headOption)

  // Root entries are the ones with no parent.
  def findRoot = dbrun( filter (f => f.parentid.isEmpty) result )

  // Insert a new file, returning a copy carrying the generated id.
  def create( name: String, description: String, parentid: Option[Int], visible: Boolean, contents: Option[String], imageid: Option[Int] ) = db.run( (this returning map(_.id) into ((file, id) => file.copy(id=Some(id)))) += File(name, description, Instant.now, parentid, visible, contents, imageid) )

  def delete(id: Int): Future[Int] = { db.run(filter(_.id === id).delete) }

  def update( id: Int, name: String, description: String ) = db.run( filter(_.id === id) map (f => (f.name, f.description)) update (name, description) )

  def list: Future[Seq[File]] = db.run(this.result)
}

// Binary media (e.g. images) uploaded by a user.
case class Media( userid: Int, data: Array[Byte], extension: String, id: Option[Int] = None )

/** Slick mapping for the `medias` table. */
class MediasTable(tag: Tag) extends Table[Media](tag, "medias") {
  def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
  def userid = column[Int]("userid")
  def data = column[Array[Byte]]("data")
  def extension = column[String]("extension")

  def * = (userid, data, extension, id.?) <> (Media.tupled, Media.unapply)
}

/** Data-access operations for media. */
object Medias extends TableQuery(new MediasTable(_)) {
  def find(id: Int): Future[Option[Media]] = db.run( filter(_.id === id) result ) map (_.headOption)

  // NOTE(review): returns an unexecuted Query (not run against the DB), unlike the other finders.
  def findByUserid(userid: Int) = filter (_.userid === userid)

  // Insert a media row; returns only the generated id.
  def create( userid: Int, data: Array[Byte], extension: String ) = db.run( this returning map(_.id) += Media(userid, data, extension) )

  // Deletes ALL media belonging to a user (parameter is a userid, not a media id).
  def delete(userid: Int): Future[Int] = { db.run(filter(_.userid === userid).delete) }

  def list: Future[Seq[Media]] = db.run(this.result)
}

// A user's bookmark of a file.
case class Favorite( userid: Int, fileid: Int )

/** Slick mapping for the `favorites` table; composite key (userid, fileid). */
class FavoritesTable(tag: Tag) extends Table[Favorite](tag, "favorites") {
  def userid = column[Int]("userid")
  def fileid = column[Int]("fileid")

  def * = (userid, fileid) <> (Favorite.tupled, Favorite.unapply)

  def pk = primaryKey("pk_favorites", (userid, fileid))
  def user_fk = foreignKey("favorites_user_fk", userid, Users)(_.id, onDelete=ForeignKeyAction.Cascade)
  def file_fk = foreignKey("favorites_file_fk", fileid, Files)(_.id, onDelete=ForeignKeyAction.Cascade)
  def idx_favorites_user_file = index("idx_favorites_user_file", (userid, fileid), unique = true )
}

/** Data-access operations for favorites. */
object Favorites extends TableQuery(new FavoritesTable(_)) {
  // NOTE(review): returns an unexecuted Query, consistent with Medias.findByUserid.
  def findByUserid(userid: Int) = filter (_.userid === userid)

  def create( userid: Int, fileid: Int ) = db.run( this += Favorite(userid, fileid) )

  def delete( userid: Int, fileid: Int ): Future[Int] = { db.run(filter(f => f.userid === userid && f.fileid === fileid).delete) }

  def list: Future[Seq[Favorite]] = db.run(this.result)
}

// One logged page visit; `userid` is set only for authenticated requests.
case class Visit( ip: String, host: Option[String], path: String, referrer: Option[String], date: Instant, userid: Option[Int], id: Option[Int] = None )

object Visit {
  implicit val visit = jsonFormat7(Visit.apply)
}

/** Slick mapping for the `visits` table. */
class VisitsTable(tag: Tag) extends Table[Visit](tag, "visits") {
  def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
  def ip = column[String]("ip")
  def host = column[Option[String]]("host")
  def path = column[String]("path")
  def referrer = column[Option[String]]("referrer")
  def date = column[Instant]("date")
  def userid = column[Option[Int]]("userid")

  def * = (ip, host, path, referrer, date, userid, id.?) <> (Visit.apply _ tupled, Visit.unapply)

  // Deleting a user keeps the visit but nulls the reference.
  def user_fk = foreignKey("visits_user_fk", userid, Users)(_.id.?, onDelete=ForeignKeyAction.SetNull)
  def idx_visits_ip = index("idx_visits_ip", ip)
  def idx_visits_referrer = index("idx_visits_referrer", referrer)
}

/** Data-access operations for visits. */
object Visits extends TableQuery(new VisitsTable(_)) {
  def find(id: Int): Future[Option[Visit]] = db.run( filter(_.id === id) result ) map (_.headOption)

  def create( ip: String, host: Option[String], path: String, referrer: Option[String], date: Instant, userid: Option[Int] ) = db.run( this returning map(_.id) += Visit(ip, host, path, referrer, date, userid) )

  def list: Future[Seq[Visit]] = db.run(this.result)
}
edadma/cram-site
src/main/scala/DAO.scala
Scala
mit
11,044
import java.io.File

import testgen.TestSuiteBuilder._
import testgen._

/** Generates the exercism "largest-series-product" test suite from its
  * canonical JSON data.
  */
object SeriesTestGenerator {

  def main(args: Array[String]): Unit = {
    val canonicalData = new File("src/main/resources/largest-series-product.json")

    // Render a raw canonical expected value as Scala source for an Option result.
    def renderValue(any: Any): String =
      any match {
        case -1 => "None"
        case i: Int => s"Some($i)"
        case s: String =>
          val q = quote(s)
          s"$q$s$q"
        case other => throw new IllegalStateException("Invalid expected val -" + other)
      }

    // Errors in the canonical data map to None; successes are rendered as values.
    def renderExpected(expected: CanonicalDataParser.Expected): String =
      expected match {
        case Left(error) => s"None"
        case Right(exp) => renderValue(exp)
      }

    // Build one TestCaseData per labeled canonical case, pulling the named
    // arguments out of the case's input section.
    def fromLabeledTestFromInput(argNames: String*): ToTestCaseData =
      withLabeledTest { sut => labeledTest =>
        val args = sutArgsFromInput(labeledTest.result, argNames: _*)
        val property = labeledTest.property
        val sutCall = s"""Series.$property($args)"""
        val expected = renderExpected(labeledTest.expected)
        TestCaseData(labeledTest.description, sutCall, expected)
      }

    val code = TestSuiteBuilder.build(canonicalData, fromLabeledTestFromInput("span", "digits"))
    println(s"-------------")
    println(code)
    println(s"-------------")
  }
}
ricemery/xscala
testgen/src/main/scala/SeriesTestGenerator.scala
Scala
mit
1,325
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.table.api.batch.sql.validation

import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.scala._
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.{Types, ValidationException}
import org.apache.flink.table.utils.{MemoryTableSourceSinkUtil, TableTestBase}

import org.junit._

/** Negative tests for batch SQL `INSERT INTO`: each case registers a sink whose
  * schema is incompatible with the query and expects a [[ValidationException]]
  * from `sqlUpdate`.
  */
class InsertIntoValidationTest extends TableTestBase {

  @Test(expected = classOf[ValidationException])
  def testInconsistentLengthInsert(): Unit = {
    val util = batchTestUtil()
    util.addTable[(Int, Long, String)]("sourceTable", 'a, 'b, 'c)

    // Sink has only two fields while the query selects three.
    val fieldNames = Array("d", "e")
    val fieldTypes: Array[TypeInformation[_]] = Array(Types.INT, Types.LONG)
    val sink = new MemoryTableSourceSinkUtil.UnsafeMemoryAppendTableSink
    util.tableEnv.registerTableSink("targetTable", sink.configure(fieldNames, fieldTypes))

    val sql = "INSERT INTO targetTable SELECT a, b, c FROM sourceTable"

    // must fail because table sink schema has too few fields
    util.tableEnv.sqlUpdate(sql)
  }

  @Test(expected = classOf[ValidationException])
  def testUnmatchedTypesInsert(): Unit = {
    val util = batchTestUtil()
    util.addTable[(Int, Long, String)]("sourceTable", 'a, 'b, 'c)

    // Same arity as the query but with permuted/incompatible field types.
    val fieldNames = Array("d", "e", "f")
    val fieldTypes: Array[TypeInformation[_]] = Array(Types.STRING, Types.INT, Types.LONG)
    val sink = new MemoryTableSourceSinkUtil.UnsafeMemoryAppendTableSink
    util.tableEnv.registerTableSink("targetTable", sink.configure(fieldNames, fieldTypes))

    val sql = "INSERT INTO targetTable SELECT a, b, c FROM sourceTable"

    // must fail because types of table sink do not match query result
    util.tableEnv.sqlUpdate(sql)
  }

  @Test(expected = classOf[ValidationException])
  def testUnsupportedPartialInsert(): Unit = {
    val util = batchTestUtil()
    util.addTable[(Int, Long, String)]("sourceTable", 'a, 'b, 'c)

    // Compatible schema, but the INSERT names only a subset of sink columns.
    val fieldNames = Array("d", "e", "f")
    val fieldTypes = util.tableEnv.scan("sourceTable").getSchema.getFieldTypes
    val sink = new MemoryTableSourceSinkUtil.UnsafeMemoryAppendTableSink
    util.tableEnv.registerTableSink("targetTable", sink.configure(fieldNames, fieldTypes))

    val sql = "INSERT INTO targetTable (d, f) SELECT a, c FROM sourceTable"

    // must fail because partial insert is not supported yet.
    util.tableEnv.sqlUpdate(sql)
  }
}
fhueske/flink
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/batch/sql/validation/InsertIntoValidationTest.scala
Scala
apache-2.0
3,194
package services.documents.pdf

import java.io.ByteArrayInputStream

import com.itextpdf.text.pdf.PdfReader
import org.apache.commons.io.IOUtils
import play.api.test.PlaySpecification

/** Checks that concatenating two PDFs yields a document whose page count is
  * the sum of the inputs' page counts.
  */
class ITextPDFConcatenatorTest extends PlaySpecification {

  // Classpath locations of the two fixture PDFs.
  private val templateA = "forms/pdf_templates/VBA-21-0966-ARE.pdf"
  private val templateB = "forms/pdf_templates/VBA-21-526EZ-ARE.pdf"

  // Load a classpath resource fully into memory.
  private def resourceBytes(path: String): Array[Byte] =
    IOUtils.toByteArray(getClass.getClassLoader.getResourceAsStream(path))

  // Open a reader directly over a classpath resource.
  private def resourceReader(path: String): PdfReader =
    new PdfReader(getClass.getClassLoader.getResourceAsStream(path))

  "the concat method" should {
    "produce the correct number of pages when two documents are concatenated" in {
      val merged = new ITextPDFConcatenator().concat(Seq(resourceBytes(templateA), resourceBytes(templateB)))
      val mergedReader = new PdfReader(new ByteArrayInputStream(merged))
      val readerA = resourceReader(templateA)
      val readerB = resourceReader(templateB)
      readerA.getNumberOfPages + readerB.getNumberOfPages must be equalTo mergedReader.getNumberOfPages
    }
  }
}
vetafi/vetafi-web
test/services/documents/pdf/ITextPDFConcatenatorTest.scala
Scala
apache-2.0
1,234
package feh.util

/** A value with a default that can be temporarily overridden for the dynamic
  * extent of a computation.
  *
  * @tparam T the type of the stored state
  */
trait AbstractScopedState[T]{
  /** Initial value of the state. */
  protected def default: T
  /** Storage setter, provided by concrete implementations. */
  protected def state_=(t: T)
  /** Current value of the state. */
  def get: T
  /** Alias for [[get]]. */
  def state = get

  /** Runs `f(t)` with the state set to `t` for the duration of the call. */
  def doWith[R](t: T, f: T => R): R = doWith(t)(f(t))

  /** Evaluates `r` with the state temporarily set to `t`.
    * FIX: the previous value is now restored in a `finally`, so the state is
    * not left corrupted when `r` throws (the original skipped the restore on
    * exception).
    */
  def doWith[R](t: T)(r: => R): R = {
    val old = get
    state = t
    try r
    finally state = old
  }
}

object AbstractScopedState{
  /** Mixin that can skip a number of upcoming `doWith` state updates: while
    * `ignoreDo` is positive, `doWith` evaluates its body without touching the
    * state and decrements the counter.
    */
  trait IgnoreUpdate[T] extends AbstractScopedState[T]{
    override def doWith[R](t: T)(r: => R): R =
      if(ignoreDo > 0) {
        ignoreDo -= 1
        r
      }
      else super.doWith(t)(r)

    /** ignore `doWith` state update
      * numeric state */
    protected var ignoreDo = 0

    /** Evaluates `f` with `times` additional updates ignored.
      * FIX: counter restored in a `finally` so an exception in `f` cannot
      * leave extra updates permanently ignored.
      */
    def ignoring[R](times: Int)(f: => R): R = {
      val old = ignoreDo
      ignoreDo += times
      try f
      finally ignoreDo = old
    }
  }
}

/** Plain-var implementation; not safe for concurrent use. */
class ThreadUnsafeScopedState[T](val default: T) extends AbstractScopedState[T]{
  protected var _state = default
  protected def state_=(t: T): Unit = _state = t
  def get: T = _state
}

/** Implementation backed by a [[ThreadLocal]]: each thread sees its own state,
  * initialized lazily to `default` on first access.
  */
trait ScopedInThreadState[T] extends AbstractScopedState[T]{
  private val _state = new ThreadLocal[T]{
    override def initialValue(): T = default
  }
  def get: T = _state.get
  protected def state_=(t: T) = _state.set(t)
}

class ScopedState[T](protected val default: T) extends ScopedInThreadState[T]

/** Thread-local scoped state over a set of values, supporting temporary additions. */
class ScopedStates[T](protected val default: Set[T]) extends ScopedInThreadState[Set[T]]{
  /** Evaluates `r` with the elements of `t` added to the current set; on exit
    * the additions are removed again, except for those that were already
    * present before the call (captured in `c`).
    * FIX: cleanup moved to a `finally` so an exception in `r` cannot leave the
    * temporary elements in the set.
    */
  def doWithAdditional[R](t: Set[T])(r: => R): R = {
    val c = get.filter(t.contains)
    state = get ++ t
    try r
    finally state = get -- t ++ c
  }

//  def doWithout[R](t: Set[T])(r: => R): R = ???
}
fehu/util
src/main/scala/feh/util/ScopedState.scala
Scala
mit
1,597
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.table.planner.plan.rules.physical.common

import org.apache.flink.table.api.TableException
import org.apache.flink.table.connector.source.LookupTableSource
import org.apache.flink.table.planner.plan.nodes.common.{CommonLookupJoin, CommonPhysicalTableSourceScan}
import org.apache.flink.table.planner.plan.nodes.logical._
import org.apache.flink.table.planner.plan.nodes.physical.PhysicalLegacyTableSourceScan
import org.apache.flink.table.planner.plan.rules.common.CommonTemporalTableJoinRule
import org.apache.flink.table.planner.plan.schema.TimeIndicatorRelDataType
import org.apache.flink.table.planner.plan.utils.JoinUtil
import org.apache.flink.table.sources.LookupableTableSource

import org.apache.calcite.plan.RelOptRule.{any, operand}
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall, RelOptTable}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.core.TableScan
import org.apache.calcite.rex.RexProgram

import java.util

import scala.collection.JavaConversions._

/**
 * Base implementation for both
 * [[org.apache.flink.table.planner.plan.rules.physical.batch.BatchExecLookupJoinRule]] and
 * [[org.apache.flink.table.planner.plan.rules.physical.stream.StreamExecLookupJoinRule]].
 */
trait CommonLookupJoinRule extends CommonTemporalTableJoinRule {

  /** Returns true when the temporal join can be implemented as a lookup join:
    * the snapshot matches the base rule, the scanned table is a lookup-capable
    * table source, and the snapshot period is processing time.
    */
  protected def matches(
      join: FlinkLogicalJoin,
      snapshot: FlinkLogicalSnapshot,
      tableScan: TableScan): Boolean = {
    // Lookup join is a kind of implementation of temporal table join
    if (!matches(snapshot)) {
      return false
    }

    // Temporal table join implemented by lookup join only supports on LookupTableSource
    if (!isTableSourceScan(tableScan) || !isLookupTableSource(tableScan)) {
      return false
    }

    // Temporal table join implemented by lookup join only supports processing-time join
    // Other temporal table join will be matched by CommonTemporalTableJoinRule
    val isProcessingTime = snapshot.getPeriod.getType match {
      case t: TimeIndicatorRelDataType if !t.isEventTime => true
      case _ => false
    }
    isProcessingTime
  }

  /** True for any logical or physical table-source scan node (legacy or new stack). */
  protected def isTableSourceScan(relNode: RelNode): Boolean = {
    relNode match {
      case _: FlinkLogicalLegacyTableSourceScan | _: PhysicalLegacyTableSourceScan |
           _: FlinkLogicalTableSourceScan | _: CommonPhysicalTableSourceScan => true
      case _ => false
    }
  }

  /** True when the scan's underlying source supports lookups
    * (legacy [[LookupableTableSource]] or new [[LookupTableSource]]).
    */
  protected def isLookupTableSource(relNode: RelNode): Boolean = {
    relNode match {
      case scan: FlinkLogicalLegacyTableSourceScan =>
        scan.tableSource.isInstanceOf[LookupableTableSource[_]]
      case scan: PhysicalLegacyTableSourceScan =>
        scan.tableSource.isInstanceOf[LookupableTableSource[_]]
      case scan: FlinkLogicalTableSourceScan =>
        scan.tableSource.isInstanceOf[LookupTableSource]
      case scan: CommonPhysicalTableSourceScan =>
        scan.tableSource.isInstanceOf[LookupTableSource]
      // TODO: find TableSource in FlinkLogicalIntermediateTableScan
      case _ => false
    }
  }

  // TODO Support `IS NOT DISTINCT FROM` in the future: FLINK-13509
  /** Rejects join conditions containing `IS NOT DISTINCT FROM` semantics, which
    * lookup join does not support; throws [[TableException]] if found.
    */
  protected def validateJoin(join: FlinkLogicalJoin): Unit = {
    val filterNulls: Array[Boolean] = {
      val filterNulls = new util.ArrayList[java.lang.Boolean]
      // createJoinInfo populates filterNulls as a side effect: one flag per equi-key,
      // false marking a null-safe (IS NOT DISTINCT FROM) comparison.
      JoinUtil.createJoinInfo(join.getLeft, join.getRight, join.getCondition, filterNulls)
      filterNulls.map(_.booleanValue()).toArray
    }
    if (filterNulls.contains(false)) {
      throw new TableException(
        s"LookupJoin doesn't support join condition contains 'a IS NOT DISTINCT FROM b' (or " +
          s"alternative '(a = b) or (a IS NULL AND b IS NULL)'), the join condition is " +
          s"'${join.getCondition}'")
    }
  }

  /** Builds the physical lookup-join node; implemented by batch/stream subclasses. */
  protected def transform(
      join: FlinkLogicalJoin,
      input: FlinkLogicalRel,
      temporalTable: RelOptTable,
      calcProgram: Option[RexProgram]): CommonLookupJoin
}

/** Matches Join(left, Snapshot(TableScan)) — a lookup join directly on a table scan. */
abstract class BaseSnapshotOnTableScanRule(description: String)
  extends RelOptRule(
    operand(classOf[FlinkLogicalJoin],
      operand(classOf[FlinkLogicalRel], any()),
      operand(classOf[FlinkLogicalSnapshot],
        operand(classOf[TableScan], any()))),
    description)
  with CommonLookupJoinRule {

  override def matches(call: RelOptRuleCall): Boolean = {
    val join = call.rel[FlinkLogicalJoin](0)
    val snapshot = call.rel[FlinkLogicalSnapshot](2)
    val tableScan = call.rel[TableScan](3)
    matches(join, snapshot, tableScan)
  }

  override def onMatch(call: RelOptRuleCall): Unit = {
    val join = call.rel[FlinkLogicalJoin](0)
    val input = call.rel[FlinkLogicalRel](1)
    val tableScan = call.rel[RelNode](3)
    validateJoin(join)
    // No Calc between snapshot and scan, hence no RexProgram to push down.
    val temporalJoin = transform(join, input, tableScan.getTable, None)
    call.transformTo(temporalJoin)
  }
}

/** Matches Join(left, Snapshot(Calc(TableScan))) — a lookup join with a projection/filter
  * (Calc) between the snapshot and the scanned table.
  */
abstract class BaseSnapshotOnCalcTableScanRule(description: String)
  extends RelOptRule(
    operand(classOf[FlinkLogicalJoin],
      operand(classOf[FlinkLogicalRel], any()),
      operand(classOf[FlinkLogicalSnapshot],
        operand(classOf[FlinkLogicalCalc],
          operand(classOf[TableScan], any())))),
    description)
  with CommonLookupJoinRule {

  override def matches(call: RelOptRuleCall): Boolean = {
    val join = call.rel[FlinkLogicalJoin](0)
    val snapshot = call.rel[FlinkLogicalSnapshot](2)
    val tableScan = call.rel[TableScan](4)
    matches(join, snapshot, tableScan)
  }

  override def onMatch(call: RelOptRuleCall): Unit = {
    val join = call.rel[FlinkLogicalJoin](0)
    val input = call.rel[FlinkLogicalRel](1)
    val calc = call.rel[FlinkLogicalCalc](3)
    val tableScan = call.rel[RelNode](4)
    validateJoin(join)
    // Hand the Calc's program to the physical node so it can be applied during lookup.
    val temporalJoin = transform(
      join, input, tableScan.getTable, Some(calc.getProgram))
    call.transformTo(temporalJoin)
  }
}
greghogan/flink
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/common/CommonLookupJoinRule.scala
Scala
apache-2.0
6,587
package scalaprops
package scalazlaws

import scalaz.{Equal, Representable}
import scalaz.std.anyVal._
import scalaprops.Property.forAll

/** Properties checking the scalaz [[Representable]] laws: `rep` and `unrep`
  * must be mutual inverses.
  */
object representable {
  // rep . unrep == id on F[A]
  def repUnrep[F[_], X, A](implicit F: Representable[F, X], G: Gen[F[A]], E: Equal[F[A]]): Property =
    forAll(F.representableLaw.repUnrep[A] _)

  // unrep . rep == id on X => A (checked pointwise at generated X)
  def unrepRep[F[_], X, A](implicit F: Representable[F, X], G1: Gen[X => A], G2: Gen[X], E: Equal[A]): Property =
    forAll(F.representableLaw.unrepRep[A] _)

  // Both laws bundled under the `representable` label, instantiated at Byte.
  def laws[F[_], X](implicit F: Representable[F, X],
    G1: Gen[X => Byte],
    G2: Gen[F[Byte]],
    G3: Gen[X],
    E: Equal[F[Byte]]
  ) = Properties.properties(ScalazLaw.representable)(
    ScalazLaw.representableRepUnrep -> repUnrep[F, X, Byte],
    ScalazLaw.representableUnrepRep -> unrepRep[F, X, Byte]
  )

  // Representable has no parent type classes here, so `all` is just `laws`.
  def all[F[_], X](implicit F: Representable[F, X],
    G1: Gen[X => Byte],
    G2: Gen[F[Byte]],
    G3: Gen[X],
    E: Equal[F[Byte]]
  ) = laws[F, X]
}
scalaprops/scalaprops
scalaz/src/main/scala/scalaprops/scalazlaws/representable.scala
Scala
mit
963
/*
 * Copyright 2015 Webtrends (http://www.webtrends.com)
 *
 * See the LICENCE.txt file distributed with this work for additional
 * information regarding copyright ownership.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.webtrends.harness.component.kafka.actor

import akka.actor.{Actor, Props}
import com.webtrends.harness.component.kafka.KafkaConsumerCoordinator.TopicPartitionResp
import com.webtrends.harness.component.kafka.actor.AssignmentDistributorLeader.PartitionAssignment
import com.webtrends.harness.component.kafka.util.KafkaSettings
import com.webtrends.harness.component.zookeeper.{ZookeeperAdapter, ZookeeperEventAdapter}
import com.webtrends.harness.health.{ComponentState, HealthComponent}
import com.webtrends.harness.logging.ActorLoggingAdapter
import kafka.api.TopicMetadataRequest
import kafka.consumer.SimpleConsumer

import scala.collection.mutable
import scala.collection.immutable._
import scala.language.postfixOps
import scala.util.Try

object KafkaTopicManager {
  /**
   * BrokerSpec maps the JSON used by Kafka 0.8 to describe a broker as
   * written by it in Zookeeper.
   * @param host The broker hostname.
   * @param port The broker port.
   * @param cluster The zookeeper cluster this broker belongs to
   */
  case class BrokerSpec(host: String, port: Int, cluster: String)

  // Hosts currently known to be down for scheduled maintenance; used to
  // distinguish expected from unexpected broker unavailability in health checks.
  case class DownSources(sources: Set[String])

  // Request message: reply with the current topic/partition leader assignments.
  case object TopicPartitionReq

  def props() = Props(classOf[KafkaTopicManager])
}

/** Actor that polls Kafka brokers for topic/partition leadership metadata and
  * reports broker health to its parent.
  */
class KafkaTopicManager() extends Actor
  with KafkaSettings
  with ActorLoggingAdapter
  with ZookeeperAdapter
  with ZookeeperEventAdapter {
  import KafkaTopicManager._

  val actorName = "Kafka Topic Manager"

  // Optional child that tracks source-host availability, enabled by config.
  val sourceMonitor = if (Try { kafkaConfig.getBoolean("monitor-sources") } getOrElse false) {
    log.info("'monitor-sources' is true, starting Source Monitor")
    Some(context.actorOf(Props(classOf[SourceMonitor], "topic-manager"), "topic-source-monitor"))
  } else None

  // Holder of consumers connected to each kafka broker
  val consumersByHost = new mutable.HashMap[String, SimpleConsumer]()
  var downSources = Set[String]()

  context.parent ! HealthComponent(actorName, ComponentState.NORMAL, "Proxy has been started")

  def receive: Receive = configReceive orElse {
    case TopicPartitionReq =>
      sender ! TopicPartitionResp(getPartitionLeaders)

    case msg: DownSources =>
      downSources = msg.sources
  }

  /** Queries one broker per cluster for topic metadata and collects the leader
    * of every partition of the configured topics, sorted by topic/cluster/partition.
    * Also pushes a health report for any cluster that could not be reached.
    */
  def getPartitionLeaders: SortedSet[PartitionAssignment] = {
    var partitionsByTopic = new TreeSet[PartitionAssignment]()(Ordering.by[PartitionAssignment, String]
      (a => a.topic + a.cluster + a.partition))

    // Empty topics list requests metadata for all topics; filtered below.
    val topicMetaRequest = new TopicMetadataRequest(versionId = 0, correlationId = 0, clientId = clientId, topics = Seq())

    // Get our partition meta data for the configured topics
    val processedClusters = new mutable.HashSet[String]()
    val brokers = kafkaSources
    sourceMonitor foreach(_ ! HostList(brokers.values.map(_.host).toList))
    // One successful broker per cluster suffices; others in the same cluster are skipped.
    for (bro <- brokers.values if !processedClusters.contains(bro.cluster)) {
      try {
        // Consumers are cached per host and only recreated after a failure.
        val consumer = consumersByHost.getOrElseUpdate(bro.host, new SimpleConsumer(bro.host, bro.port, 15000, bufferSize, clientId))
        val topicsMetaResp = consumer.send(topicMetaRequest)
        for (
          topicMeta <- topicsMetaResp.topicsMetadata.filter { meta => topicMap.keys.toList.contains(meta.topic) };
          partMeta <- topicMeta.partitionsMetadata
        ) yield {
          partMeta.leader match {
            case Some(broker) =>
              log.debug(s"Leader found for topic [${topicMeta.topic}:${partMeta.partitionId}]: ${broker.host}")
              partitionsByTopic += PartitionAssignment(topicMeta.topic, partMeta.partitionId, brokers(broker.host).cluster, broker.host)
            case None =>
              log.error(s"No leader found for topic [${topicMeta.topic}:${partMeta.partitionId}]")
          }
          // NOTE(review): the cluster is marked processed only if at least one
          // topic/partition was yielded — an empty metadata response leaves it
          // unprocessed; confirm this is intended.
          processedClusters.add(bro.cluster)
        }
      } catch {
        // NOTE(review): catches Throwable (so fatal errors too); scala.util.control.NonFatal
        // would be the conventional choice here.
        case e: Throwable =>
          log.error(s"Unable to get topic meta data from ${bro.host}, will retry soon", e)
          // Drop the cached consumer so the next attempt reconnects.
          consumersByHost.remove(bro.host).foreach(_.close())
      }
    }

    // Report health: clusters never reached are split into scheduled-down vs. truly despondent.
    val unprocClusters = brokers.filter(it => !processedClusters.contains(it._2.cluster)).values.toSet
    if (unprocClusters.nonEmpty) {
      log.warn(s"Some brokers despondent: ${unprocClusters.map(_.cluster).mkString(",")}. Remaining brokers will start their workers.")
      // _1 = hosts in scheduled downtime, _2 = unexpectedly unreachable.
      val okayAndDown = unprocClusters.partition(it => downSources.contains(it.host))
      context.parent ! HealthComponent(actorName,
        if (okayAndDown._2.nonEmpty) ComponentState.DEGRADED else ComponentState.NORMAL,
        s"Despondent Clusters: [${okayAndDown._2.map(_.cluster).mkString(",")}], Scheduled Downtime: [${okayAndDown._1.map(_.cluster).mkString(",")}]")
    } else {
      log.debug("Successfully processed brokers {}", brokers.toString())
      context.parent ! HealthComponent(actorName, ComponentState.NORMAL, "Successfully fetched broker data")
    }
    partitionsByTopic
  }
}
Webtrends/wookiee-kafka
src/main/scala/com/webtrends/harness/component/kafka/actor/KafkaTopicManager.scala
Scala
apache-2.0
5,587
package org.planteome.samara

import org.mapdb.{DBMaker, Fun}

import scala.collection.JavaConverters._
import scala.util.Random

/** Supplies a synthetic stand-in for the MapDB-backed term iterator: exactly
  * 10,000 entries, each pairing a random 50-character key with the fixed id
  * list [1, 2].
  */
trait TermFinderTaxonCacheMapDBStatic extends TermFinderTaxonCacheMapDB {

  override lazy val mapdbIterator: Iterator[Fun.Tuple2[String, List[Integer]]] =
    Iterator
      .continually(new Fun.Tuple2[String, List[Integer]](Random.nextString(50), List(1, 2)))
      .take(10000)
}
jhpoelen/samara
src/main/scala/org/planteome/samara/TermFinderTaxonCacheMapDBStatic.scala
Scala
mit
607
package finatra.quickstart

import com.google.inject.Module
import com.twitter.finatra.http.HttpServer
import com.twitter.finatra.http.filters.CommonFilters
import com.twitter.finatra.http.routing.HttpRouter
import com.twitter.inject.app.DtabResolution
import finatra.quickstart.controllers.TweetsController
import finatra.quickstart.modules.{FirebaseHttpClientModule, TwitterCloneJacksonModule}
import finatra.quickstart.warmup.TwitterCloneWarmupHandler

/** JVM entry point: an instance of [[TwitterCloneServer]] run as a Finatra app. */
object TwitterCloneServerMain extends TwitterCloneServer

/** Finatra HTTP server for the twitter-clone example.
  *
  * Wires the Firebase HTTP client module, a custom Jackson module, the common
  * HTTP filter stack, and the tweets controller; runs a warmup handler before
  * the server reports healthy.
  */
class TwitterCloneServer extends HttpServer with DtabResolution {

  // Guice modules installed into the server's injector.
  override val modules: Seq[Module] = Seq(FirebaseHttpClientModule)

  // Replaces the default Jackson (de)serialization configuration.
  override def jacksonModule: Module = TwitterCloneJacksonModule

  // Route table: CommonFilters first, then the tweets endpoints.
  override def configureHttp(router: HttpRouter): Unit = {
    router
      .filter[CommonFilters]
      .add[TweetsController]
  }

  // Executed once at startup, before the server accepts traffic.
  override protected def warmup(): Unit = {
    handle[TwitterCloneWarmupHandler]()
  }
}
twitter/finatra
examples/advanced/twitter-clone/src/main/scala/finatra/quickstart/TwitterCloneServer.scala
Scala
apache-2.0
942
package org.jetbrains.plugins.scala
package lang
package parser

import com.intellij.lang.{ASTNode, Language}
import com.intellij.lexer.Lexer
import com.intellij.openapi.project.Project
import com.intellij.psi.stubs.PsiFileStub
import com.intellij.psi.tree.{ICompositeElementType, IElementType, IErrorCounterReparseableElementType, IStubFileElementType}
import com.intellij.psi.{PsiElement, PsiFile}
import org.jetbrains.annotations.NotNull
import org.jetbrains.plugins.scala.lang.lexer.{ScalaElementType, ScalaLexer, ScalaTokenTypes}
import org.jetbrains.plugins.scala.lang.parser.ScalaPsiCreator.SelfPsiCreator
import org.jetbrains.plugins.scala.lang.psi.impl.base.patterns._
import org.jetbrains.plugins.scala.lang.psi.impl.base.types._
import org.jetbrains.plugins.scala.lang.psi.impl.base.{ScConstructorImpl, ScInterpolatedStringLiteralImpl, ScLiteralImpl, ScStableCodeReferenceElementImpl}
import org.jetbrains.plugins.scala.lang.psi.impl.expr._
import org.jetbrains.plugins.scala.lang.psi.impl.expr.xml._
import org.jetbrains.plugins.scala.lang.psi.impl.statements.params.ScParameterTypeImpl
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.templates.ScRequiresBlockImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.elements._
import org.jetbrains.plugins.scala.lang.psi.stubs.elements.signatures.{ScClassParameterElementType, ScParamClauseElementType, ScParamClausesElementType, ScParameterElementType}

/**
 * Registry of the composite AST element types produced by the Scala parser.
 *
 * Two flavours coexist here:
 *  - plain `ScalaElementType` instances (debug name only); those mixed with
 *    `SelfPsiCreator` also know how to build their own PSI node from an ASTNode;
 *  - dedicated stub element types (`Sc*ElementType` classes) that participate
 *    in stub-index serialization.
 *
 * NOTE(review): the debug-name strings below are runtime values (they show up
 * in PSI dumps and may feed stub external IDs), so they are deliberately left
 * untouched — including apparent typos flagged inline.
 *
 * User: Dmitry.Krasilschikov
 * Date: 02.10.2006
 */
object ScalaElementTypes {
  // ----- Types -----
  val COMPOUND_TYPE = new ScalaElementType("compound type") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScCompoundTypeElementImpl(node) }
  val EXISTENTIAL_TYPE = new ScalaElementType("existential type") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScExistentialTypeElementImpl(node) }
  val EXISTENTIAL_CLAUSE = new ScalaElementType("existential clause") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScExistentialClauseImpl(node) }
  val DUMMY_ELEMENT = new ScalaElementType("Dummy Element")

  // ----- Declarations / definitions (stub-backed element types) -----
  val IDENTIFIER_LIST = new ScIdListElementType
  val FIELD_ID = new ScFieldIdElementType
  val IMPORT_SELECTOR = new ScImportSelectorElementType
  val IMPORT_SELECTORS = new ScImportSelectorsElementType
  val IMPORT_EXPR = new ScImportExprElementType
  val IMPORT_STMT = new ScImportStmtElementType
  val VALUE_DECLARATION = new ScValueDeclarationElementType
  val VARIABLE_DECLARATION = new ScVariableDeclarationElementType
  val FUNCTION_DECLARATION = new ScFunctionDeclarationElementType
  val TYPE_DECLARATION = new ScTypeAliasDeclarationElementType
  val PATTERN_DEFINITION = new ScValueDefinitionElementType
  val PATTERN_LIST = new ScPatternListElementType
  val VARIABLE_DEFINITION = new ScVariableDefinitionElementType
  val TYPE_DEFINITION = new ScTypeAliasDefinitionElementType
  val EARLY_DEFINITIONS = new ScEarlyDefinitionsElementType
  val FUNCTION_DEFINITION = new ScFunctionDefinitionElementType
  val MACRO_DEFINITION = new ScMacroDefinitionElementType
  // NOTE(review): "moifiers" looks like a typo for "modifiers"; left as-is
  // because the debug name may be persisted in stub/index data — confirm
  // serialization impact before renaming.
  val MODIFIERS = new ScModifiersElementType("moifiers")
  val ACCESS_MODIFIER = new ScAccessModifierElementType
  val ANNOTATION = new ScAnnotationElementType
  val ANNOTATIONS = new ScAnnotationsElementType
  val REFERENCE_PATTERN = new ScReferencePatternElementType
  val BLOCK_EXPR = new ScCodeBlockElementType
  val PACKAGING = new ScPackagingElementType
  val EXTENDS_BLOCK = new ScExtendsBlockElementType
  val TEMPLATE_PARENTS = new ScTemplateParentsElementType
  val TEMPLATE_BODY = new ScTemplateBodyElementType
  val NEW_TEMPLATE = new ScNewTemplateDefinitionStubElementType
  val PARAM = new ScParameterElementType
  val PARAM_CLAUSE = new ScParamClauseElementType
  val PARAM_CLAUSES = new ScParamClausesElementType
  val CLASS_PARAM = new ScClassParameterElementType
  val TYPE_PARAM_CLAUSE = new ScTypeParamClauseElementType
  val TYPE_PARAM = new ScTypeParamElementType
  val SELF_TYPE = new ScSelfTypeElementElementType
  val PRIMARY_CONSTRUCTOR = new ScPrimaryConstructorElementType

  //Stub element types
  val FILE: IStubFileElementType[_ <: PsiFileStub[_ <: PsiFile]] = new ScStubFileElementType
  val CLASS_DEFINITION = new ScClassDefinitionElementType
  val OBJECT_DEFINITION = new ScObjectDefinitionElementType
  val TRAIT_DEFINITION = new ScTraitDefinitionElementType
  val CONSTRUCTOR = new ScalaElementType("constructor", true) with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScConstructorImpl(node) }
  val TEMPLATE = new ScalaElementType("template", true)
  val REQUIRES_BLOCK = new ScalaElementType("requires block") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScRequiresBlockImpl(node) }
  val PARAM_TYPE = new ScalaElementType("parameter type") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScParameterTypeImpl(node) }
  val VARIANT_TYPE_PARAM = new ScalaElementType("variant parameter of type")
  val TYPE_PARAMS = new ScalaElementType("parameters of type")
  val SIMPLE_TYPE = new ScalaElementType("simple type")
  val INFIX_TYPE = new ScalaElementType("infix type")
  val TYPE = new ScalaElementType("common type")
  // NOTE(review): TYPES reuses TYPE's debug name "common type" — presumably
  // an oversight ("common types"?); left unchanged for the same reason as above.
  val TYPES = new ScalaElementType("common type") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScTypesImpl(node) }
  val TYPE_ARGS = new ScalaElementType("type arguments")
  val ANNOT_TYPE = new ScalaElementType("annotation type")
  val WILDCARD_TYPE = new ScalaElementType("wildcard type")
  val ASCRIPTION = new ScalaElementType("ascription") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScAscriptionImpl(node) }
  val TUPLE_TYPE = new ScalaElementType("tuple type")
  val TYPE_IN_PARENTHESIS = new ScalaElementType("type in parenthesis") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScParenthesisedTypeElementImpl(node) }
  val TYPE_PROJECTION = new ScalaElementType("type projection")
  val TYPE_GENERIC_CALL = new ScalaElementType("type generic call")
  val LITERAL_TYPE = new ScalaElementType("Literal type")
  val SEQUENCE_ARG = new ScalaElementType("sequence argument type") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScSequenceArgImpl(node) }
  val TYPE_VARIABLE = new ScalaElementType("type variable")
  val UNIT_EXPR = new ScalaElementType("unit expression") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScUnitExprImpl(node) }
  val REFERENCE = new ScalaElementType("reference") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScStableCodeReferenceElementImpl(node) }
  val IMPORT = new ScalaElementType("import")
  val STABLE_ID_LIST = new ScalaElementType("stable id list")
  val STATEMENT_TEMPLATE = new ScalaElementType("template statement")
  val FUN_SIG = new ScalaElementType("function signature")
  val CONSTR_EXPR = new ScalaElementType("constructor expression") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScConstrExprImpl(node) }
  val SELF_INVOCATION = new ScalaElementType("self invocation") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScSelfInvocationImpl(node) }
  val LOWER_BOUND_TYPE = new ScalaElementType("lower bound type")
  val UPPER_BOUND_TYPE = new ScalaElementType("upper bound type")
  val NAME_VALUE_PAIR = new ScalaElementType("name value pair") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScNameValuePairImpl(node) }
  val ANNOTATION_EXPR = new ScalaElementType("annotation expression") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScAnnotationExprImpl(node) }
  val LITERAL = new ScalaElementType("Literal") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScLiteralImpl(node) }

  // String literals
  val STRING_LITERAL = new ScalaElementType("String Literal")
  val INTERPOLATED_STRING_LITERAL = new ScalaElementType("Interpolated String Literal") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScInterpolatedStringLiteralImpl(node) }
  //Not only String, but quasiquote too
  val INTERPOLATED_PREFIX_PATTERN_REFERENCE = new ScalaElementType("Interpolated Prefix Pattern Reference") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScInterpolatedPrefixReference(node) }
  val INTERPOLATED_PREFIX_LITERAL_REFERENCE = new ScalaElementType("Interpolated Prefix Literal Reference") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScInterpolatedStringPartReference(node) }
  // Boolean literals
  val BOOLEAN_LITERAL = new ScalaElementType("Boolean Literal")

  /** ***********************************************************************************/
  /** ************************************ EXPRESSIONS **********************************/
  /** ***********************************************************************************/
  /**/
  val PREFIX_EXPR = new ScalaElementType("prefix expression") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScPrefixExprImpl(node) }
  val PREFIX = new ScalaElementType("prefix")
  val POSTFIX_EXPR = new ScalaElementType("postfix expression") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScPostfixExprImpl(node) }
  val INFIX_EXPR = new ScalaElementType("infix expression") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScInfixExprImpl(node) }
  val PLACEHOLDER_EXPR = new ScalaElementType("simple expression") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScUnderscoreSectionImpl(node) }
  val PARENT_EXPR = new ScalaElementType("Expression in parentheses") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScParenthesisedExprImpl(node) }
  val METHOD_CALL = new ScalaElementType("Method call") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScMethodCallImpl(node) }
  val REFERENCE_EXPRESSION = new ScalaElementType("Reference expression") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScReferenceExpressionImpl(node) }
  val THIS_REFERENCE = new ScalaElementType("This reference") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScThisReferenceImpl(node) }
  val SUPER_REFERENCE = new ScalaElementType("Super reference") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScSuperReferenceImpl(node) }
  val GENERIC_CALL = new ScalaElementType("Generified call") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScGenericCallImpl(node) }
  val EXPR1 = new ScalaElementType("composite expression ")
  val FUNCTION_EXPR = new ScalaElementType("expression") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScFunctionExprImpl(node) }
  val AN_FUN = new ScalaElementType("anonymous function")
  val GENERATOR = new ScalaElementType("generator") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScGeneratorImpl(node) }
  val ENUMERATOR = new ScalaElementType("enumerator") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScEnumeratorImpl(node) }
  val ENUMERATORS = new ScalaElementType("enumerator") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScEnumeratorsImpl(node) }
  val GUARD = new ScalaElementType("guard") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScGuardImpl(node) }
  val EXPRS = new ScalaElementType("list of expressions") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScExprsImpl(node) }
  val ARG_EXPRS = new ScalaElementType("arguments of function") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScArgumentExprListImpl(node) }
  val CONSTR_BLOCK = new ScalaElementType("constructor block") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScConstrBlockImpl(node) }
  val ERROR_STMT = new ScalaElementType("error statement") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScErrorStatImpl(node) }
  val BLOCK = new ScalaElementType("block") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScBlockImpl(node) }
  val TUPLE = new ScalaElementType("Tuple") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScTupleImpl(node) }

  /** ****************************** COMPOSITE EXPRESSIONS *****************************/
  val IF_STMT = new ScalaElementType("if statement") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScIfStmtImpl(node) }
  val FOR_STMT = new ScalaElementType("for statement") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScForStatementImpl(node) }
  val DO_STMT = new ScalaElementType("do-while statement") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScDoStmtImpl(node) }
  val TRY_STMT = new ScalaElementType("try statement") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScTryStmtImpl(node) }
  val TRY_BLOCK = new ScalaElementType("try block") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScTryBlockImpl(node) }
  val CATCH_BLOCK = new ScalaElementType("catch block") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScCatchBlockImpl(node) }
  val FINALLY_BLOCK = new ScalaElementType("finally block") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScFinallyBlockImpl(node) }
  val WHILE_STMT = new ScalaElementType("while statement") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScWhileStmtImpl(node) }
  val RETURN_STMT = new ScalaElementType("return statement") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScReturnStmtImpl(node) }
  val THROW_STMT = new ScalaElementType("throw statement") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScThrowStmtImpl(node) }
  val ASSIGN_STMT = new ScalaElementType("assign statement") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScAssignStmtImpl(node) }
  val MATCH_STMT = new ScalaElementType("match statement") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScMatchStmtImpl(node) }
  val TYPED_EXPR_STMT = new ScalaElementType("typed statement") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScTypedStmtImpl(node) }

  /** ***********************************************************************************/
  /** ************************************ PATTERNS *************************************/
  /** ***********************************************************************************/
  val TUPLE_PATTERN = new ScalaElementType("Tuple Pattern") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScTuplePatternImpl(node) }
  val SEQ_WILDCARD = new ScalaElementType("Sequence Wildcard") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScSeqWildcardImpl(node) }
  val CONSTRUCTOR_PATTERN = new ScalaElementType("Constructor Pattern") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScConstructorPatternImpl(node) }
  val PATTERN_ARGS = new ScalaElementType("Pattern arguments") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScPatternArgumentListImpl(node) }
  val INFIX_PATTERN = new ScalaElementType("Infix pattern") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScInfixPatternImpl(node) }
  val NAMING_PATTERN = new ScalaElementType("Binding Pattern") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScNamingPatternImpl(node) }
  val TYPED_PATTERN = new ScalaElementType("Typed Pattern") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScTypedPatternImpl(node) }
  val PATTERN = new ScalaElementType("Composite Pattern") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScCompositePatternImpl(node) }
  val PATTERNS = new ScalaElementType("patterns") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScPatternsImpl(node) }
  val WILDCARD_PATTERN = new ScalaElementType("any sequence") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScWildcardPatternImpl(node) }
  val CASE_CLAUSE = new ScalaElementType("case clause") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScCaseClauseImpl(node) }
  val CASE_CLAUSES = new ScalaElementType("case clauses") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScCaseClausesImpl(node) }
  val LITERAL_PATTERN = new ScalaElementType("literal pattern") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScLiteralPatternImpl(node) }
  val INTERPOLATION_PATTERN = new ScalaElementType("interpolation pattern") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScInterpolationPatternImpl(node) }
  val STABLE_REFERENCE_PATTERN = new ScalaElementType("stable reference pattern") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScStableReferenceElementPatternImpl(node) }
  val PATTERN_IN_PARENTHESIS = new ScalaElementType("pattern in parenthesis") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScParenthesisedPatternImpl(node) }

  /** ************************************ TYPE PATTERNS ********************************/
  val TYPE_PATTERN_ARGS = new ScalaElementType("Type pattern arguments") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScTypePatternArgsImpl(node) }
  val TYPE_PATTERN = new ScalaElementType("Type pattern") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScTypePatternImpl(node) }
  val REFINEMENT = new ScalaElementType("refinement") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScRefinementImpl(node) }

  /** ************************************* XML *************************************/
  val XML_EXPR = new ScalaElementType("Xml expr") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScXmlExprImpl(node) }
  val XML_START_TAG = new ScalaElementType("Xml start tag") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScXmlStartTagImpl(node) }
  val XML_END_TAG = new ScalaElementType("Xml end tag") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScXmlEndTagImpl(node) }
  val XML_EMPTY_TAG = new ScalaElementType("Xml empty tag") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScXmlEmptyTagImpl(node) }
  // NOTE(review): "proccessing" is a typo in the debug name; left as-is (runtime string).
  val XML_PI = new ScalaElementType("Xml proccessing instruction") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScXmlPIImpl(node) }
  val XML_CD_SECT = new ScalaElementType("Xml cdata section") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScXmlCDSectImpl(node) }
  val XML_ATTRIBUTE = new ScalaElementType("Xml attribute") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScXmlAttributeImpl(node) }
  val XML_PATTERN = new ScalaElementType("Xml pattern") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScXmlPatternImpl(node) }
  val XML_COMMENT = new ScalaElementType("Xml comment") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScXmlCommentImpl(node) }
  val XML_ELEMENT = new ScalaElementType("Xml element") with SelfPsiCreator { override def createElement(node: ASTNode): PsiElement = new ScXmlElementImpl(node) }

  /**
   * Reparseable element type for `{ ... }` expression blocks.
   *
   * `getErrorsCount` decides whether an edited block can be re-parsed in
   * isolation by lexing it and tracking `{`/`}` balance:
   *  - returns FATAL_ERROR if the text does not start with `{`, or if a `}`
   *    closes the block before the text ends (balance hit 0 with tokens left);
   *  - otherwise returns the remaining balance (0 means braces are matched).
   */
  class ScCodeBlockElementType() extends IErrorCounterReparseableElementType("block of expressions", ScalaLanguage.INSTANCE) with ICompositeElementType {

    override def createNode(text: CharSequence): ASTNode = new ScBlockExprImpl(text)

    @NotNull
    override def createCompositeNode: ASTNode = new ScBlockExprImpl(null)

    override def getErrorsCount(seq: CharSequence, fileLanguage: Language, project: Project): Int = {
      import com.intellij.psi.tree.IErrorCounterReparseableElementType._
      val lexer: Lexer = new ScalaLexer
      lexer.start(seq)
      // A reparseable block must begin with an opening brace.
      if (lexer.getTokenType != ScalaTokenTypes.tLBRACE) return FATAL_ERROR
      lexer.advance()
      var balance: Int = 1
      var flag = false
      while (!flag) {
        val tp: IElementType = lexer.getTokenType
        if (tp == null) flag = true // end of input
        else if (balance == 0) return FATAL_ERROR // block closed but tokens remain
        else if (tp == ScalaTokenTypes.tLBRACE) {
          balance += 1
        } else if (tp == ScalaTokenTypes.tRBRACE) {
          balance -= 1
        }
        lexer.advance()
      }
      balance
    }
  }
}
jastice/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/parser/ScalaElementTypes.scala
Scala
apache-2.0
22,069
/*
 * Copyright 2010 LinkedIn
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka.consumer

import scala.collection.mutable._
import scala.collection.JavaConversions._
import org.I0Itec.zkclient._
import joptsimple._
import org.apache.log4j.Logger
import java.util.Arrays.asList
import java.util.Properties
import java.util.Random
import java.io.PrintStream
import kafka.message._
import kafka.utils.Utils
import kafka.utils.ZkUtils
import kafka.utils.StringSerializer

/**
 * Consumer that dumps messages out to standard out.
 *
 * Command-line tool: parses options, builds a ConsumerConfig, opens a single
 * message stream for the requested topic and writes each message to stdout
 * through a pluggable MessageFormatter.
 */
object ConsoleConsumer {

  private val logger = Logger.getLogger(getClass())

  def main(args: Array[String]) {
    // ---- Option definitions ----
    val parser = new OptionParser
    val topicIdOpt = parser.accepts("topic", "REQUIRED: The topic id to consume on.")
                           .withRequiredArg
                           .describedAs("topic")
                           .ofType(classOf[String])
    val zkConnectOpt = parser.accepts("zookeeper", "REQUIRED: The connection string for the zookeeper connection in the form host:port. " +
                                      "Multiple URLS can be given to allow fail-over.")
                           .withRequiredArg
                           .describedAs("urls")
                           .ofType(classOf[String])
    // Default group id is randomized so ad-hoc runs don't share offsets.
    val groupIdOpt = parser.accepts("group", "The group id to consume on.")
                           .withRequiredArg
                           .describedAs("gid")
                           .defaultsTo("console-consumer-" + new Random().nextInt(100000))
                           .ofType(classOf[String])
    val fetchSizeOpt = parser.accepts("fetch-size", "The amount of data to fetch in a single request.")
                           .withRequiredArg
                           .describedAs("size")
                           .ofType(classOf[java.lang.Integer])
                           .defaultsTo(1024 * 1024)
    val socketBufferSizeOpt = parser.accepts("socket-buffer-size", "The size of the tcp RECV size.")
                           .withRequiredArg
                           .describedAs("size")
                           .ofType(classOf[java.lang.Integer])
                           .defaultsTo(2 * 1024 * 1024)
    val messageFormatterOpt = parser.accepts("formatter", "The name of a class to use for formatting kafka messages for display.")
                           .withRequiredArg
                           .describedAs("class")
                           .ofType(classOf[String])
                           .defaultsTo(classOf[NewlineMessageFormatter].getName)
    // Repeatable key=value properties forwarded to the formatter's init().
    val messageFormatterArgOpt = parser.accepts("property")
                           .withRequiredArg
                           .describedAs("prop")
                           .ofType(classOf[String])
    val resetBeginningOpt = parser.accepts("from-beginning", "If the consumer does not already have an established offset to consume from, " +
                                           "start with the earliest message present in the log rather than the latest message.")
    val autoCommitIntervalOpt = parser.accepts("autocommit.interval.ms", "The time interval at which to save the current offset in ms")
                           .withRequiredArg
                           .describedAs("ms")
                           .ofType(classOf[java.lang.Integer])
                           .defaultsTo(10*1000)
    val maxMessagesOpt = parser.accepts("max-messages", "The maximum number of messages to consume before exiting. If not set, consumption is continual.")
                           .withRequiredArg
                           .describedAs("num_messages")
                           .ofType(classOf[java.lang.Integer])
    val skipMessageOnErrorOpt = parser.accepts("skip-message-on-error", "If there is an error when processing a message, " +
                                               "skip it instead of halt.")

    val options: OptionSet = tryParse(parser, args)
    checkRequiredArgs(parser, options, topicIdOpt, zkConnectOpt)

    // ---- Translate CLI options into consumer properties ----
    val props = new Properties()
    props.put("groupid", options.valueOf(groupIdOpt))
    props.put("socket.buffer.size", options.valueOf(socketBufferSizeOpt).toString)
    props.put("fetch.size", options.valueOf(fetchSizeOpt).toString)
    props.put("auto.commit", "true")
    props.put("autocommit.interval.ms", options.valueOf(autoCommitIntervalOpt).toString)
    props.put("autooffset.reset", if(options.has(resetBeginningOpt)) "smallest" else "largest")
    props.put("zk.connect", options.valueOf(zkConnectOpt))
    val config = new ConsumerConfig(props)
    val skipMessageOnError = if (options.has(skipMessageOnErrorOpt)) true else false

    val topic = options.valueOf(topicIdOpt)
    val messageFormatterClass = Class.forName(options.valueOf(messageFormatterOpt))
    val formatterArgs = tryParseFormatterArgs(options.valuesOf(messageFormatterArgOpt))

    // -1 means "no limit": consume until the process is killed.
    val maxMessages = if(options.has(maxMessagesOpt)) options.valueOf(maxMessagesOpt).intValue else -1

    val connector = Consumer.create(config)

    Runtime.getRuntime.addShutdownHook(new Thread() {
      override def run() {
        connector.shutdown()
        // if there is no group specified then avoid polluting zookeeper with persistent group data, this is a hack
        if(!options.has(groupIdOpt))
          tryCleanupZookeeper(options.valueOf(zkConnectOpt), options.valueOf(groupIdOpt))
      }
    })

    // Single stream for the topic; slice() caps the number of consumed messages.
    var stream: KafkaMessageStream = connector.createMessageStreams(Map(topic -> 1)).get(topic).get.get(0)
    val iter =
      if(maxMessages >= 0)
        stream.slice(0, maxMessages)
      else
        stream

    val formatter: MessageFormatter = messageFormatterClass.newInstance().asInstanceOf[MessageFormatter]
    formatter.init(formatterArgs)

    // NOTE(review): the bare `case e =>` handlers below catch everything
    // (including fatal errors in this Scala version); left as-is to preserve
    // the tool's existing best-effort behavior.
    try {
      for(message <- iter) {
        try {
          formatter.writeTo(message, System.out)
        } catch {
          case e =>
            if (skipMessageOnError)
              logger.error("error processing message, skipping and resume consumption: " + e)
            else
              throw e
        }
      }
    } catch {
      case e => logger.error("error processing message, stop consuming: " + e)
    }

    System.out.flush()
    formatter.close()
    connector.shutdown()
  }

  /** Parses args, or croaks (exits via Utils.croak) on an OptionException. */
  def tryParse(parser: OptionParser, args: Array[String]) = {
    try {
      parser.parse(args : _*)
    } catch {
      case e: OptionException => {
        Utils.croak(e.getMessage)
        null
      }
    }
  }

  /** Exits with usage help if any of the required options is missing. */
  def checkRequiredArgs(parser: OptionParser, options: OptionSet, required: OptionSpec[_]*) {
    for(arg <- required) {
      if(!options.has(arg)) {
        logger.error("Missing required argument \"" + arg + "\"")
        parser.printHelpOn(System.err)
        System.exit(1)
      }
    }
  }

  /**
   * Turns repeated "key=value" --property arguments into a Properties object.
   * Exits if any argument does not split into exactly two parts.
   * NOTE(review): `split "="` means values containing '=' are rejected;
   * a limit-2 split would allow them — confirm whether that is desired.
   */
  def tryParseFormatterArgs(args: Iterable[String]): Properties = {
    val splits = args.map(_ split "=").filterNot(_ == null).filterNot(_.length == 0)
    if(!splits.forall(_.length == 2)) {
      System.err.println("Invalid parser arguments: " + args.mkString(" "))
      System.exit(1)
    }
    val props = new Properties
    for(a <- splits)
      props.put(a(0), a(1))
    props
  }

  /** Pluggable output formatting strategy for consumed messages. */
  trait MessageFormatter {
    def writeTo(message: Message, output: PrintStream)
    def init(props: Properties) {}
    def close() {}
  }

  /** Default formatter: raw payload bytes followed by a newline. */
  class NewlineMessageFormatter extends MessageFormatter {
    def writeTo(message: Message, output: PrintStream) {
      val payload = message.payload
      output.write(payload.array, payload.arrayOffset, payload.limit)
      output.write('\n')
    }
  }

  /**
   * Best-effort removal of the consumer group's ZooKeeper subtree
   * (used for the auto-generated group ids). Failures are deliberately
   * swallowed — cleanup is optional.
   */
  def tryCleanupZookeeper(zkUrl: String, groupId: String) {
    try {
      val dir = "/consumers/" + groupId
      logger.info("Cleaning up temporary zookeeper data under " + dir + ".")
      val zk = new ZkClient(zkUrl, 30*1000, 30*1000, StringSerializer)
      zk.deleteRecursive(dir)
      zk.close()
    } catch {
      case _ => // swallow
    }
  }
}
tcrayford/hafka
kafka/core/src/main/scala/kafka/consumer/ConsoleConsumer.scala
Scala
bsd-3-clause
8,423
package test;

// Compiler NEGATIVE test (neg/t798): the type parameter's upper bound refers
// to itself (`Bracks <: Bracks`), which is illegal. This file is EXPECTED to
// fail compilation — do not "fix" it.
trait Test[Bracks <: Bracks] {
  def f(list : Any) = null;
  class C[T]
  val bracks : Bracks;
  val singletons = f(bracks);
}
AlexSikia/dotty
tests/untried/neg/t798.scala
Scala
bsd-3-clause
142
/*
 * Copyright 2015 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package uk.gov.hmrc.ct.computations

import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalInteger, Input}

/** Corporation-tax computations box CP20 ("Consultancy"):
  * an optional integer input value.
  */
case class CP20(value: Option[Int]) extends CtBoxIdentifier(name = "Consultancy") with CtOptionalInteger with Input

object CP20 {

  /** Convenience constructor: wraps a plain Int as the box's optional value. */
  def apply(int: Int): CP20 = new CP20(Option(int))
}
keithhall/ct-calculations
src/main/scala/uk/gov/hmrc/ct/computations/CP20.scala
Scala
apache-2.0
894
/** Reads the file named by args(0) and prints each non-blank line's decimal
  * integer value in binary, one per line.
  */
object Main {

  /** Converts one decimal-integer string (surrounding whitespace tolerated)
    * to its binary representation, e.g. "5" -> "101".
    * Negative values follow Int.toBinaryString's two's-complement output.
    *
    * @throws NumberFormatException if the trimmed text is not a valid Int.
    */
  def toBinary(decimal: String): String = decimal.trim.toInt.toBinaryString

  def main(args: Array[String]): Unit = {
    val source = scala.io.Source.fromFile(args(0))
    try {
      // Trim first so whitespace-only lines are skipped and "5 " still parses
      // (the original leaked the handle and crashed on padded lines).
      source.getLines().map(_.trim).filter(_.nonEmpty).foreach(line => println(toBinary(line)))
    } finally {
      source.close() // always release the file handle
    }
  }
}
nikai3d/ce-challenges
moderate/dec2bin.scala
Scala
bsd-3-clause
184
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.execution.datasources.csv

import java.nio.charset.Charset

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.hadoop.mapreduce._

import org.apache.spark.internal.Logging
import org.apache.spark.sql.{AnalysisException, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.util.CompressionCodecs
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
import org.apache.spark.util.SerializableConfiguration

/**
 * Provides access to CSV data from pure SQL statements.
 */
class CSVFileFormat extends TextBasedFileFormat with DataSourceRegister {

  // Short name used in `spark.read.format("csv")`.
  override def shortName(): String = "csv"

  // Splittability is delegated to the concrete CSVDataSource (multi-line CSV
  // files are not splittable) in addition to the codec check in the superclass.
  override def isSplitable(
      sparkSession: SparkSession,
      options: Map[String, String],
      path: Path): Boolean = {
    val parsedOptions = new CSVOptions(
      options,
      columnPruning = sparkSession.sessionState.conf.csvColumnPruning,
      sparkSession.sessionState.conf.sessionLocalTimeZone)
    val csvDataSource = CSVDataSource(parsedOptions)
    csvDataSource.isSplitable && super.isSplitable(sparkSession, options, path)
  }

  // Infers a StructType for the given files, or None when inference is impossible.
  override def inferSchema(
      sparkSession: SparkSession,
      options: Map[String, String],
      files: Seq[FileStatus]): Option[StructType] = {
    val parsedOptions = new CSVOptions(
      options,
      columnPruning = sparkSession.sessionState.conf.csvColumnPruning,
      sparkSession.sessionState.conf.sessionLocalTimeZone)
    CSVDataSource(parsedOptions).inferSchema(sparkSession, files, parsedOptions)
  }

  // Configures the write path: applies the compression codec to the Hadoop job
  // and returns a factory producing per-task CsvOutputWriters.
  override def prepareWrite(
      sparkSession: SparkSession,
      job: Job,
      options: Map[String, String],
      dataSchema: StructType): OutputWriterFactory = {
    val conf = job.getConfiguration
    val csvOptions = new CSVOptions(
      options,
      columnPruning = sparkSession.sessionState.conf.csvColumnPruning,
      sparkSession.sessionState.conf.sessionLocalTimeZone)
    csvOptions.compressionCodec.foreach { codec =>
      CompressionCodecs.setCodecConfiguration(conf, codec)
    }

    new OutputWriterFactory {
      override def newInstance(
          path: String,
          dataSchema: StructType,
          context: TaskAttemptContext): OutputWriter = {
        new CsvOutputWriter(path, dataSchema, context, csvOptions)
      }

      override def getFileExtension(context: TaskAttemptContext): String = {
        ".csv" + CodecStreams.getCompressionExtension(context)
      }
    }
  }

  // Builds the per-partition read function. Validation of the corrupt-record
  // column happens here, on the driver, so misconfiguration fails fast.
  override def buildReader(
      sparkSession: SparkSession,
      dataSchema: StructType,
      partitionSchema: StructType,
      requiredSchema: StructType,
      filters: Seq[Filter],
      options: Map[String, String],
      hadoopConf: Configuration): (PartitionedFile) => Iterator[InternalRow] = {
    // Broadcast once so every task shares the (serializable) Hadoop conf.
    val broadcastedHadoopConf =
      sparkSession.sparkContext.broadcast(new SerializableConfiguration(hadoopConf))

    val parsedOptions = new CSVOptions(
      options,
      sparkSession.sessionState.conf.csvColumnPruning,
      sparkSession.sessionState.conf.sessionLocalTimeZone,
      sparkSession.sessionState.conf.columnNameOfCorruptRecord)

    // Check a field requirement for corrupt records here to throw an exception in a driver side
    dataSchema.getFieldIndex(parsedOptions.columnNameOfCorruptRecord).foreach { corruptFieldIndex =>
      val f = dataSchema(corruptFieldIndex)
      if (f.dataType != StringType || !f.nullable) {
        throw new AnalysisException(
          "The field for corrupt records must be string type and nullable")
      }
    }

    // Selecting ONLY the corrupt-record column is disallowed: the column is
    // only populated while parsing the other referenced columns.
    if (requiredSchema.length == 1 &&
      requiredSchema.head.name == parsedOptions.columnNameOfCorruptRecord) {
      throw new AnalysisException(
        "Since Spark 2.3, the queries from raw JSON/CSV files are disallowed when the\n" +
          "referenced columns only include the internal corrupt record column\n" +
          s"(named _corrupt_record by default). For example:\n" +
          "spark.read.schema(schema).csv(file).filter($\"_corrupt_record\".isNotNull).count()\n" +
          "and spark.read.schema(schema).csv(file).select(\"_corrupt_record\").show().\n" +
          "Instead, you can cache or save the parsed results and then send the same query.\n" +
          "For example, val df = spark.read.schema(schema).csv(file).cache() and then\n" +
          "df.filter($\"_corrupt_record\".isNotNull).count()."
      )
    }
    val caseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis

    // Executed on executors, once per PartitionedFile. The corrupt-record
    // column is stripped from both schemas handed to the parser.
    (file: PartitionedFile) => {
      val conf = broadcastedHadoopConf.value.value
      val parser = new UnivocityParser(
        StructType(dataSchema.filterNot(_.name == parsedOptions.columnNameOfCorruptRecord)),
        StructType(requiredSchema.filterNot(_.name == parsedOptions.columnNameOfCorruptRecord)),
        parsedOptions)
      CSVDataSource(parsedOptions).readFile(
        conf,
        file,
        parser,
        requiredSchema,
        dataSchema,
        caseSensitive)
    }
  }

  override def toString: String = "CSV"

  // All CSVFileFormat instances are interchangeable, hence class-based equality.
  override def hashCode(): Int = getClass.hashCode()

  override def equals(other: Any): Boolean = other.isInstanceOf[CSVFileFormat]

  // CSV supports only flat atomic values (plus UDTs whose sql type is atomic);
  // structs/arrays/maps are rejected.
  override def supportDataType(dataType: DataType, isReadPath: Boolean): Boolean = dataType match {
    case _: AtomicType => true

    case udt: UserDefinedType[_] => supportDataType(udt.sqlType, isReadPath)

    case _ => false
  }
}

/**
 * Per-task CSV writer: streams rows through a UnivocityGenerator into a
 * (possibly compressed) output stream using the configured charset.
 */
private[csv] class CsvOutputWriter(
    path: String,
    dataSchema: StructType,
    context: TaskAttemptContext,
    params: CSVOptions) extends OutputWriter with Logging {

  private val charset = Charset.forName(params.charset)

  private val writer = CodecStreams.createOutputStreamWriter(context, new Path(path), charset)

  private val gen = new UnivocityGenerator(dataSchema, writer, params)

  override def write(row: InternalRow): Unit = gen.write(row)

  // Closing the generator also closes the underlying stream.
  override def close(): Unit = gen.close()
}
eyalfa/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVFileFormat.scala
Scala
apache-2.0
6,821
/* * Copyright 2016 The BigDL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.bigdl.dllib.feature.transform.vision.image.augmentation import com.intel.analytics.bigdl.dllib.feature.transform.vision.image.{ImageFrame, LocalImageFrame} import org.opencv.imgcodecs.Imgcodecs import org.scalatest.{FlatSpec, Matchers} class RandomResizeSpec extends FlatSpec with Matchers { val resource = getClass.getClassLoader.getResource("pascal/") "RandomResize" should "work properly" in { val data = ImageFrame.read(resource.getFile) val originalImageFeature = data.asInstanceOf[LocalImageFrame].array(0) var originalHeight = originalImageFeature.getHeight var originalWidth = originalImageFeature.getWidth if (originalHeight < originalWidth) { originalWidth = (originalWidth.toFloat / originalHeight * 256).toInt originalHeight = 256 } else { originalHeight = (originalHeight.toFloat / originalWidth * 256).toInt originalWidth = 256 } val transformer = RandomResize(256, 256) val transformed = transformer(data) val imageFeature = transformed.asInstanceOf[LocalImageFrame].array(0) val resizedHeight = imageFeature.getHeight val resizedWidth = imageFeature.getWidth originalHeight should be (resizedHeight) originalWidth should be (resizedWidth) val tmpFile = java.io.File.createTempFile("module", ".jpg") Imgcodecs.imwrite(tmpFile.toString, imageFeature.opencvMat()) println(tmpFile) } }
intel-analytics/BigDL
scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/transform/vision/image/augmentation/RandomResizeSpec.scala
Scala
apache-2.0
2,032
/* * Copyright 2014 Treode, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.treode.async.stubs import java.util.ArrayDeque import com.treode.async.{Async, Callback, Scheduler} import com.treode.async.implicits._ import Async.async import Callback.fanout /** Capture an asynchronous call so you may be completed later. */ class AsyncCaptor [A] private (implicit scheduler: StubScheduler) { private val cbs = new ArrayDeque [Callback [A]] def outstanding: Int = cbs.size /** Simulate a call to an asynchronous function. Returns an Async to the caller that can be * completed later using `pass` or `fail`. Multiple calls to start are queued (FIFO). */ def start(): Async [A] = async { cb => cbs.add (cb) } /** Pass the next asynchronous function that was started earlier. */ def pass (v: A) { require (!cbs.isEmpty, "No outstanding asynchronous calls.") cbs.remove.pass (v) scheduler.run() } /** Fail the next asynchronous function that was started earlier. */ def fail (t: Throwable) { require (!cbs.isEmpty, "No outstanding asynchronous calls.") cbs.remove.fail (t) scheduler.run() }} object AsyncCaptor { def apply [A] (implicit scheduler: StubScheduler): AsyncCaptor [A] = new AsyncCaptor [A] }
Treode/store
core/stub/com/treode/async/stubs/AsyncCaptor.scala
Scala
apache-2.0
1,812
// Equites, a Scala chess playground // Copyright © 2015 Frank S. Thomas <frank@timepit.eu> // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. package eu.timepit.equites.util import scalaz.Monoid object MonoidUtil { def product[A, B](implicit A: Monoid[A], B: Monoid[B]): Monoid[(A, B)] = new Monoid[(A, B)] { def append(x: (A, B), y: => (A, B)): (A, B) = (A.append(x._1, y._1), B.append(x._2, y._2)) val zero: (A, B) = (A.zero, B.zero) } }
equites-chess/equites-core
src/main/scala/eu/timepit/equites/util/MonoidUtil.scala
Scala
gpl-3.0
1,082
/* *\ ** _____ __ _____ __ ____ ** ** / ___/ / / /____/ / / / \ FieldKit ** ** / ___/ /_/ /____/ / /__ / / / (c) 2009, field.io ** ** /_/ /____/ /____/ /_____/ http://www.field.io ** \* */ /* created August 03, 2009 */ package field.kit.math.geometry import field.kit.math._ import scala.collection.mutable.ArrayBuffer import scala.annotation._ /** * * Direct port of Karsten Schmidts PointOctree.java to Scala/ FieldKit * * Implements a spatial subdivision tree to work efficiently with large numbers * of 3D particles. This octree can only be used for particle type objects and * does NOT support 3D mesh geometry as other forms of Octrees do. * * @see http://code.google.com/p/toxiclibs/source/browse/trunk/toxiclibs/src.core/toxi/geom/PointOctree.java */ class Octree[T <: Vec3](parent:Octree[T], val offset:Vec3, halfSize:Vec3) extends AABB(offset + halfSize, halfSize) { if(parent != null) this.minSize = parent.minSize /** * Constructs a new Octree root node */ def this(offset:Vec3, size:Vec3) = this(null, offset, size*0.5f) // ------------------------------------------------------------------------- /** * Alternative tree recursion limit, number of world units when cells are * not subdivided any further */ var minSize = 10f val treeDepth:Int = if(parent == null) 0 else parent.treeDepth + 1 protected var data:ArrayBuffer[T] = null /** * Stores the child nodes of this node */ var children:Array[Octree[T]] = null /** * the number of child nodes (max. 8) */ var numChildren = 0 /** * Defines wether this node automatically removes branches when a point was removed */ protected val isAutoReducing = false /** * Adds a new point/particle to the tree structure. All points are stored * within leaf nodes only. The tree implementation is using lazy * instantiation for all intermediate tree levels. 
* @param p * @return true, if point has been added successfully */ def insert(p:T):Boolean = { // tail-recursion optimised insert @tailrec def insertElement(n:Octree[T], p:T):Boolean = { if( !(n contains p) ) return false if(n.extent.x <= minSize || n.extent.y <= minSize || n.extent.z <= minSize) { if(n.data == null) n.data = new ArrayBuffer[T] n.data += p true } else { if(n.children == null) n.children = new Array[Octree[T]](8) val octant = n.octantID(p.x - n.offset.x, p.y - n.offset.y, p.z - n.offset.z) if(n.children(octant) == null) { val o = Vec3(n.offset) if((octant & 1) != 0) o.x += n.extent.x if((octant & 2) != 0) o.y += n.extent.y if((octant & 4) != 0) o.z += n.extent.z n.children(octant) = new Octree[T](this, o, n.extent * 0.5f) n.numChildren += 1 } insertElement(n.children(octant), p) } } insertElement(this, p) /* // check if point is inside cube if(!(this contains p)) return false // only add data to leaves for now if(extent.x <= minSize || extent.y <= minSize || extent.z <= minSize) { if(data == null) data = new ArrayBuffer[T] data += p return true } else { if(children == null) children = new Array[Octree[T]](8) val octant = octantID(p.x - offset.x, p.y - offset.y, p.z - offset.z) if(children(octant) == null) { val o = Vec3(offset) if((octant & 1) != 0) o.x += extent.x if((octant & 2) != 0) o.y += extent.y if((octant & 4) != 0) o.z += extent.z children(octant) = new Octree[T](this, o, extent * 0.5f) numChildren += 1 } children(octant) insert p } */ } /** * Removes a point from the tree and (optionally) tries to release memory by * reducing now empty sub-branches. 
* @param p point to delete * @return true, if the point was found & removed */ def remove(p:T):Boolean = { var found = false val leaf = findLeaf(p) if(leaf != null) { val sizeBefore = leaf.data.size leaf.data -= p if(leaf.data.size != sizeBefore) { found = true if(isAutoReducing && leaf.data.size == 0) leaf.reduceBranch } } found } /** * Tries to release memory by clearing up this branch */ protected def reduceBranch { if(data != null && data.size == 0) data = null if(numChildren > 0) { for(i <- 0 until 8) { val child = children(i) if(child != null && child.data == null) children(i) = null } } if(parent != null) parent.reduceBranch } /** * Selects all stored points within the given axis-aligned bounding box. * * @param box AABB * @param result the ArrayBuffer * @return all points with the box volume */ def find(bounds:BoundingVolume, result:ArrayBuffer[T]):ArrayBuffer[T] = { // find using tail-recursion optimisation val r = if(result == null) new ArrayBuffer[T] else result @tailrec def findPoints(n:Octree[T]) { if(n == null) return if(n intersects bounds) { if(n.data != null) { var i = 0 while(i < n.data.length) { val p = n.data(i) if(bounds contains p) r += p i += 1 } } else if(n.numChildren > 0) { findPoints(n.children(0)) findPoints(n.children(1)) findPoints(n.children(2)) findPoints(n.children(3)) findPoints(n.children(4)) findPoints(n.children(5)) findPoints(n.children(6)) findPoints(n.children(7)) } } } findPoints(this) r /* val r = if(result == null) new ArrayBuffer[T] else result if (this intersects box) { if(data != null) { var i = 0 while(i < data.length) { val p = data(i) if(box contains p) r += p i += 1 } } else if(numChildren > 0) { var i = 0 while(i < 8) { val child = children(i) if(child != null) child(box, result) i += 1 } } } r */ } /** * Alias for find */ def apply(bounds:BoundingVolume, result:ArrayBuffer[T]) = find(bounds, result) // /** // * Selects all stored points within the given sphere volume // */ // def apply(sphere:Sphere, 
result:ArrayBuffer[T]):ArrayBuffer[T] = { // val r = if(result == null) new ArrayBuffer[T] else result // // if (this intersects sphere) { // if(data != null) { // var i = 0 // while(i < data.length) { // val p = data(i) // if(sphere contains p) // r += p // i += 1 // } // } else if(numChildren > 0) { // var i = 0 // while(i < 8) { // val child = children(i) // if(child != null) // child(sphere, result) // i += 1 // } // } // } // r // } /** * Finds the leaf node which spatially relates to the given point * * @param p point to check * @return leaf node or null if point is outside the tree dimensions */ def findLeaf(p:T):Octree[T] = { // if not a leaf node... if (this contains p) { if(numChildren > 0) { val octant = octantID(p.x - x, p.y - y, p.z - z) if(children(octant) != null) return children(octant).findLeaf(p) } else if(data != null) { return this } } null } /** * Clears all children and data of this node */ def clear { // TODO consider just clearing the arrays to avoid the cost of recreating them numChildren = 0 children = null data = null } /** * Computes the local child octant/cube index for the given point * @param plocal point in the node-local coordinate system * @return octant index */ protected final def octantID(x:Float, y:Float, z:Float):Int = { var id = 0 if(x >= extent.x) id += 1 if(y >= extent.y) id += 2 if(z >= extent.z) id += 4 id } override def toString = "Octree[X"+ x +" Y"+ y +"Z"+ z +" extent X"+ extent.x +" Y"+ extent.y +" Z"+ extent.z +"]" }
field/FieldKit.scala
src/field/kit/math/geometry/Octree.scala
Scala
lgpl-3.0
8,001
package forms import play.api.libs.json.Json /** * Created by Leandro on 05/09/2016. */ object GraduateForms { case class GraduateData(firstName: String, lastName: String, dni: String, studentCode:String, birthday: String, birthmonth: String, birthyear: String, entryday: String, entrymonth: String, entryyear: String, graduationday: String, graduationmonth: String, graduationyear:String, carreer: String) implicit val signupFormat = Json.format[GraduateData] }
TVilaboa/Egresados
app/forms/GraduateForms.scala
Scala
gpl-3.0
475
package be.studiocredo.aws import java.nio.file.{Files, Paths} import java.security.spec.PKCS8EncodedKeySpec import java.security.{KeyFactory, PrivateKey} import be.studiocredo.{AssetService, Service} import com.amazonaws.auth.AWSCredentials import com.amazonaws.regions.Regions import com.amazonaws.services.cloudfront.model.{DistributionSummary, ListDistributionsRequest} import com.amazonaws.services.cloudfront.{AmazonCloudFrontClient, CloudFrontUrlSigner} import com.amazonaws.services.s3.model.ListObjectsRequest import com.amazonaws.services.s3.{AmazonS3, AmazonS3Client} import com.amazonaws.util.Base64 import com.google.common.io.Resources import com.google.inject.Inject import models.entities.{Asset, User} import org.apache.commons.io.FilenameUtils import org.apache.http.client.utils.URIBuilder import org.joda.time.format.ISOPeriodFormat import org.joda.time.{DateTime, Period} import play.api.{Configuration, Play} import scala.util.{Failure, Try} object Logger { val logger = play.api.Logger("be.studiocredo.aws.download") } object DownloadConfiguration { def init(configuration: Configuration): Option[DownloadConfiguration] = { Try(new DownloadConfiguration(configuration)).recoverWith { case t: Throwable => Logger.logger.error("Failed to initialize download configuration", t) Failure(t) }.toOption } val CUSTOMER_ID_PARAMETER: String = "CustomerId" } class DownloadConfiguration(val configuration: Configuration) extends AWSCredentials { val accessKey: String = configuration.getString(AwsConfigKeys.accessKey).get val secretKey: String = configuration.getString(AwsConfigKeys.secretKey).get val s3: AmazonS3Client = Try { configuration.getString(AwsConfigKeys.s3Region).map(Regions.fromName).foldLeft(new AmazonS3Client(this)) { (client, region) => client.withRegion(region).asInstanceOf[AmazonS3Client] } }.recoverWith { case t: Throwable => Logger.logger.error("Failed to initialize AWS S3 client", t) Failure(t) }.get val cloudFront: AmazonCloudFrontClient = Try { 
configuration.getString(AwsConfigKeys.cfRegion).map(Regions.fromName).foldLeft(new AmazonCloudFrontClient(this)) { (client, region) => client.withRegion(region).asInstanceOf[AmazonCloudFrontClient] } }.recoverWith { case t: Throwable => Logger.logger.error("Failed to initialize AWS CloudFront client", t) Failure(t) }.get val bucketName: String = configuration.getString(AwsConfigKeys.s3BucketName).get val distributionDomain: String = { val distribution: DistributionSummary = getCloudFrontDistribution(cloudFront, bucketName) if (configuration.getBoolean(AwsConfigKeys.cfUseAlias).get && !distribution.getAliases.getItems.isEmpty) distribution.getAliases.getItems.get(0) else distribution.getDomainName } val validPeriod: Period = configuration.getString(AwsConfigKeys.cfUrlValidity).map(ISOPeriodFormat.standard.parsePeriod).get val expirationDate: DateTime = new DateTime().plus(validPeriod) val keyPairId: String = configuration.getString(AwsConfigKeys.keyPairId).get val keyPairPrivateKeyResource: Option[String] = configuration.getString(AwsConfigKeys.keyPairPrivateKeyResource) val keyPairPrivateKeyPath: Option[String] = configuration.getString(AwsConfigKeys.keyPairPrivateKeyPath) val derPrivateKey: Array[Byte] = keyPairPrivateKeyResource.fold { val path = keyPairPrivateKeyPath.get Files.readAllBytes(Paths.get(path)) } { resource => Resources.toByteArray(Resources.getResource(resource)) } val keySpec: PKCS8EncodedKeySpec = new PKCS8EncodedKeySpec(derPrivateKey) val keyFactory: KeyFactory = KeyFactory.getInstance("RSA") val key: PrivateKey = keyFactory.generatePrivate(keySpec) override def getAWSAccessKeyId: String = accessKey override def getAWSSecretKey: String = secretKey protected def getCloudFrontDistribution(cloudFront: AmazonCloudFrontClient, bucketName: String): DistributionSummary = { import scala.collection.JavaConverters._ val originId = getCloudFrontOriginId(bucketName) val summary = Try { cloudFront.listDistributions(new 
ListDistributionsRequest).getDistributionList.getItems.asScala.find { summary => summary.getOrigins.getItems.asScala.exists { origin => origin.getId == originId } }.get } recoverWith { case t: Throwable => Logger.logger.error(s"CloudFront distribution for origin $originId not found", t) Failure(t) } summary.get } protected def getCloudFrontOriginId(bucketName: String): String = s"s3-$bucketName" } class DownloadService @Inject()(assetService: AssetService) extends Service { var configuration: Option[DownloadConfiguration] = None override def onStart() { Logger.logger.debug("Starting download service") configuration = DownloadConfiguration.init(Play.current.configuration) } def getDownloadUrl(asset: Asset, user: User): Option[String] = asset.objectKey.flatMap { objectKey => configuration.flatMap { c => findObject(c.s3, c.bucketName, objectKey).flatMap { s3Object => val fileName: String = FilenameUtils.getName(objectKey) val fileSize: Long = s3Object.getSize getSignedUrlForObjectKey(c, objectKey, user, fileName, fileSize) } } } override def onStop(): Unit = { Logger.logger.debug("Stopping download service") } private def findObject(s3: AmazonS3, bucketName: String, objectKey: String) = { import scala.collection.JavaConverters._ s3.listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix(objectKey)).getObjectSummaries.asScala.headOption } def getCustomerId(user: User): String = Seq(user.id.toString, user.username, user.name).mkString("/") private def getSignedUrlForObjectKey(configuration: DownloadConfiguration, objectKey: String, user: User, fileName: String, size: Long): Option[String] = { val maybeUrl = Try { // Signed URLs for a private distribution // Note that Java only supports SSL certificates in DER format, // so you will need to convert your PEM-formatted file to DER format. 
// To do this, you can use openssl: // openssl pkcs8 -topk8 -nocrypt -in origin.pem -inform PEM -out new.der // -outform DER // So the encoder works correctly, you should also add the bouncy castle jar // to your project and then add the provider. val path = if (objectKey.startsWith("/")) objectKey else "/" + objectKey val customerId = getCustomerId(user) val policyResourcePath: String = new URIBuilder().setScheme("http").setHost(configuration.distributionDomain).setPath(path).addParameter(DownloadConfiguration.CUSTOMER_ID_PARAMETER, Base64.encodeAsString(customerId.getBytes: _*)).build.toString CloudFrontUrlSigner.getSignedURLWithCannedPolicy(policyResourcePath, configuration.keyPairId, configuration.key, configuration.expirationDate.toDate) } recoverWith { case t: Throwable => Logger.logger.error("Could not generate signed url", t) Failure(t) } maybeUrl.toOption } }
studiocredo/ticket-reservation
app/be/studiocredo/aws/DownloadService.scala
Scala
apache-2.0
7,079
package k2b6s9j.singingKIA.Songs object Portal2 { }
kepler0/singingKIA
src/main/scala/k2b6s9j/singingKIA/Songs/Portal2.scala
Scala
mit
54
package org.hammerlab.bam.check.indexed import hammerlab.iterator._ import org.hammerlab.bam.check import org.hammerlab.bam.check.Checker.MakeChecker import org.hammerlab.bam.check.{ MaxReadSize, ReadStartFinder } import org.hammerlab.bgzf.Pos import org.hammerlab.channel.{ CachingChannel, SeekableByteChannel } import scala.collection.immutable.SortedSet case class Checker(readPositions: SortedSet[Pos]) extends check.Checker[Boolean] with ReadStartFinder { override def apply(pos: Pos): Boolean = readPositions(pos) override def nextReadStart(start: Pos)( implicit maxReadSize: MaxReadSize ): Option[Pos] = readPositions .iteratorFrom(start) .buffered .headOption } object Checker { implicit def makeChecker(implicit records: SortedSet[Pos]): MakeChecker[Boolean, Checker] = new MakeChecker[Boolean, Checker] { override def apply(ch: CachingChannel[SeekableByteChannel]): Checker = Checker(records) } }
ryan-williams/spark-bam
check/src/main/scala/org/hammerlab/bam/check/indexed/Checker.scala
Scala
apache-2.0
987
package com.dividezero.stubby.core.service.model import org.scalatest.FunSuite class ParamPatternTest extends FunSuite { val instance1 = new ParamPattern("foo", new TextPattern("bar")) val instance2 = new ParamPattern("foo", new TextPattern("bar")) test("equality") { assert(instance1 === instance2) } test("hash code") { assert(instance1.hashCode === instance2.hashCode) } }
themillhousegroup/http-stub-server-scala
core/src/test/scala/com/dividezero/stubby/core/service/model/ParamPatternTest.scala
Scala
apache-2.0
401
package io.cumulus.views.email import io.cumulus.Settings import io.cumulus.views.View import play.api.i18n.Messages import scalatags.Text.all._ /** * Template for Cumulus mails. The template should be compatible with the majority of * web client. */ trait CumulusEmailTemplate extends View { protected def settings: Settings protected def messages: Messages override lazy val content: Frag = raw(rawContent) protected val mailTitle: String = messages("email.title") protected def mailContentTitle: String protected def mailContent: Seq[Tag] protected val mailFooter: Tag = { span( messages("email.footer", settings.mail.from) ) } /** * Mail are an horrible format which can't really be used with scalatags, so the mail is instead * used as a large string.. */ protected lazy val rawContent: String = s""" |<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.0 Transitional //EN" | "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> |<html xmlns:v="urn:schemas-microsoft-com:vml" xmlns:o="urn:schemas-microsoft-com:office:office" | xmlns="http://www.w3.org/1999/xhtml"> |<head> | <!--[if gte mso 9]> | <xml> | <o:OfficeDocumentSettings> | <o:AllowPNG/> | <o:PixelsPerInch>96</o:PixelsPerInch> | </o:OfficeDocumentSettings> | </xml> | <![endif]--> | | <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> | <meta name="viewport" content="width=device-width"> | <!--[if !mso]><!--> | <meta http-equiv="X-UA-Compatible" content="IE=edge"><!--<![endif]--> | <title></title> | <!--[if !mso]><!-- --> | <link href="https://fonts.googleapis.com/css?family=Lato" rel="stylesheet" type="text/css"> | <link href="https://fonts.googleapis.com/css?family=Droid+Serif" rel="stylesheet" type="text/css"> | <link href="https://fonts.googleapis.com/css?family=Montserrat" rel="stylesheet" type="text/css"> | <!--<![endif]--> | | <style type="text/css" id="media-query"> | body { | margin: 0; | padding: 0; | } | | table, tr, td { | vertical-align: top; | border-collapse: collapse; | } | | .ie-browser 
table, .mso-container table { | table-layout: fixed; | } | | * { | line-height: inherit; | } | | a[x-apple-data-detectors=true] { | color: inherit !important; | text-decoration: none !important; | } | | [owa] .img-container div, [owa] .img-container button { | display: block !important; | } | | [owa] .fullwidth button { | width: 100% !important; | } | | [owa] .block-grid .col { | display: table-cell; | float: none !important; | vertical-align: top; | } | | .ie-browser .num12, .ie-browser .block-grid, [owa] .num12, [owa] .block-grid { | width: 640px !important; | } | | .ExternalClass, .ExternalClass p, .ExternalClass span, .ExternalClass font, .ExternalClass td, .ExternalClass div { | line-height: 100%; | } | | .ie-browser .mixed-two-up .num4, [owa] .mixed-two-up .num4 { | width: 212px !important; | } | | .ie-browser .mixed-two-up .num8, [owa] .mixed-two-up .num8 { | width: 424px !important; | } | | .ie-browser .block-grid.two-up .col, [owa] .block-grid.two-up .col { | width: 320px !important; | } | | .ie-browser .block-grid.three-up .col, [owa] .block-grid.three-up .col { | width: 213px !important; | } | | .ie-browser .block-grid.four-up .col, [owa] .block-grid.four-up .col { | width: 160px !important; | } | | .ie-browser .block-grid.five-up .col, [owa] .block-grid.five-up .col { | width: 128px !important; | } | | .ie-browser .block-grid.six-up .col, [owa] .block-grid.six-up .col { | width: 106px !important; | } | | .ie-browser .block-grid.seven-up .col, [owa] .block-grid.seven-up .col { | width: 91px !important; | } | | .ie-browser .block-grid.eight-up .col, [owa] .block-grid.eight-up .col { | width: 80px !important; | } | | .ie-browser .block-grid.nine-up .col, [owa] .block-grid.nine-up .col { | width: 71px !important; | } | | .ie-browser .block-grid.ten-up .col, [owa] .block-grid.ten-up .col { | width: 64px !important; | } | | .ie-browser .block-grid.eleven-up .col, [owa] .block-grid.eleven-up .col { | width: 58px !important; | } | | .ie-browser 
.block-grid.twelve-up .col, [owa] .block-grid.twelve-up .col { | width: 53px !important; | } | | @media only screen and (min-width: 660px) { | .block-grid { | width: 640px !important; | } | | .block-grid .col { | vertical-align: top; | } | | .block-grid .col.num12 { | width: 640px !important; | } | | .block-grid.mixed-two-up .col.num4 { | width: 212px !important; | } | | .block-grid.mixed-two-up .col.num8 { | width: 424px !important; | } | | .block-grid.two-up .col { | width: 320px !important; | } | | .block-grid.three-up .col { | width: 213px !important; | } | | .block-grid.four-up .col { | width: 160px !important; | } | | .block-grid.five-up .col { | width: 128px !important; | } | | .block-grid.six-up .col { | width: 106px !important; | } | | .block-grid.seven-up .col { | width: 91px !important; | } | | .block-grid.eight-up .col { | width: 80px !important; | } | | .block-grid.nine-up .col { | width: 71px !important; | } | | .block-grid.ten-up .col { | width: 64px !important; | } | | .block-grid.eleven-up .col { | width: 58px !important; | } | | .block-grid.twelve-up .col { | width: 53px !important; | } | } | | @media (max-width: 660px) { | .block-grid, .col { | min-width: 320px !important; | max-width: 100% !important; | display: block !important; | } | | .block-grid { | width: calc(100% - 40px) !important; | } | | .col { | width: 100% !important; | } | | .col > div { | margin: 0 auto; | } | | img.fullwidth, img.fullwidthOnMobile { | max-width: 100% !important; | } | | .no-stack .col { | min-width: 0 !important; | display: table-cell !important; | } | | .no-stack.two-up .col { | width: 50% !important; | } | | .no-stack.mixed-two-up .col.num4 { | width: 33% !important; | } | | .no-stack.mixed-two-up .col.num8 { | width: 66% !important; | } | | .no-stack.three-up .col.num4 { | width: 33% !important; | } | | .no-stack.four-up .col.num3 { | width: 25% !important; | } | | .mobile_hide { | min-height: 0px; | max-height: 0px; | max-width: 0px; | display: none; | 
overflow: hidden; | font-size: 0px; | } | } | | </style> |</head> | | |<body class="clean-body" style="margin: 0;padding: 0;-webkit-text-size-adjust: 100%;background-color: #F4F4F4"> |<style type="text/css" id="media-query-bodytag"> | @media (max-width: 520px) { | .block-grid { | min-width: 320px !important; | max-width: 100% !important; | width: 100% !important; | display: block !important; | } | | .col { | min-width: 320px !important; | max-width: 100% !important; | width: 100% !important; | display: block !important; | } | | .col > div { | margin: 0 auto; | } | | img.fullwidth { | max-width: 100% !important; | } | | img.fullwidthOnMobile { | max-width: 100% !important; | } | | .no-stack .col { | min-width: 0 !important; | display: table-cell !important; | } | | .no-stack.two-up .col { | width: 50% !important; | } | | .no-stack.mixed-two-up .col.num4 { | width: 33% !important; | } | | .no-stack.mixed-two-up .col.num8 { | width: 66% !important; | } | | .no-stack.three-up .col.num4 { | width: 33% !important; | } | | .no-stack.four-up .col.num3 { | width: 25% !important; | } | | .mobile_hide { | min-height: 0px !important; | max-height: 0px !important; | max-width: 0px !important; | display: none !important; | overflow: hidden !important; | font-size: 0px !important; | } | } |</style> |<!--[if IE]> |<div class="ie-browser"><![endif]--> |<!--[if mso]> |<div class="mso-container"><![endif]--> |<table class="nl-container" | style="border-collapse: collapse;table-layout: fixed;border-spacing: 0;mso-table-lspace: 0pt;mso-table-rspace: 0pt;vertical-align: top;min-width: 320px;Margin: 0 auto;background-color: #F4F4F4;width: 100%" | cellpadding="0" cellspacing="0"> | <tbody> | <tr style="vertical-align: top"> | <td style="word-break: break-word;border-collapse: collapse !important;vertical-align: top"> | <!--[if (mso)|(IE)]> | <table width="100%" cellpadding="0" cellspacing="0" border="0"> | <tr> | <td align="center" style="background-color: #F4F4F4;"><![endif]--> | | <div 
style="background-color:transparent;"> | <div style="Margin: 0 auto;min-width: 320px;max-width: 640px;overflow-wrap: break-word;word-wrap: break-word;word-break: break-word;background-color: #f2fbfa;" class="block-grid "> | <div style="border-collapse: collapse;display: table;width: 100%;background-color:#f2fbfa;"> | <!--[if (mso)|(IE)]> | <table width="100%" cellpadding="0" cellspacing="0" border="0"> | <tr> | <td style="background-color:transparent;" align="center"> | <table cellpadding="0" cellspacing="0" border="0" style="width: 640px;"> | <tr class="layout-full-width" style="background-color:#f2fbfa;"> | <![endif]--> | | <!--[if (mso)|(IE)]> | <td align="center" width="640" style=" width:640px; padding-right: 0px; padding-left: 0px; padding-top:5px; padding-bottom:5px; border-top: 0px solid transparent; border-left: 0px solid transparent; border-bottom: 0px solid transparent; border-right: 0px solid transparent;" valign="top"> | <![endif]--> | <div class="col num12" style="min-width: 320px;max-width: 640px;display: table-cell;vertical-align: top;"> | <div style="background-color: transparent; width: 100% !important;"> | <!--[if (!mso)&(!IE)]><!--> | <div | style="border-top: 0px solid transparent; border-left: 0px solid transparent; border-bottom: 0px solid transparent; border-right: 0px solid transparent; padding-top:5px; padding-bottom:5px; padding-right: 0px; padding-left: 0px;"> | <!--<![endif]--> | | | <div class=""> | <!--[if mso]> | <table width="100%" cellpadding="0" cellspacing="0" border="0"> | <tr> | <td style="padding-right: 10px; padding-left: 10px; padding-top: 10px; padding-bottom: 10px;"> | <![endif]--> | <div style="color:#3dc7be;line-height:120%;font-family:'Lato', Tahoma, Verdana, Segoe, sans-serif; padding-right: 10px; padding-left: 10px; padding-top: 10px; padding-bottom: 10px;"> | <div style="font-size:12px;line-height:14px;font-family:Lato, Tahoma, Verdana, Segoe, sans-serif;color:#3dc7be;text-align:left;"> | <p style="margin: 
0;font-size: 14px;line-height: 17px"> | <img border="0" src="${settings.host.url}/assets/cumulus-logo.png" alt="Logo" title="Logo" style="outline: none;text-decoration: none;-ms-interpolation-mode: bicubic;clear: both;display: inline; border: 0; height: 25px; float: none; width: auto; padding-right: 7px;" height="25"> | <span style="font-size: 20px; line-height: 24px;">$mailTitle</span> | </p> | </div> | </div> | <!--[if mso]></td></tr></table><![endif]--> | </div> | | <!--[if (!mso)&(!IE)]><!--></div><!--<![endif]--> | </div> | </div> | <!--[if (mso)|(IE)]></td></tr></table></td></tr></table><![endif]--> | </div> | </div> | </div> | <div style="background-color:transparent;"> | <div style="Margin: 0 auto;min-width: 320px;max-width: 640px;overflow-wrap: break-word;word-wrap: break-word;word-break: break-word;background-color: #FFFFFF;" class="block-grid "> | <div style="border-collapse: collapse;display: table;width: 100%;background-color:#FFFFFF;"> | <!--[if (mso)|(IE)]> | <table width="100%" cellpadding="0" cellspacing="0" border="0"> | <tr> | <td style="background-color:transparent;" align="center"> | <table cellpadding="0" cellspacing="0" border="0" style="width: 640px;"> | <tr class="layout-full-width" style="background-color:#FFFFFF;"><![endif]--> | | <!--[if (mso)|(IE)]> | <td align="center" width="640" | style=" width:640px; padding-right: 0px; padding-left: 0px; padding-top:0px; padding-bottom:0px; border-top: 0px solid transparent; border-left: 0px solid transparent; border-bottom: 0px solid transparent; border-right: 0px solid transparent;" | valign="top"><![endif]--> | <div class="col num12" | style="min-width: 320px;max-width: 640px;display: table-cell;vertical-align: top;"> | <div style="background-color: transparent; width: 100% !important;"> | <!--[if (!mso)&(!IE)]><!--> | <div | style="border-top: 0px solid transparent; border-left: 0px solid transparent; border-bottom: 0px solid transparent; border-right: 0px solid transparent; padding-top:0px; 
padding-bottom:0px; padding-right: 0px; padding-left: 0px;"> | <!--<![endif]--> | | | <div align="center" class="img-container center autowidth fullwidth " style="padding-right: 0px; padding-left: 0px;"> | <!--[if mso]> | <table width="100%" cellpadding="0" cellspacing="0" border="0"> | <tr style="line-height:0px;line-height:0px;"> | <td style="padding-right: 0px; padding-left: 0px;" align="center"> | <![endif]--> | <img class="center autowidth fullwidth" align="center" border="0" | src="${settings.host.url}/assets/mail.jpg" alt="Image" title="Image" | style="outline: none;text-decoration: none;-ms-interpolation-mode: bicubic;clear: both;display: block !important;border: 0;height: auto;float: none;width: 100%;max-width: 640px" | width="640"> | <!--[if mso]></td></tr></table><![endif]--> | </div> | | | <table border="0" cellpadding="0" cellspacing="0" width="100%" class="divider " style="border-collapse: collapse;table-layout: fixed;border-spacing: 0;mso-table-lspace: 0pt;mso-table-rspace: 0pt;vertical-align: top;min-width: 100%;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%"> | <tbody> | <tr style="vertical-align: top"> | <td class="divider_inner" style="word-break: break-word;border-collapse: collapse !important;vertical-align: top;padding-right: 10px;padding-left: 10px;padding-top: 10px;padding-bottom: 10px;min-width: 100%;mso-line-height-rule: exactly;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%"> | <table class="divider_content" align="center" border="0" cellpadding="0" cellspacing="0" width="100%" style="border-collapse: collapse;table-layout: fixed;border-spacing: 0;mso-table-lspace: 0pt;mso-table-rspace: 0pt;vertical-align: top;border-top: 0px solid transparent;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%"> | <tbody> | <tr style="vertical-align: top"> | <td style="word-break: break-word;border-collapse: collapse !important;vertical-align: top;mso-line-height-rule: exactly;-ms-text-size-adjust: 
100%;-webkit-text-size-adjust: 100%"> | <span></span> | </td> | </tr> | </tbody> | </table> | </td> | </tr> | </tbody> | </table> | | | <div class=""> | <!--[if mso]> | <table width="100%" cellpadding="0" cellspacing="0" border="0"> | <tr> | <td style="padding-right: 10px; padding-left: 10px; padding-top: 30px; padding-bottom: 20px;"> | <![endif]--> | <div style="line-height:120%;color:#6f6f6f;font-family:'Lato', Tahoma, Verdana, Segoe, sans-serif; padding-right: 10px; padding-left: 10px; padding-top: 30px; padding-bottom: 20px;"> | <div style="font-size:12px;line-height:14px;font-family:Lato, Tahoma, Verdana, Segoe, sans-serif;color:#6f6f6f;text-align:left;"> | <p style="margin: 0;font-size: 14px;line-height: 17px;text-align: center"> | <strong><span style="font-size: 48px; line-height: 57px;">$mailContentTitle</span></strong> | </p></div> | </div> | <!--[if mso]></td></tr></table><![endif]--> | </div> | | | <table border="0" cellpadding="0" cellspacing="0" width="100%" class="divider " style="border-collapse: collapse;table-layout: fixed;border-spacing: 0;mso-table-lspace: 0pt;mso-table-rspace: 0pt;vertical-align: top;min-width: 100%;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%"> | <tbody> | <tr style="vertical-align: top"> | <td class="divider_inner" style="word-break: break-word;border-collapse: collapse !important;vertical-align: top;padding-right: 10px;padding-left: 10px;padding-top: 10px;padding-bottom: 10px;min-width: 100%;mso-line-height-rule: exactly;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%"> | <table class="divider_content" height="0px" align="center" border="0" cellpadding="0" cellspacing="0" width="100%" style="border-collapse: collapse;table-layout: fixed;border-spacing: 0;mso-table-lspace: 0pt;mso-table-rspace: 0pt;vertical-align: top;border-top: 1px solid #CFCFCF;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%"> | <tbody> | <tr style="vertical-align: top"> | <td style="word-break: break-word;border-collapse: 
collapse !important;vertical-align: top;font-size: 0px;line-height: 0px;mso-line-height-rule: exactly;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%"> | <span>&#160;</span> | </td> | </tr> | </tbody> | </table> | </td> | </tr> | </tbody> | </table> | | ${mailContent.map { c => s""" | <div class=""> | <!--[if mso]> | <table width="100%" cellpadding="0" cellspacing="0" border="0"> | <tr> | <td style="padding-right: 35px; padding-left: 35px; padding-top: 20px; padding-bottom: 25px;"> | <![endif]--> | <div style="line-height:180%;color:#555555;font-family:'Lato', Tahoma, Verdana, Segoe, sans-serif; padding-right: 35px; padding-left: 35px; padding-top: 20px; padding-bottom: 25px;"> | <div style="font-size:12px;line-height:22px;font-family:Lato, Tahoma, Verdana, Segoe, sans-serif;color:#555555;text-align:left;"> | <p style="margin: 0;font-size: 14px;line-height: 25px;text-align: justify"> | <span style="font-size: 18px; line-height: 32px;"> | ${c.render} | </span> | </p> | </div> | </div> | <!--[if mso]></td></tr></table><![endif]--> | </div> """.stripMargin }.mkString("", " ", "")} | | <!--[if (!mso)&(!IE)]><!--></div><!--<![endif]--> | </div> | </div> | <!--[if (mso)|(IE)]></td></tr></table></td></tr></table><![endif]--> | </div> | </div> | </div> | <div style="background-color:transparent;"> | <div style="Margin: 0 auto;min-width: 320px;max-width: 640px;overflow-wrap: break-word;word-wrap: break-word;word-break: break-word;background-color: #FFFFFF; class="block-grid "> | <div style="border-collapse: collapse;display: table;width: 100%;background-color:#FFFFFF;"> | <!--[if (mso)|(IE)]> | <table width="100%" cellpadding="0" cellspacing="0" border="0"> | <tr> | <td style="background-color:transparent;" align="center"> | <table cellpadding="0" cellspacing="0" border="0" style="width: 640px;"> | <tr class="layout-full-width" style="background-color:#FFFFFF;"><![endif]--> | | <!--[if (mso)|(IE)]> | <td align="center" width="640" style=" width:640px; 
padding-right: 0px; padding-left: 0px; padding-top:5px; padding-bottom:5px; border-top: 0px solid transparent; border-left: 0px solid transparent; border-bottom: 0px solid transparent; border-right: 0px solid transparent;" valign="top"><![endif]--> | <div class="col num12" style="min-width: 320px;max-width: 640px;display: table-cell;vertical-align: top;"> | <div style="background-color: transparent; width: 100% !important;"> | <!--[if (!mso)&(!IE)]><!--> | <div style="border-top: 0px solid transparent; border-left: 0px solid transparent; border-bottom: 0px solid transparent; border-right: 0px solid transparent; padding-top:5px; padding-bottom:5px; padding-right: 0px; padding-left: 0px;"> | <!--<![endif]--> | | | <div class=""> | <!--[if mso]> | <table width="100%" cellpadding="0" cellspacing="0" border="0"> | <tr> | <td style="padding-right: 15px; padding-left: 15px; padding-top: 20px; padding-bottom: 15px;"> | <![endif]--> | <div | style="line-height:120%;color:#0D0D0D;font-family:'Droid Serif', Georgia, Times, 'Times New Roman', serif; padding-right: 15px; padding-left: 15px; padding-top: 20px; padding-bottom: 15px;"> | <div style="font-size:12px;line-height:14px;font-family:'Droid Serif',Georgia,Times,'Times New Roman',serif;color:#0D0D0D;text-align:left;"> | <p style="margin: 0;font-size: 14px;line-height: 17px;text-align: center"> | <span style="font-size: 12px; line-height: 14px;"> | ${mailFooter.render} | </span> | </p> | </div> | </div> | <!--[if mso]></td></tr></table><![endif]--> | </div> | | | <div align="center" style="padding-right: 10px; padding-left: 10px; padding-bottom: 10px;" class=""> | <div style="line-height:10px;font-size:1px">&#160;</div> | <div style="display: table; max-width:77px;"> | <!--[if (mso)|(IE)]> | <table width="57" cellpadding="0" cellspacing="0" border="0"> | <tr> | <td style="border-collapse:collapse; padding-right: 10px; padding-left: 10px; padding-bottom: 10px;" | align="center"> | <table width="100%" cellpadding="0" 
cellspacing="0" border="0" style="border-collapse:collapse; mso-table-lspace: 0pt;mso-table-rspace: 0pt; width:57px;"> | <tr> | <td width="32" style="width:32px; padding-right: 5px;" valign="top"> | <![endif]--> | <table align="left" border="0" cellspacing="0" cellpadding="0" width="32" height="32" style="border-collapse: collapse;table-layout: fixed;border-spacing: 0;mso-table-lspace: 0pt;mso-table-rspace: 0pt;vertical-align: top;Margin-right: 0"> | <tbody> | <tr style="vertical-align: top"> | <td align="left" valign="middle" style="word-break: break-word;border-collapse: collapse !important;vertical-align: top"> | <a href="https://github.com/Cumulus-Cloud/cumulus" title="Github" target="_blank"> | <img src="${settings.host.url}/assets/github-logo.png" alt="Github" title="Github" width="32" style="outline: none;text-decoration: none;-ms-interpolation-mode: bicubic;clear: both;display: block !important;border: none;height: auto;float: none;max-width: 32px !important"> | </a> | <div style="line-height:5px;font-size:1px">&#160;</div> | </td> | </tr> | </tbody> | </table> | <!--[if (mso)|(IE)]></td></tr></table></td></tr></table><![endif]--> | </div> | </div> | | <!--[if (!mso)&(!IE)]><!--></div><!--<![endif]--> | </div> | </div> | <!--[if (mso)|(IE)]></td></tr></table></td></tr></table><![endif]--> | </div> | </div> | </div> | <div style="background-color:transparent;"> | <div style="Margin: 0 auto;min-width: 320px;max-width: 640px;overflow-wrap: break-word;word-wrap: break-word;word-break: break-word;background-color: #FFFFFF;" class="block-grid "> | <div style="border-collapse: collapse;display: table;width: 100%;background-color:#FFFFFF;"> | <!--[if (mso)|(IE)]> | <table width="100%" cellpadding="0" cellspacing="0" border="0"> | <tr> | <td style="background-color:transparent;" align="center"> | <table cellpadding="0" cellspacing="0" border="0" style="width: 640px;"> | <tr class="layout-full-width" style="background-color:#FFFFFF;"><![endif]--> | | <!--[if 
(mso)|(IE)]> | <td align="center" width="640" style=" width:640px; padding-right: 0px; padding-left: 0px; padding-top:5px; padding-bottom:0px; border-top: 0px solid transparent; border-left: 0px solid transparent; border-bottom: 0px solid transparent; border-right: 0px solid transparent;" valign="top"><![endif]--> | <div class="col num12" style="min-width: 320px;max-width: 640px;display: table-cell;vertical-align: top;"> | <div style="background-color: transparent; width: 100% !important;"> | <!--[if (!mso)&(!IE)]><!--> | <div style="border-top: 0px solid transparent; border-left: 0px solid transparent; border-bottom: 0px solid transparent; border-right: 0px solid transparent; padding-top:5px; padding-bottom:0px; padding-right: 0px; padding-left: 0px;"> | <!--<![endif]--> | | <table border="0" cellpadding="0" cellspacing="0" width="100%" class="divider " style="border-collapse: collapse;table-layout: fixed;border-spacing: 0;mso-table-lspace: 0pt;mso-table-rspace: 0pt;vertical-align: top;min-width: 100%;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%"> | <tbody> | <tr style="vertical-align: top"> | <td class="divider_inner" style="word-break: break-word;border-collapse: collapse !important;vertical-align: top;padding-right: 0px;padding-left: 0px;padding-top: 0px;padding-bottom: 0px;min-width: 100%;mso-line-height-rule: exactly;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%"> | <table class="divider_content" align="center" border="0" cellpadding="0" cellspacing="0" width="100%" style="border-collapse: collapse;table-layout: fixed;border-spacing: 0;mso-table-lspace: 0pt;mso-table-rspace: 0pt;vertical-align: top;border-top: 10px solid #3b3b3b;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%"> | <tbody> | <tr style="vertical-align: top"> | <td style="word-break: break-word;border-collapse: collapse !important;vertical-align: top;mso-line-height-rule: exactly;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%"> | <span></span> | </td> 
| </tr> | </tbody> | </table> | </td> | </tr> | </tbody> | </table> | | <!--[if (!mso)&(!IE)]><!--></div><!--<![endif]--> | </div> | </div> | <!--[if (mso)|(IE)]></td></tr></table></td></tr></table><![endif]--> | </div> | </div> | </div> | <div style="background-color:transparent;"> | <div style="Margin: 0 auto;min-width: 320px;max-width: 640px;overflow-wrap: break-word;word-wrap: break-word;word-break: break-word;background-color: transparent;" class="block-grid "> | <div style="border-collapse: collapse;display: table;width: 100%;background-color:transparent;"> | <!--[if (mso)|(IE)]> | <table width="100%" cellpadding="0" cellspacing="0" border="0"> | <tr> | <td style="background-color:transparent;" align="center"> | <table cellpadding="0" cellspacing="0" border="0" style="width: 640px;"> | <tr class="layout-full-width" style="background-color:transparent;"><![endif]--> | | <!--[if (mso)|(IE)]> | <td align="center" width="640" style=" width:640px; padding-right: 0px; padding-left: 0px; padding-top:5px; padding-bottom:5px; border-top: 0px solid transparent; border-left: 0px solid transparent; border-bottom: 0px solid transparent; border-right: 0px solid transparent;" valign="top"><![endif]--> | <div class="col num12" | style="min-width: 320px;max-width: 640px;display: table-cell;vertical-align: top;"> | <div style="background-color: transparent; width: 100% !important;"> | <!--[if (!mso)&(!IE)]><!--> | <div style="border-top: 0px solid transparent; border-left: 0px solid transparent; border-bottom: 0px solid transparent; border-right: 0px solid transparent; padding-top:5px; padding-bottom:5px; padding-right: 0px; padding-left: 0px;"> | <!--<![endif]--> | | <table border="0" cellpadding="0" cellspacing="0" width="100%" class="divider " style="border-collapse: collapse;table-layout: fixed;border-spacing: 0;mso-table-lspace: 0pt;mso-table-rspace: 0pt;vertical-align: top;min-width: 100%;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%"> | <tbody> | <tr 
style="vertical-align: top"> | <td class="divider_inner" | style="word-break: break-word;border-collapse: collapse !important;vertical-align: top;padding-right: 10px;padding-left: 10px;padding-top: 10px;padding-bottom: 10px;min-width: 100%;mso-line-height-rule: exactly;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%"> | <table class="divider_content" align="center" border="0" cellpadding="0" cellspacing="0" width="100%" style="border-collapse: collapse;table-layout: fixed;border-spacing: 0;mso-table-lspace: 0pt;mso-table-rspace: 0pt;vertical-align: top;border-top: 0px solid transparent;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%"> | <tbody> | <tr style="vertical-align: top"> | <td style="word-break: break-word;border-collapse: collapse !important;vertical-align: top;mso-line-height-rule: exactly;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%"> | <span></span> | </td> | </tr> | </tbody> | </table> | </td> | </tr> | </tbody> | </table> | | <!--[if (!mso)&(!IE)]><!--></div><!--<![endif]--> | </div> | </div> | <!--[if (mso)|(IE)]></td></tr></table></td></tr></table><![endif]--> | </div> | </div> | </div> | <!--[if (mso)|(IE)]></td></tr></table><![endif]--> | </td> | </tr> | </tbody> |</table> |<!--[if (mso)|(IE)]></div><![endif]--> | |</body> |</html> """.stripMargin }
Cumulus-Cloud/cumulus
server/cumulus-core/src/main/scala/io/cumulus/views/email/CumulusEmailTemplate.scala
Scala
mit
37,894
package hr.element.beepo
package core

import net.liftweb._
import http._
import sitemap._

import org.slf4j.LoggerFactory

/**
 * Lift bootstrap hook: registers this application's packages, its stateless
 * REST dispatchers, and the default request character encoding.
 */
class Boot extends Bootable with Logger {

  def boot {
    // Make Lift aware of this application's snippet/view packages.
    LiftRules.addToPackages("hr.element.beepo.core")

    // Stateless REST handlers, appended in the original registration order.
    LiftRules.statelessDispatchTable.append(Api)
    LiftRules.statelessDispatchTable.append(MockRest)

    // Force UTF-8 on every incoming request before any other processing.
    LiftRules.early.append(_ setCharacterEncoding "UTF-8")
  }
}
element-doo/beepo
code/scala/core/src/main/scala/hr/element/beepo/core/Boot.scala
Scala
bsd-3-clause
402
package com.microsoft.awt.components import com.microsoft.awt.models._ import org.scalajs.angularjs.AngularJsHelper._ import org.scalajs.angularjs._ import org.scalajs.angularjs.fileupload.nervgh.{FileItem, FileUploader, FileUploaderConfig} import org.scalajs.angularjs.toaster.Toaster import org.scalajs.dom.browser.console import org.scalajs.nodejs.util.ScalaJsHelper._ import org.scalajs.sjs.JsUnderOrHelper._ import scala.concurrent.duration._ import scala.language.postfixOps import scala.scalajs.concurrent.JSExecutionContext.Implicits.queue import scala.scalajs.js import scala.util.{Failure, Success} /** * Posting Capabilities * @author lawrence.daniels@gmail.com */ trait PostingCapabilities { self: Controller => def $scope: PostingCapabilitiesScope def fileUploader: FileUploader def postService: PostService def sessionFactory: SessionFactory def $timeout: Timeout def toaster: Toaster def userFactory: UserFactory // define the last post containing a file upload private var lastUploadedPost: Option[Post] = None $scope.posts = emptyArray $scope.tags = emptyArray // initialize the file uploader $scope.uploader = FileUploader(fileUploader, FileUploaderConfig(url = "/api/post/@postID/attachment/@userID")) /////////////////////////////////////////////////////////////////////////// // Post Functions /////////////////////////////////////////////////////////////////////////// $scope.deletePost = (aPost: js.UndefOr[Post]) => { for { user <- sessionFactory.user userID <- user._id post <- aPost postID <- post._id } { if ($scope.isDeletable(aPost) ?== true) { post.deleteLoading = true postService.deletePost(postID) onComplete { case Success(result) => $scope.$apply(() => post.deleteLoading = false) if (result.success && removePostFromList(post)) { toaster.success("Post deleted") } case Failure(e) => $scope.$apply(() => post.deleteLoading = false) console.error(s"Failed while delete the post ($postID) for userID ($userID): ${e.displayMessage}") toaster.error("Error deleting 
post", e.displayMessage) } } else { toaster.warning("Access denied", "You cannot delete this post") } } } private def removePostFromList(post: Post) = { val index = $scope.posts.indexWhere(_._id ?== post._id) val found = index != -1 if (found) $scope.$apply(() => $scope.posts.splice(index, 1)) found } $scope.isDeletable = (aPost: js.UndefOr[Post]) => { for { post <- aPost user <- sessionFactory.user } yield user._id ?== post.submitterId } $scope.isLikedPost = (aPost: js.UndefOr[Post]) => { for { post <- aPost userID <- sessionFactory.user.flatMap(_._id) } yield post.likedBy.exists(_.contains(userID)) } $scope.likePost = (aPost: js.UndefOr[Post]) => likeOrUnlikePost(aPost, like = true) $scope.unlikePost = (aPost: js.UndefOr[Post]) => likeOrUnlikePost(aPost, like = false) private def likeOrUnlikePost(aPost: js.UndefOr[Post], like: Boolean) { val aPostID = aPost.flatMap(_._id) val aUserID = sessionFactory.user.map(_._id) val result = for { post <- aPost.toOption postID <- post._id.toOption userID <- sessionFactory.user.flatMap(_._id).toOption } yield (post, postID, userID) result match { case Some((post, postID, userID)) => post.likeLoading = true val promise = if (like) postService.likePost(postID, userID) else postService.unlikePost(postID, userID) promise onComplete { case Success(updatedPost) => console.log(s"updatedPost = ${angular.toJson(updatedPost, pretty = true)}") $timeout(() => post.likeLoading = false, 1.second) $scope.updatePost(updatedPost) case Failure(e) => $scope.$apply(() => post.likeLoading = false) console.error(s"Failed while liking the post ($aPostID) for userID ($aUserID): ${e.displayMessage}") toaster.error("Error liking a post", e.displayMessage) } case None => console.error(s"Either the post ($aPostID) or userID ($aUserID) was missing") } } $scope.publishPost = (aPost: js.UndefOr[Post]) => { for { user <- sessionFactory.user post <- aPost } { post.loading = true post.submitter = Submitter(user) post.creationTime = new js.Date() // finally, 
save the post savePost(user, post) onComplete { case Success(updatedPost) => console.log(s"updatedPost = ${angular.toJson(updatedPost)}") $timeout(() => post.loading = false, 1.second) // are there files pending for upload? if ($scope.uploader.getNotUploadedItems().nonEmpty) { console.log("Scheduling pending files for upload...") lastUploadedPost = Option(updatedPost) $scope.uploader.uploadAll() } // update the UI $scope.setupNewPost() $scope.updatePost(updatedPost) case Failure(e) => post.loading = false console.error(s"Failed saving a post: ${e.displayMessage}") toaster.error("Posting Error", "General fault while publishing a post") } } } $scope.reloadPost = (aPostID: js.UndefOr[String]) => aPostID foreach { postID => console.log(s"Attempting to reload post $postID...") for (post <- $scope.posts.find(_._id.exists(_ == postID))) { post.loading = true } postService.getPostByID(postID) onComplete { case Success(updatedPost) => $scope.$apply(() => $scope.updatePost(updatedPost)) case Failure(e) => console.error(s"Failed to reload post $postID") } } $scope.setupNewPost = () => { sessionFactory.user.foreach { u => console.log(s"Setting up a new post...") $scope.newPost = Post(u) } } $scope.updatePost = (anUpdatedPost: js.UndefOr[Post]) => anUpdatedPost foreach { updatedPost => $scope.posts.indexWhere(_._id ?== updatedPost._id) match { case -1 => $scope.posts.push(updatedPost) case index => $scope.posts(index) = updatedPost } if (updatedPost.submitter.nonAssigned) { updatedPost.submitterId.flat foreach { submitterId => userFactory.getUserByID(submitterId) onComplete { case Success(user) => $scope.$apply(() => updatedPost.submitter = Submitter(user)) case Failure(e) => toaster.error("Submitter retrieval", e.displayMessage) } } } } /////////////////////////////////////////////////////////////////////////// // Comment Functions /////////////////////////////////////////////////////////////////////////// $scope.isLikedComment = (aComment: js.UndefOr[Comment]) => { for { 
comment <- aComment userID <- sessionFactory.user.flatMap(_._id) } yield comment.likedBy.exists(_.contains(userID)) } $scope.likeComment = (aPostID: js.UndefOr[String], aComment: js.UndefOr[Comment]) => { likeOrUnlikeComment(aPostID, aComment, like = true) } $scope.unlikeComment = (aPostID: js.UndefOr[String], aComment: js.UndefOr[Comment]) => { likeOrUnlikeComment(aPostID, aComment, like = false) } private def likeOrUnlikeComment(aPostID: js.UndefOr[String], aComment: js.UndefOr[Comment], like: Boolean) { val aUserID = sessionFactory.user.flatMap(_._id) val result = for { comment <- aComment.toOption commentID <- comment._id.toOption postID <- aPostID.toOption userID <- aUserID.toOption } yield (comment, postID, commentID, userID) result match { case Some((comment, postID, commentID, userID)) => comment.likeLoading = true val promise = if (like) postService.likeComment(postID, commentID, userID) else postService.unlikeComment(postID, commentID, userID) promise onComplete { case Success(updatedPost) => $timeout(() => comment.likeLoading = false, 1.second) val index = $scope.posts.indexWhere(_._id ?== updatedPost._id) if (index != -1) { console.log(s"Updating post index $index") $scope.$apply(() => $scope.posts(index) = updatedPost) } case Failure(e) => comment.likeLoading = false console.error(s"Failed while liking the comment ($aComment) or userID ($aUserID): ${e.displayMessage}") toaster.error("Error performing LIKE", e.displayMessage) } case None => console.error(s"Either the postID ($aPostID), comment (${aComment.flatMap(_._id)}) or userID ($aUserID) was missing") } } $scope.publishComment = (aPost: js.UndefOr[Post], aComment: js.UndefOr[String]) => { for { post <- aPost postID <- post._id user <- sessionFactory.user text <- aComment } { val submitter = Submitter(user) val comment = Comment(text, submitter) postService.createComment(postID, comment) onComplete { case Success(updatedPost) => $scope.$apply(() => $scope.updatePost(updatedPost)) case Failure(e) => 
console.error(s"Failed while adding a new comment the post ($aPost) or userID (${user._id}): ${e.displayMessage}") toaster.error("Error adding comment", e.displayMessage) } } } $scope.setupNewComment = (aPost: js.UndefOr[Post]) => aPost foreach (_.newComment = true) /////////////////////////////////////////////////////////////////////////// // Reply Functions /////////////////////////////////////////////////////////////////////////// $scope.isLikedReply = (aPost: js.UndefOr[Post], aReply: js.UndefOr[Reply]) => { for { post <- aPost reply <- aReply replyLikes <- post.replyLikes userID <- sessionFactory.user.flatMap(_._id) } yield replyLikes.exists(_.likedBy.exists(_.contains(userID))) } $scope.likeReply = (aPostID: js.UndefOr[String], aCommentID: js.UndefOr[String], aReply: js.UndefOr[Reply]) => { likeOrUnlikeReply(aPostID, aCommentID, aReply, like = true) } $scope.unlikeReply = (aPostID: js.UndefOr[String], aCommentID: js.UndefOr[String], aReply: js.UndefOr[Reply]) => { likeOrUnlikeReply(aPostID, aCommentID, aReply, like = false) } private def likeOrUnlikeReply(aPostID: js.UndefOr[String], aCommentID: js.UndefOr[String], aReply: js.UndefOr[Reply], like: Boolean) { val aUserID = sessionFactory.user.flatMap(_._id) val result = for { postID <- aPostID.toOption commentID <- aCommentID.toOption reply <- aReply.toOption replyID <- reply._id.toOption userID <- aUserID.toOption } yield (reply, postID, commentID, replyID, userID) result match { case Some((reply, postID, commentID, replyID, userID)) => reply.likeLoading = true val promise = if (like) postService.likeReply(postID, commentID, replyID, userID) else postService.unlikeReply(postID, commentID, replyID, userID) promise onComplete { case Success(updatedPost) => $timeout(() => reply.likeLoading = false, 1.second) val index = $scope.posts.indexWhere(_._id ?== updatedPost._id) if (index != -1) { console.log(s"Updating post index $index") $scope.$apply(() => $scope.posts(index) = updatedPost) } case Failure(e) => 
reply.likeLoading = false console.error(s"Failed while liking the reply ($aReply) or userID ($aUserID): ${e.displayMessage}") toaster.error("Error performing LIKE", e.displayMessage) } case None => console.error(s"Either the postID ($aPostID), reply (${angular.toJson(aReply)}) or userID ($aUserID) was missing") } } $scope.publishReply = (aPost: js.UndefOr[Post], aComment: js.UndefOr[Comment], aText: js.UndefOr[String]) => { for { post <- aPost postID <- post._id comment <- aComment commentID <- comment._id user <- sessionFactory.user text <- aText } { val submitter = Submitter(user) val reply = Reply(text, submitter) postService.createReply(postID, commentID, reply) onComplete { case Success(updatedPost) => $scope.$apply { () => comment.replies.foreach(_.push(reply)) comment.newReply = false } case Failure(e) => console.error(s"Failed while adding a new reply the post ($aPost) or userID (${user._id}): ${e.displayMessage}") toaster.error("Error adding reply", e.displayMessage) } } } $scope.setupNewReply = (aComment: js.UndefOr[Comment]) => aComment foreach (_.newReply = true) /////////////////////////////////////////////////////////////////////////// // Tag Functions /////////////////////////////////////////////////////////////////////////// $scope.getTags = (aPost: js.UndefOr[Post]) => aPost flatMap (post => post.text.flatMap(extractHashTags) ?? 
post.tags) private def extractHashTags(text: String): js.UndefOr[js.Array[String]] = { if (text.contains('#')) { val tags = js.Array[String]() var lastPos = -1 do { val start = text.indexOf('#', lastPos) if (start != -1) { val end = text.indexOf(' ', start) val limit = if (end != -1) end else text.length val hashTag = text.substring(start, limit) tags.push(hashTag.tail) lastPos = start + hashTag.length } else lastPos = -1 } while (lastPos != -1 && lastPos < text.length) tags } else js.undefined } $scope.appendTag = (aTag: js.UndefOr[String]) => aTag foreach { tag => console.log(s"Adding '$tag' to filter...") $scope.tags.push(tag) loadPostsByTags($scope.tags) } $scope.removeTag = (aTag: js.UndefOr[String]) => aTag foreach { tag => $scope.tags.indexOf(tag) match { case -1 => case index => $scope.tags.remove(index) loadPostsByTags($scope.tags) } } /////////////////////////////////////////////////////////////////////////// // Private Functions /////////////////////////////////////////////////////////////////////////// private def loadPostsByTags(tags: js.Array[String]) = { $scope.postsLoading = true val outcome = for { posts <- postService.getPostsByTag(tags) enrichedPosts <- userFactory.enrich(posts) } yield enrichedPosts outcome onComplete { case Success(posts) => $timeout(() => $scope.postsLoading = false, 1.second) $scope.$apply(() => $scope.posts = posts) case Failure(e) => $scope.postsLoading = false console.error(s"Error loading posts for tags '${tags.mkString(", ")}'") toaster.error(s"Error loading posts for tags", e.displayMessage) } } private def savePost(user: User, post: Post) = { val alreadySaved = post._id.isAssigned console.log(s"${if (alreadySaved) s"Updating (${post._id}) " else "Saving"} post...") // perform the update (if (alreadySaved) postService.updatePost(post) else postService.createPost(post)) map { post => if (post.submitter.isEmpty) post.submitter = Submitter(user) post } } 
/////////////////////////////////////////////////////////////////////////// // Event Listener Functions /////////////////////////////////////////////////////////////////////////// // clear the queue after all uploads have complete $scope.uploader.onCompleteAll = () => { $scope.uploader.clearQueue() lastUploadedPost.foreach { post => $scope.reloadPost(post._id) lastUploadedPost = None } } // listen for the "onAfterAddingAll" event $scope.uploader.onAfterAddingAll = (addedFileItems: js.Array[FileItem]) => { console.log("Updating upload endpoints for attachments...") console.log(s"newPost = ${angular.toJson($scope.newPost)}") console.log(addedFileItems) for { newPost <- $scope.newPost user <- sessionFactory.user userId <- user._id.flat } { // if the post itself has not already been created ... if (newPost._id.nonAssigned) { newPost.submitter = Submitter(user) postService.createPost(newPost) onComplete { case Success(post) => $scope.$apply { () => newPost._id = post._id for { fileItem <- addedFileItems postId <- newPost._id.flat } { fileItem.url = postService.getUploadURL(postId, userId) } } case Failure(e) => console.error(s"Failed while creating post for upload: ${e.displayMessage}") toaster.error("Post Error", "Failed while creating post for upload") } } else { for { fileItem <- addedFileItems postId <- newPost._id.flat } { fileItem.url = postService.getUploadURL(postId, userId) } } } } } /** * Posting Capabilities Scope * @author lawrence.daniels@gmail.com */ @js.native trait PostingCapabilitiesScope extends Scope { var newPost: js.UndefOr[Post] = js.native var posts: js.Array[Post] = js.native var postsLoading: js.UndefOr[Boolean] = js.native var tags: js.Array[String] = js.native var uploader: FileUploader = js.native // posts var deletePost: js.Function1[js.UndefOr[Post], Unit] = js.native var isDeletable: js.Function1[js.UndefOr[Post], js.UndefOr[Boolean]] = js.native var isLikedPost: js.Function1[js.UndefOr[Post], js.UndefOr[Boolean]] = js.native var likePost: 
js.Function1[js.UndefOr[Post], Unit] = js.native var publishPost: js.Function1[js.UndefOr[Post], Unit] = js.native var reloadPost: js.Function1[js.UndefOr[String], Unit] = js.native var setupNewPost: js.Function0[Unit] = js.native var unlikePost: js.Function1[js.UndefOr[Post], Unit] = js.native var updatePost: js.Function1[js.UndefOr[Post], Unit] = js.native // comments var isLikedComment: js.Function1[js.UndefOr[Comment], js.UndefOr[Boolean]] = js.native var likeComment: js.Function2[js.UndefOr[String], js.UndefOr[Comment], Unit] = js.native var unlikeComment: js.Function2[js.UndefOr[String], js.UndefOr[Comment], Unit] = js.native var publishComment: js.Function2[js.UndefOr[Post], js.UndefOr[String], Unit] = js.native var setupNewComment: js.Function1[js.UndefOr[Post], Unit] = js.native // replies var isLikedReply: js.Function2[js.UndefOr[Post], js.UndefOr[Reply], js.UndefOr[Boolean]] = js.native var likeReply: js.Function3[js.UndefOr[String], js.UndefOr[String], js.UndefOr[Reply], Unit] = js.native var unlikeReply: js.Function3[js.UndefOr[String], js.UndefOr[String], js.UndefOr[Reply], Unit] = js.native var publishReply: js.Function3[js.UndefOr[Post], js.UndefOr[Comment], js.UndefOr[String], Unit] = js.native var setupNewReply: js.Function1[js.UndefOr[Comment], Unit] = js.native // tag functions var getTags: js.Function1[js.UndefOr[Post], js.UndefOr[js.Array[String]]] = js.native var appendTag: js.Function1[js.UndefOr[String], Unit] = js.native var removeTag: js.Function1[js.UndefOr[String], Unit] = js.native }
ldaniels528/awt
app-angularjs/src/main/scala/com/microsoft/awt/components/PostingCapabilities.scala
Scala
apache-2.0
19,378
/** * Copyright 2011-2016 GatlingCorp (http://gatling.io) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.gatling.core.action import io.gatling.commons.util.TimeHelper.nowMillis import io.gatling.core.session.Session import io.gatling.core.stats.StatsEngine import io.gatling.core.stats.message.End import io.gatling.core.stats.writer.UserMessage import akka.actor.ActorRef class Exit(controller: ActorRef, statsEngine: StatsEngine) extends Action { override val name = "gatling-exit" def execute(session: Session): Unit = { logger.debug(s"End user #${session.userId}") session.exit() val userEnd = UserMessage(session, End, nowMillis) statsEngine.logUser(userEnd) controller ! userEnd } }
GabrielPlassard/gatling
gatling-core/src/main/scala/io/gatling/core/action/Exit.scala
Scala
apache-2.0
1,244
import scala.reflect.macros.Context import scala.language.experimental.macros import scala.annotation.StaticAnnotation object doublerMacro { def impl(c: Context)(annottees: c.Expr[Any]*): c.Expr[Any] = { import c.universe._ val result = { def double[T <: Name](name: T): T = { val sdoubled = name.toString + name.toString val doubled = if (name.isTermName) newTermName(sdoubled) else newTypeName(sdoubled) doubled.asInstanceOf[T] } annottees.map(_.tree).toList match { case ClassDef(mods, name, tparams, impl) :: rest => ClassDef(mods, double(name), tparams, impl) :: rest case ModuleDef(mods, name, impl) :: rest => ModuleDef(mods, double(name), impl) :: rest case DefDef(mods, name, tparams, vparamss, tpt, rhs) :: rest => DefDef(mods, double(name), tparams, vparamss, tpt, rhs) :: rest case TypeDef(mods, name, tparams, rhs) :: rest => TypeDef(mods, double(name), tparams, rhs) :: rest case ValDef(mods, name, tpt, rhs) :: rest => ValDef(mods, double(name), tpt, rhs) :: rest } } c.Expr[Any](Block(result, Literal(Constant(())))) } } class doubler1 extends StaticAnnotation { def transform(annottees: Any*) = macro doublerMacro.impl } class doubler2 extends StaticAnnotation { def macroTransform(annottees: Any*) = ??? } class doubler3 extends StaticAnnotation { def macroTransform(annottee: Any) = macro doublerMacro.impl } class doubler4 extends StaticAnnotation { def macroTransform(myAnnottees: Any*) = macro doublerMacro.impl } class doubler5 extends StaticAnnotation { def macroTransform[T](annottees: Any*) = macro doublerMacro.impl }
scalamacros/paradise
tests/src/test/scala/annotations/neg/macro-annotation-badsig/Macros_1.scala
Scala
bsd-3-clause
1,669
// Generated by the Scala Plugin for the Protocol Buffer Compiler. // Do not edit! // // Protofile syntax: PROTO3 package com.google.protobuf.wrappers /** Wrapper message for `bool`. * * The JSON representation for `BoolValue` is JSON `true` and `false`. * * @param value * The bool value. */ @SerialVersionUID(0L) final case class BoolValue( value: _root_.scala.Boolean = false, unknownFields: _root_.scalapb.UnknownFieldSet = _root_.scalapb.UnknownFieldSet.empty ) extends scalapb.GeneratedMessage with scalapb.lenses.Updatable[BoolValue] { @transient private[this] var __serializedSizeMemoized: _root_.scala.Int = 0 private[this] def __computeSerializedSize(): _root_.scala.Int = { var __size = 0 { val __value = value if (__value != false) { __size += _root_.com.google.protobuf.CodedOutputStream.computeBoolSize(1, __value) } }; __size += unknownFields.serializedSize __size } override def serializedSize: _root_.scala.Int = { var __size = __serializedSizeMemoized if (__size == 0) { __size = __computeSerializedSize() + 1 __serializedSizeMemoized = __size } __size - 1 } def writeTo(`_output__`: _root_.com.google.protobuf.CodedOutputStream): _root_.scala.Unit = { { val __v = value if (__v != false) { _output__.writeBool(1, __v) } }; unknownFields.writeTo(_output__) } def withValue(__v: _root_.scala.Boolean): BoolValue = copy(value = __v) def withUnknownFields(__v: _root_.scalapb.UnknownFieldSet) = copy(unknownFields = __v) def discardUnknownFields = copy(unknownFields = _root_.scalapb.UnknownFieldSet.empty) def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = { (__fieldNumber: @_root_.scala.unchecked) match { case 1 => { val __t = value if (__t != false) __t else null } } } def getField(__field: _root_.scalapb.descriptors.FieldDescriptor): _root_.scalapb.descriptors.PValue = { _root_.scala.Predef.require(__field.containingMessage eq companion.scalaDescriptor) (__field.number: @_root_.scala.unchecked) match { case 1 => 
_root_.scalapb.descriptors.PBoolean(value) } } def toProtoString: _root_.scala.Predef.String = _root_.scalapb.TextFormat.printToUnicodeString(this) def companion: com.google.protobuf.wrappers.BoolValue.type = com.google.protobuf.wrappers.BoolValue // @@protoc_insertion_point(GeneratedMessage[google.protobuf.BoolValue]) } object BoolValue extends scalapb.GeneratedMessageCompanion[com.google.protobuf.wrappers.BoolValue] with scalapb.JavaProtoSupport[com.google.protobuf.wrappers.BoolValue, com.google.protobuf.BoolValue] { implicit def messageCompanion: scalapb.GeneratedMessageCompanion[com.google.protobuf.wrappers.BoolValue] with scalapb.JavaProtoSupport[com.google.protobuf.wrappers.BoolValue, com.google.protobuf.BoolValue] = this def toJavaProto(scalaPbSource: com.google.protobuf.wrappers.BoolValue): com.google.protobuf.BoolValue = { val javaPbOut = com.google.protobuf.BoolValue.newBuilder javaPbOut.setValue(scalaPbSource.value) javaPbOut.build } def fromJavaProto(javaPbSource: com.google.protobuf.BoolValue): com.google.protobuf.wrappers.BoolValue = com.google.protobuf.wrappers.BoolValue( value = javaPbSource.getValue.booleanValue ) def parseFrom(`_input__`: _root_.com.google.protobuf.CodedInputStream): com.google.protobuf.wrappers.BoolValue = { var __value: _root_.scala.Boolean = false var `_unknownFields__`: _root_.scalapb.UnknownFieldSet.Builder = null var _done__ = false while (!_done__) { val _tag__ = _input__.readTag() _tag__ match { case 0 => _done__ = true case 8 => __value = _input__.readBool() case tag => if (_unknownFields__ == null) { _unknownFields__ = new _root_.scalapb.UnknownFieldSet.Builder() } _unknownFields__.parseField(tag, _input__) } } com.google.protobuf.wrappers.BoolValue( value = __value, unknownFields = if (_unknownFields__ == null) _root_.scalapb.UnknownFieldSet.empty else _unknownFields__.result() ) } implicit def messageReads: _root_.scalapb.descriptors.Reads[com.google.protobuf.wrappers.BoolValue] = _root_.scalapb.descriptors.Reads{ case 
_root_.scalapb.descriptors.PMessage(__fieldsMap) => _root_.scala.Predef.require(__fieldsMap.keys.forall(_.containingMessage eq scalaDescriptor), "FieldDescriptor does not match message type.") com.google.protobuf.wrappers.BoolValue( value = __fieldsMap.get(scalaDescriptor.findFieldByNumber(1).get).map(_.as[_root_.scala.Boolean]).getOrElse(false) ) case _ => throw new RuntimeException("Expected PMessage") } def javaDescriptor: _root_.com.google.protobuf.Descriptors.Descriptor = WrappersProto.javaDescriptor.getMessageTypes().get(6) def scalaDescriptor: _root_.scalapb.descriptors.Descriptor = WrappersProto.scalaDescriptor.messages(6) def messageCompanionForFieldNumber(__number: _root_.scala.Int): _root_.scalapb.GeneratedMessageCompanion[_] = throw new MatchError(__number) lazy val nestedMessagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] = Seq.empty def enumCompanionForFieldNumber(__fieldNumber: _root_.scala.Int): _root_.scalapb.GeneratedEnumCompanion[_] = throw new MatchError(__fieldNumber) lazy val defaultInstance = com.google.protobuf.wrappers.BoolValue( value = false ) implicit class BoolValueLens[UpperPB](_l: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.wrappers.BoolValue]) extends _root_.scalapb.lenses.ObjectLens[UpperPB, com.google.protobuf.wrappers.BoolValue](_l) { def value: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Boolean] = field(_.value)((c_, f_) => c_.copy(value = f_)) } final val VALUE_FIELD_NUMBER = 1 def of( value: _root_.scala.Boolean ): _root_.com.google.protobuf.wrappers.BoolValue = _root_.com.google.protobuf.wrappers.BoolValue( value ) // @@protoc_insertion_point(GeneratedMessageCompanion[google.protobuf.BoolValue]) }
scalapb/ScalaPB
scalapb-runtime/src/main/scalajvm/com/google/protobuf/wrappers/BoolValue.scala
Scala
apache-2.0
6,291
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution import org.apache.spark.sql.catalyst.expressions.{Alias, AttributeMap, AttributeReference, Expression, NamedExpression, SortOrder} import org.apache.spark.sql.catalyst.plans.physical.{HashPartitioning, Partitioning, PartitioningCollection, UnknownPartitioning} /** * A trait that provides functionality to handle aliases in the `outputExpressions`. */ trait AliasAwareOutputExpression extends UnaryExecNode { protected def outputExpressions: Seq[NamedExpression] private lazy val aliasMap = AttributeMap(outputExpressions.collect { case a @ Alias(child: AttributeReference, _) => (child, a.toAttribute) }) protected def hasAlias: Boolean = aliasMap.nonEmpty protected def normalizeExpression(exp: Expression): Expression = { exp.transform { case attr: AttributeReference => aliasMap.getOrElse(attr, attr) } } } /** * A trait that handles aliases in the `outputExpressions` to produce `outputPartitioning` that * satisfies distribution requirements. 
*/ trait AliasAwareOutputPartitioning extends AliasAwareOutputExpression { final override def outputPartitioning: Partitioning = { val normalizedOutputPartitioning = if (hasAlias) { child.outputPartitioning match { case e: Expression => normalizeExpression(e).asInstanceOf[Partitioning] case other => other } } else { child.outputPartitioning } flattenPartitioning(normalizedOutputPartitioning).filter { case hashPartitioning: HashPartitioning => hashPartitioning.references.subsetOf(outputSet) case _ => true } match { case Seq() => UnknownPartitioning(child.outputPartitioning.numPartitions) case Seq(singlePartitioning) => singlePartitioning case seqWithMultiplePartitionings => PartitioningCollection(seqWithMultiplePartitionings) } } private def flattenPartitioning(partitioning: Partitioning): Seq[Partitioning] = { partitioning match { case PartitioningCollection(childPartitionings) => childPartitionings.flatMap(flattenPartitioning) case rest => rest +: Nil } } } /** * A trait that handles aliases in the `orderingExpressions` to produce `outputOrdering` that * satisfies ordering requirements. */ trait AliasAwareOutputOrdering extends AliasAwareOutputExpression { protected def orderingExpressions: Seq[SortOrder] final override def outputOrdering: Seq[SortOrder] = { if (hasAlias) { orderingExpressions.map(normalizeExpression(_).asInstanceOf[SortOrder]) } else { orderingExpressions } } }
maropu/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/AliasAwareOutputExpression.scala
Scala
apache-2.0
3,412
package com.twitter.finatra.json.internal.caseclass.validation.validators

import com.twitter.finatra.json.internal.caseclass.validation.validators.CountryCodeValidator._
import com.twitter.finatra.validation.{CountryCode, ErrorCode, ValidationMessageResolver, ValidationResult, Validator}
import java.util.Locale

object CountryCodeValidator {

  /** Resolves the error message produced for invalid country-code values. */
  def errorMessage(
    resolver: ValidationMessageResolver,
    value: Any) = {

    resolver.resolve(
      classOf[CountryCode],
      toErrorValue(value))
  }

  /** Renders a scalar or collection value as a comma-separated string for error messages. */
  private def toErrorValue(value: Any) = {
    value match {
      case arrayValue: Array[_] =>
        arrayValue mkString ","
      case traversableValue: Traversable[_] =>
        traversableValue mkString ","
      case anyValue =>
        anyValue.toString
    }
  }
}

/**
 * Validates that a value — or every element of an array/traversable value — is an
 * ISO 3166-1 alpha-2 country code, as reported by `java.util.Locale.getISOCountries`.
 */
class CountryCodeValidator(
  validationMessageResolver: ValidationMessageResolver,
  annotation: CountryCode)
  extends Validator[CountryCode, Any](
    validationMessageResolver,
    annotation) {

  // Valid codes are always two uppercase ASCII letters (e.g. "US", "FI").
  private val countryCodes = Locale.getISOCountries.toSet

  /* Public */

  override def isValid(value: Any): ValidationResult = {
    value match {
      case typedValue: Array[Any] =>
        validationResult(typedValue)
      case typedValue: Traversable[Any] =>
        validationResult(typedValue)
      case anyValue =>
        validationResult(
          Seq(anyValue.toString))
    }
  }

  /* Private */

  private def findInvalidCountryCodes(values: Traversable[Any]) = {
    // BUG FIX: use Locale.ROOT for locale-independent case mapping. With the default
    // locale, e.g. Turkish, "i".toUpperCase yields the dotted capital 'İ', so valid
    // lowercase codes such as "fi" would never match the ASCII codes in countryCodes.
    val uppercaseCountryCodes = values.toSet map { value: Any =>
      value.toString.toUpperCase(Locale.ROOT)
    }
    // Any uppercased input not present in the ISO set is invalid.
    uppercaseCountryCodes diff countryCodes
  }

  private def validationResult(value: Traversable[Any]) = {
    val invalidCountryCodes = findInvalidCountryCodes(value)
    ValidationResult.validate(
      invalidCountryCodes.isEmpty,
      errorMessage(
        validationMessageResolver,
        value),
      ErrorCode.InvalidCountryCodes(invalidCountryCodes))
  }
}
joecwu/finatra
jackson/src/main/scala/com/twitter/finatra/json/internal/caseclass/validation/validators/CountryCodeValidator.scala
Scala
apache-2.0
1,943
/* * Copyright 2015-2016 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package whisk.core.container import java.nio.file.Files import java.nio.file.Paths import java.util.Timer import java.util.TimerTask import java.util.concurrent.ConcurrentLinkedQueue import java.util.concurrent.locks.ReentrantLock import scala.annotation.tailrec import scala.collection.concurrent.TrieMap import scala.collection.mutable.ListBuffer import scala.concurrent.Future import scala.concurrent.duration._ import scala.util.{ Try, Success, Failure } import akka.actor.ActorSystem import whisk.common.Counter import whisk.common.Logging import whisk.common.Scheduler import whisk.common.TimingUtil import whisk.common.TransactionId import whisk.core.WhiskConfig import whisk.core.WhiskConfig._ import whisk.core.entity._ /** * A thread-safe container pool that internalizes container creation/teardown and allows users * to check out a container. * * Synchronization via "this" is used to maintain integrity of the data structures. * A separate object "gcSync" is used to prevent multiple GC's from occurring. * * TODO: Parallel container creation under evaluation for docker 12. 
 */
class ContainerPool(
    config: WhiskConfig,
    invokerInstance: Integer = 0,
    standalone: Boolean = false,
    saveContainerLog: Boolean = false)(implicit actorSystem: ActorSystem, val logging: Logging)
    extends ContainerUtils {

    implicit val executionContext = actorSystem.dispatcher

    // These must be defined before verbosity is set
    // NOTE(review): datastore and authStore appear unused within this class —
    // possibly retained for construction side effects or subclasses; verify before removing.
    private val datastore = WhiskEntityStore.datastore(config)
    private val authStore = WhiskAuthStore.datastore(config)

    val mounted = !standalone
    val dockerhost = config.selfDockerEndpoint
    val serializeDockerOp = config.invokerSerializeDockerOp.toBoolean
    val serializeDockerPull = config.invokerSerializeDockerPull.toBoolean
    val useRunc = checkRuncAccess(config.invokerUseRunc.toBoolean)

    logging.info(this, s"dockerhost = $dockerhost serializeDockerOp = $serializeDockerOp serializeDockerPull = $serializeDockerPull useRunC = $useRunc")

    // Eventually, we will have a more sophisticated warmup strategy that does multiple sizes
    private val defaultMemoryLimit = MemoryLimit(MemoryLimit.STD_MEMORY)

    private val NODEJS6_IMAGE = ExecManifest.runtimesManifest.manifests("nodejs:6").image

    /**
     * Check whether we should use runc. To do so,
     * 1. The whisk config flag must be on.
     * 2. Runc must be successfully accessible. This is a failsafe in case runc is not set up correctly.
     * For this stage, logging shows success or failure if we get this far.
     */
    def checkRuncAccess(useRunc: Boolean): Boolean = {
        if (useRunc) {
            implicit val tid = TransactionId.invokerNanny
            // Probe runc by listing containers; a zero exit code means runc is usable.
            val (code, result) = RuncUtils.list()
            val success = (code == 0)
            if (success) {
                logging.info(this, s"Using runc. list result: ${result}")
            } else {
                logging.warn(this, s"Not using runc due to error (code = ${code}): ${result}")
            }
            success
        } else {
            logging.info(this, s"Not using runc because of configuration flag")
            false
        }
    }

    /**
     * Enables GC.
     */
    def enableGC(): Unit = {
        gcOn = true
    }

    /**
     * Disables GC. If disabled, overrides other flags/methods.
     */
    def disableGC(): Unit = {
        gcOn = false
    }

    /**
     * Performs a GC immediately of all idle containers, blocking the caller until completed.
     */
    def forceGC()(implicit transid: TransactionId): Unit = {
        removeAllIdle({ containerInfo => true })
    }

    /*
     * Getter/Setter for various GC parameters.
     */
    def gcThreshold: FiniteDuration = _gcThreshold
    def maxIdle: Int = _maxIdle // container count
    def maxActive: Int = _maxActive // container count
    // Setters clamp to sane minimums (non-negative counts, non-negative duration).
    def gcThreshold_=(value: FiniteDuration): Unit = _gcThreshold = (Duration.Zero max value)
    def maxIdle_=(value: Int): Unit = _maxIdle = Math.max(0, value)
    def maxActive_=(value: Int): Unit = _maxActive = Math.max(0, value)

    def resetMaxIdle() = _maxIdle = defaultMaxIdle
    def resetMaxActive() = {
        _maxActive = ContainerPool.getDefaultMaxActive(config)
        logging.info(this, s"maxActive set to ${_maxActive}")
    }
    def resetGCThreshold() = _gcThreshold = defaultGCThreshold

    /*
     * Controls where docker container logs are put.
     */
    def logDir: String = _logDir
    def logDir_=(value: String): Unit = _logDir = value

    /*
     * How many containers are in the pool at the moment?
     * There are also counts of containers we are trying to start but have not inserted into the data structure.
     */
    def idleCount() = countByState(State.Idle)
    def activeCount() = countByState(State.Active)
    private val startingCounter = new Counter()
    private var shuttingDown = false

    /*
     * Tracks requests for getting containers.
     * The first value doled out via nextPosition.next() will be 1 and completedPosition.cur remains at 0 until completion.
     */
    private val nextPosition = new Counter()
    private val completedPosition = new Counter()

    /*
     * Lists ALL containers at this docker point with "docker ps -a --no-trunc".
     * This could include containers not in this pool at all.
     */
    def listAll()(implicit transid: TransactionId): Seq[ContainerState] = listContainers(true)

    /**
     * Retrieves (possibly create) a container based on the subject and versioned action.
     * A flag is included to indicate whether initialization succeeded.
     * The invariant of returning the container back to the pool holds regardless of whether init succeeded or not.
     * In case of failure to start a container (or for failed docker operations e.g., pull), an exception is thrown.
     */
    def getAction(action: WhiskAction, auth: AuthKey)(implicit transid: TransactionId): (WhiskContainer, Option[RunResult]) = {
        if (shuttingDown) {
            logging.info(this, s"Shutting down: Not getting container for ${action.fullyQualifiedName(true)} with ${auth.uuid}")
            throw new Exception("system is shutting down")
        } else {
            val key = ActionContainerId(auth.uuid, action.fullyQualifiedName(true).toString, action.rev)
            val myPos = nextPosition.next()
            logging.info(this, s"""Getting container for ${action.fullyQualifiedName(true)} of kind ${action.exec.kind} with ${auth.uuid}:
                                  | myPos = $myPos
                                  | completed = ${completedPosition.cur}
                                  | slack = ${slack()}
                                  | activeCount = ${activeCount()}
                                  | toBeRemoved = ${toBeRemoved.size}
                                  | startingCounter = ${startingCounter.cur}""".stripMargin)
            // completedPosition is advanced even on failure so waiters behind us are not blocked forever.
            val conResult = Try(getContainer(1, myPos, key, () => makeWhiskContainer(action, auth)))
            completedPosition.next()
            conResult match {
                case Success(Cold(con)) =>
                    logging.info(this, s"Obtained cold container ${con.containerId.id} - about to initialize")
                    val initResult = initWhiskContainer(action, con)
                    (con, Some(initResult))
                case Success(Warm(con)) =>
                    logging.info(this, s"Obtained warm container ${con.containerId.id}")
                    (con, None)
                case Failure(t) =>
                    logging.error(this, s"Exception while trying to get a container: $t")
                    throw t
            }
        }
    }

    /*
     * For testing by ContainerPoolTests where non whisk containers are used.
     * These do not require initialization.
     */
    def getByImageName(imageName: String, args: Array[String])(implicit transid: TransactionId): Option[Container] = {
        logging.info(this, s"Getting container for image $imageName with args " + args.mkString(" "))
        // Not a regular key. Doesn't matter in testing.
        val key = new ActionContainerId(s"instantiated." + imageName + args.mkString("_"))
        getContainer(1, 0, key, () => makeContainer(key, imageName, args)) match {
            case Cold(con) => Some(con)
            case Warm(con) => Some(con)
            case _ => None
        }
    }

    /**
     * Tries to get/create a container via the thunk by delegating to getOrMake.
     * This method will apply retry so that the caller is blocked until retry succeeds.
     */
    @tailrec
    final def getContainer(tryCount: Int, position: Long, key: ActionContainerId, conMaker: () => WhiskContainer)(implicit transid: TransactionId): ContainerResult = {
        val positionInLine = position - completedPosition.cur // Indicates queue position. 1 means front of the line
        val available = slack()
        // Warn at 10 seconds and then once a minute after that.
        val waitDur = 50.millis
        val warnAtCount = 10.seconds.toMillis / waitDur.toMillis
        val warnPeriodic = 60.seconds.toMillis / waitDur.toMillis
        if (tryCount == warnAtCount || tryCount % warnPeriodic == 0) {
            logging.warn(this, s"""getContainer has been waiting about ${warnAtCount * waitDur.toMillis} ms:
                                  | position = $position
                                  | completed = ${completedPosition.cur}
                                  | slack = $available
                                  | maxActive = ${_maxActive}
                                  | activeCount = ${activeCount()}
                                  | startingCounter = ${startingCounter.cur}""".stripMargin)
        }
        if (positionInLine <= available) {
            getOrMake(key, conMaker) match {
                case Some(cr) => cr
                case None => getContainer(tryCount + 1, position, key, conMaker)
            }
        } else { // It's not our turn in line yet.
            Thread.sleep(waitDur.toMillis) // TODO: Replace with wait/notify but tricky because of desire for maximal concurrency
            getContainer(tryCount + 1, position, key, conMaker)
        }
    }

    def getNumberOfIdleContainers(key: ActionContainerId)(implicit transid: TransactionId): Int = {
        this.synchronized {
            keyMap.get(key) map { bucket => bucket.count { _.isIdle } } getOrElse 0
        }
    }

    /*
     * How many containers can we start? Someone could have fully started a container so we must include startingCounter.
     * The use of a method rather than a getter is meant to signify the synchronization in the implementation.
     */
    private def slack() = _maxActive - (activeCount() + startingCounter.cur + Math.max(toBeRemoved.size - RM_SLACK, 0))

    /*
     * Try to get or create a container, returning None if there are too many
     * active containers.
     *
     * The multiple synchronization block, and the use of startingCounter,
     * is needed to make sure container count is accurately tracked,
     * data structure maintains integrity, but to keep all length operations
     * outside of the lock.
     *
     * The returned container will be active (not paused).
     */
    def getOrMake(key: ActionContainerId, conMaker: () => WhiskContainer)(implicit transid: TransactionId): Option[ContainerResult] = {
        retrieve(key) match {
            case CacheMiss => {
                // conMaker() (slow docker work) deliberately runs outside the lock.
                val con = conMaker()
                this.synchronized {
                    introduceContainer(key, con).state = State.Active
                }
                Some(Cold(con))
            }
            case CacheHit(con) =>
                con.transid = transid
                runDockerOp {
                    if (con.unpause()) {
                        Some(Warm(con))
                    } else { // resume failed, gc the container
                        putBack(con, delete = true)
                        None
                    }
                }
            case CacheBusy => None
        }
    }

    /**
     * Obtains a pre-existing container from the pool - and putting it to Active state but without docker unpausing.
     * If we are over capacity, signal Busy.
     * If it does not exist ready to do, indicate a miss.
     */
    def retrieve(key: ActionContainerId)(implicit transid: TransactionId): CacheResult = {
        this.synchronized {
            // first check if there is a matching container and only if there aren't any
            // determine if the pool is full or has capacity to accommodate a new container;
            // this allows any new containers introduced into the pool to be reused if already idle
            val bucket = keyMap.getOrElseUpdate(key, new ListBuffer())
            bucket.find({ ci => ci.isIdle }) match {
                case None =>
                    if (activeCount() + startingCounter.cur >= _maxActive) {
                        CacheBusy
                    } else {
                        CacheMiss
                    }
                case Some(ci) => {
                    ci.state = State.Active
                    CacheHit(ci.container)
                }
            }
        }
    }

    /**
     * Moves a container from one bucket (i.e. key) to a different one.
     * This operation is performed when we specialize a pre-warmed container to an action.
     * ContainerMap does not need to be updated as the Container <-> ContainerInfo relationship does not change.
     */
    def changeKey(ci: ContainerInfo, oldKey: ActionContainerId, newKey: ActionContainerId)(implicit transid: TransactionId) = {
        this.synchronized {
            assert(ci.state == State.Active)
            assert(keyMap.contains(oldKey))
            val oldBucket = keyMap(oldKey)
            val newBucket = keyMap.getOrElseUpdate(newKey, new ListBuffer())
            oldBucket -= ci
            newBucket += ci
        }
    }

    /**
     * Returns the container to the pool or delete altogether.
     * This call can be slow but not while locking data structure so it does not interfere with other activations.
     */
    def putBack(container: Container, delete: Boolean = false)(implicit transid: TransactionId): Unit = {
        logging.info(this, s"""putBack returning container ${container.id}
                              | delete = $delete
                              | completed = ${completedPosition.cur}
                              | slack = ${slack()}
                              | maxActive = ${_maxActive}
                              | activeCount = ${activeCount()}
                              | startingCounter = ${startingCounter.cur}""".stripMargin)
        // Docker operation outside sync block. Don't pause if we are deleting.
        if (!delete) {
            runDockerOp {
                // pausing eagerly is pessimal; there could be an action waiting
                // that will immediately unpause the same container to reuse it;
                // to skip pausing, will need to inspect the queue of waiting activations
                // for a matching key
                container.pause()
            }
        }
        val toBeDeleted = this.synchronized { // Return container to pool logically and then optionally delete
            // Always put back logically for consistency
            val Some(ci) = containerMap.get(container)
            assert(ci.state == State.Active)
            ci.lastUsed = System.currentTimeMillis()
            ci.state = State.Idle
            val toBeDeleted = if (delete) {
                removeContainerInfo(ci) // no docker operation here
                List(ci)
            } else {
                List()
            }
            this.notify()
            toBeDeleted
        }
        toBeDeleted.foreach { ci => toBeRemoved.offer(RemoveJob(false, ci)) }
        // Perform capacity-based GC here.
        if (gcOn) { // Synchronization occurs inside calls in a fine-grained manner.
            while (idleCount() > _maxIdle) { // it is safe for this to be non-atomic with body
                removeOldestIdle()
            }
        }
    }

    // ------------------------------------------------------------------------------------------------------------

    object State extends Enumeration {
        val Idle, Active = Value
    }

    /**
     * Wraps a Container to allow a ContainerPool-specific information.
     */
    class ContainerInfo(k: ActionContainerId, con: WhiskContainer) {
        val key = k
        val container = con
        var state = State.Idle
        var lastUsed = System.currentTimeMillis()
        def isIdle = state == State.Idle
        def isStemCell = key == stemCellNodejsKey
    }

    private val containerMap = new TrieMap[Container, ContainerInfo]
    private val keyMap = new TrieMap[ActionContainerId, ListBuffer[ContainerInfo]]

    // These are containers that are already removed from the data structure waiting to be docker-removed
    case class RemoveJob(needUnpause: Boolean, containerInfo: ContainerInfo)
    private val toBeRemoved = new ConcurrentLinkedQueue[RemoveJob]

    // Note that the prefix separates the name space of this from regular keys.
    // TODO: Generalize across language by storing image name when we generalize to other languages
    //       Better heuristic for # of containers to keep warm - make sensitive to idle capacity
    private val stemCellNodejsKey = StemCellNodeJsActionContainerId
    private val WARM_NODEJS_CONTAINERS = 2

    // This parameter controls how many outstanding un-removed containers there are before
    // we stop stem cell container creation. This is also the an allowance in slack calculation
    // to allow limited de-coupling between container removal and creation when under load.
    private val RM_SLACK = 4

    private def keyMapToString(): String = {
        keyMap.map(p => s"[${p._1.stringRepr} -> ${p._2}]").mkString(" ")
    }

    // Easier to walk containerMap than keyMap
    private def countByState(state: State.Value) = this.synchronized { containerMap.count({ case (_, ci) => ci.state == state }) }

    // Sample container name: wsk1_1_joeibmcomhelloWorldDemo_20150901T202701852Z
    private def makeContainerName(localName: String): ContainerName =
        ContainerCounter.containerName(invokerInstance.toString(), localName)

    private def makeContainerName(action: WhiskAction): ContainerName =
        makeContainerName(action.fullyQualifiedName(true).toString)

    /**
     * dockerLock is a fair lock used to serialize all docker operations except pull.
     * However, a non-pull operation can run concurrently with a pull operation.
     */
    val dockerLock = new ReentrantLock(true)

    /**
     * dockerPullLock is used to serialize all pull operations.
     */
    val dockerPullLock = new ReentrantLock(true)

    /* A background thread that
     * 1. Kills leftover action containers on startup
     * 2. (actually a Future) Periodically re-populates the container pool with fresh (un-instantiated) nodejs containers.
     * 3. Periodically tears down containers that have logically been removed from the system
     * 4. Each of the significant method subcalls are guarded to not throw an exception.
     */
    private def nannyThread(allContainers: Seq[ContainerState]) = new Thread {
        override def run {
            implicit val tid = TransactionId.invokerNanny
            if (mounted) {
                killStragglers(allContainers)
                // Create a new stem cell if the number of warm containers is less than the count allowed
                // as long as there is slack so that any actions that may be waiting to create a container
                // are not held back; Note since this method is not fully synchronized, it is possible to
                // start this operation while there is slack and end up waiting on the docker lock later
                val warmupInterval = 100.milliseconds
                Scheduler.scheduleWaitAtLeast(warmupInterval) { () =>
                    implicit val tid = TransactionId.invokerWarmup
                    if (getNumberOfIdleContainers(stemCellNodejsKey) < WARM_NODEJS_CONTAINERS && slack() > 0 && toBeRemoved.size < RM_SLACK) {
                        addStemCellNodejsContainer()(tid)
                    } else {
                        Future.successful(())
                    }
                }
            }
            while (true) {
                Thread.sleep(100) // serves to prevent busy looping
                // We grab the size first so we know there has been enough delay for anything we are shutting down
                val size = toBeRemoved.size()
                1 to size foreach { _ =>
                    val removeJob = toBeRemoved.poll()
                    if (removeJob != null) {
                        Thread.sleep(100) // serves to not hog docker lock and add slack
                        teardownContainer(removeJob)
                    } else {
                        logging.error(this, "toBeRemove.poll failed - possibly another concurrent remover?")
                    }
                }
            }
        }
    }

    /**
     * Gracefully terminates by shutting down containers upon SIGTERM.
     * If one desires to kill the invoker without this, send it SIGKILL.
     */
    private def shutdown() = {
        implicit val id = TransactionId.invokerWarmup
        shuttingDown = true
        killStragglers(listAll())
    }

    /**
     * All docker operations from the pool must pass through here (except for pull).
     */
    private def runDockerOp[T](dockerOp: => T)(implicit transid: TransactionId): T = {
        runDockerOpWithLock(serializeDockerOp, dockerLock, dockerOp)
    }

    /**
     * All docker pull operations from the pool must pass through here.
     */
    private def runDockerPull[T](dockerOp: => T)(implicit transid: TransactionId): T = {
        runDockerOpWithLock(serializeDockerPull, dockerPullLock, dockerOp)
    }

    /**
     * All docker operations from the pool must pass through here (except for pull).
     */
    private def runDockerOpWithLock[T](useLock: Boolean, lock: ReentrantLock, dockerOp: => T)(implicit transid: TransactionId): T = {
        if (useLock) {
            lock.lock()
        }
        try {
            val (elapsed, result) = TimingUtil.time {
                dockerOp
            }
            if (elapsed > slowDockerThreshold) {
                logging.warn(this, s"Docker operation took $elapsed")
            }
            result
        } finally {
            if (useLock) {
                lock.unlock()
            }
        }
    }

    /*
     * This method will introduce a stem cell container into the system.
     * If container creation fails, the container will not be entered into the pool.
     */
    private def addStemCellNodejsContainer()(implicit transid: TransactionId) = Future {
        val imageName = NODEJS6_IMAGE.localImageName(config.dockerRegistry, config.dockerImagePrefix, Some(config.dockerImageTag))
        val limits = ActionLimits(TimeLimit(), defaultMemoryLimit, LogLimit())
        val containerName = makeContainerName("warmJsContainer")
        logging.info(this, "Starting warm nodejs container")
        val con = makeGeneralContainer(stemCellNodejsKey, containerName, imageName, limits, false)
        this.synchronized {
            introduceContainer(stemCellNodejsKey, con)
        }
        logging.info(this, s"Started warm nodejs container ${con.id}: ${con.containerId}")
    } andThen {
        case Failure(t) => logging.warn(this, s"addStemCellNodejsContainer encountered an exception: ${t.getMessage}")
    }

    // Promotes a pre-warmed (stem cell) container to the given action key, if one is idle.
    private def getStemCellNodejsContainer(key: ActionContainerId)(implicit transid: TransactionId): Option[WhiskContainer] =
        retrieve(stemCellNodejsKey) match {
            case CacheHit(con) =>
                logging.info(this, s"Obtained a pre-warmed container")
                con.transid = transid
                val Some(ci) = containerMap.get(con)
                changeKey(ci, stemCellNodejsKey, key)
                Some(con)
            case _ => None
        }

    // Obtain a container (by creation or promotion) and initialize by sending code.
    private def makeWhiskContainer(action: WhiskAction, auth: AuthKey)(implicit transid: TransactionId): WhiskContainer = {
        val imageName = getDockerImageName(action)
        val limits = action.limits
        val nodeImageName = NODEJS6_IMAGE.localImageName(config.dockerRegistry, config.dockerImagePrefix, Some(config.dockerImageTag))
        val key = ActionContainerId(auth.uuid, action.fullyQualifiedName(true).toString, action.rev)
        // A stem cell can only be reused when the action needs the default memory and the nodejs image.
        val warmedContainer = if (limits.memory == defaultMemoryLimit && imageName == nodeImageName) getStemCellNodejsContainer(key) else None
        val containerName = makeContainerName(action)
        warmedContainer getOrElse {
            try {
                logging.info(this, s"making new container because none available")
                startingCounter.next()
                // only Exec instances that are subtypes of CodeExec reach the invoker
                makeGeneralContainer(key, containerName, imageName, limits, action.exec.asInstanceOf[CodeExec[_]].pull)
            } finally {
                val newCount = startingCounter.prev()
                logging.info(this, s"finished trying to make container, $newCount more containers to start")
            }
        }
    }

    // Make a container somewhat generically without introducing into data structure.
    // There is access to global settings (docker registry)
    // and generic settings (image name - static limits) but without access to WhiskAction.
    private def makeGeneralContainer(
        key: ActionContainerId, containerName: ContainerName,
        imageName: String, limits: ActionLimits, pull: Boolean)(
            implicit transid: TransactionId): WhiskContainer = {
        val network = config.invokerContainerNetwork
        val cpuShare = ContainerPool.cpuShare(config)
        val policy = config.invokerContainerPolicy
        val dnsServers = config.invokerContainerDns
        val env = getContainerEnvironment()
        // distinguishes between a black box container vs a whisk container
        def disambiguateContainerError[T](op: => T) = {
            try { op } catch {
                case e: ContainerError =>
                    throw if (pull) BlackBoxContainerError(e.msg) else WhiskContainerError(e.msg)
            }
        }
        if (pull) runDockerPull {
            disambiguateContainerError {
                // if pull fails, the transaction is aborted by throwing an exception;
                // a pull is only done for black box container
                ContainerUtils.pullImage(dockerhost, imageName)
            }
        }
        // This will start up the container
        runDockerOp {
            disambiguateContainerError {
                // because of the docker lock, by the time the container gets around to be started
                // there could be a container to reuse (from a previous run of the same action, or
                // from a stem cell container); should revisit this logic
                new WhiskContainer(transid, useRunc, this.dockerhost, mounted, key, containerName, imageName,
                    network, cpuShare, policy, dnsServers, env, limits)
            }
        }
    }

    // We send the payload here but eventually must also handle morphing a pre-allocated container into the right state.
    private def initWhiskContainer(action: WhiskAction, con: WhiskContainer)(implicit transid: TransactionId): RunResult = {
        // only Exec instances that are subtypes of CodeExec reach the invoker
        val Some(initArg) = action.containerInitializer
        // Then send it the init payload which is code for now
        con.init(initArg, action.limits.timeout.duration)
    }

    /**
     * Used through testing only. Creates a container running the command in `args`.
     */
    private def makeContainer(key: ActionContainerId, imageName: String, args: Array[String])(implicit transid: TransactionId): WhiskContainer = {
        runDockerOp {
            new WhiskContainer(transid, useRunc, this.dockerhost, mounted, key, makeContainerName("testContainer"), imageName,
                config.invokerContainerNetwork, ContainerPool.cpuShare(config), config.invokerContainerPolicy, config.invokerContainerDns, Map(), ActionLimits(), args)
        }
    }

    /**
     * Adds the container into the data structure in an Idle state.
     * The caller must have synchronized to maintain data structure atomicity.
     */
    private def introduceContainer(key: ActionContainerId, container: WhiskContainer)(implicit transid: TransactionId): ContainerInfo = {
        val ci = new ContainerInfo(key, container)
        keyMap.getOrElseUpdate(key, ListBuffer()) += ci
        containerMap += container -> ci
        dumpState("introduceContainer")
        ci
    }

    private def dumpState(prefix: String)(implicit transid: TransactionId) = {
        logging.debug(this, s"$prefix: keyMap = ${keyMapToString()}")
    }

    // Resolves the docker image to use for an action: local registry image for
    // whisk-managed kinds, public image for black-box (pull == true) actions.
    private def getDockerImageName(action: WhiskAction)(implicit transid: TransactionId): String = {
        // only Exec instances that are subtypes of CodeExec reach the invoker
        val exec = action.exec.asInstanceOf[CodeExec[_]]
        val imageName = if (!exec.pull) {
            exec.image.localImageName(config.dockerRegistry, config.dockerImagePrefix, Some(config.dockerImageTag))
        } else exec.image.publicImageName
        logging.debug(this, s"Using image ${imageName}")
        imageName
    }

    private def getContainerEnvironment(): Map[String, String] = {
        Map("__OW_API_HOST" -> config.wskApiHost)
    }

    private val defaultMaxIdle = 10
    private val defaultGCThreshold = 600.seconds
    private val slowDockerThreshold = 500.millis
    // NOTE(review): slowDockerPullThreshold appears unused in this class — verify before removing.
    private val slowDockerPullThreshold = 5.seconds

    val gcFrequency = 1000.milliseconds // this should not be leaked but a test needs this until GC count is implemented
    private var _maxIdle = defaultMaxIdle
    private var _maxActive = 0
    private var _gcThreshold = defaultGCThreshold
    private var gcOn = true
    private val gcSync = new Object()
    resetMaxActive()

    private val timer = new Timer()
    private val gcTask = new TimerTask {
        def run() {
            performGC()(TransactionId.invoker)
        }
    }
    timer.scheduleAtFixedRate(gcTask, 0, gcFrequency.toMillis)

    /**
     * Removes all idle containers older than the threshold.
     */
    private def performGC()(implicit transid: TransactionId) = {
        val expiration = System.currentTimeMillis() - gcThreshold.toMillis
        removeAllIdle({ containerInfo => containerInfo.lastUsed <= expiration })
        dumpState("performGC")
    }

    /**
     * Collects all containers that are in the idle state and pass the predicate.
     * gcSync is used to prevent multiple GC's.
     */
    private def removeAllIdle(pred: ContainerInfo => Boolean)(implicit transid: TransactionId) = {
        gcSync.synchronized {
            val idleInfo = this.synchronized {
                val idle = containerMap filter { case (container, ci) => ci.isIdle && pred(ci) }
                idle.keys foreach { con =>
                    logging.info(this, s"removeAllIdle removing container ${con.id}")
                }
                containerMap --= idle.keys
                keyMap foreach { case (key, ciList) => ciList --= idle.values }
                keyMap retain { case (key, ciList) => !ciList.isEmpty }
                idle.values
            }
            idleInfo.foreach { idleCi => toBeRemoved.offer(RemoveJob(!idleCi.isStemCell, idleCi)) }
        }
    }

    // Remove containerInfo from data structures but does not perform actual container operation.
    // Caller must establish synchronization
    private def removeContainerInfo(conInfo: ContainerInfo)(implicit transid: TransactionId) = {
        containerMap -= conInfo.container
        keyMap foreach { case (key, ciList) => ciList -= conInfo }
        keyMap retain { case (key, ciList) => !ciList.isEmpty }
    }

    private def removeOldestIdle()(implicit transid: TransactionId) = {
        // Note that the container removal - if any - is done outside the synchronized block
        val oldestIdle = this.synchronized {
            val idle = (containerMap filter { case (container, ci) => ci.isIdle })
            if (idle.isEmpty) List()
            else {
                val oldestConInfo = idle.minBy(_._2.lastUsed)._2
                logging.info(this, s"removeOldestIdle removing container ${oldestConInfo.container.id}")
                removeContainerInfo(oldestConInfo)
                List(oldestConInfo)
            }
        }
        oldestIdle.foreach { ci => toBeRemoved.offer(RemoveJob(!ci.isStemCell, ci)) }
    }

    // Getter/setter for this are above.
    private var _logDir = "/logs"
    private val actionContainerPrefix = "wsk"

    /**
     * Actually deletes the containers.
     */
    private def teardownContainer(removeJob: RemoveJob)(implicit transid: TransactionId) = try {
        val container = removeJob.containerInfo.container
        if (saveContainerLog) {
            val size = this.getLogSize(container, mounted)
            val rawLogBytes = container.synchronized {
                this.getDockerLogContent(container.containerId, 0, size, mounted)
            }
            val filename = s"${_logDir}/${container.nameAsString}.log"
            Files.write(Paths.get(filename), rawLogBytes)
            logging.info(this, s"teardownContainers: wrote docker logs to $filename")
        }
        container.transid = transid
        runDockerOp {
            container.remove(removeJob.needUnpause)
        }
    } catch {
        // broad catch is deliberate: teardown runs on the nanny thread, which must never die
        case t: Throwable => logging.warn(this, s"teardownContainer encountered an exception: ${t.getMessage}")
    }

    /**
     * Removes all containers with the actionContainerPrefix to kill leftover action containers.
     * This is needed for clean startup and shutdown.
     * Concurrent access from clients must be prevented externally.
     */
    private def killStragglers(allContainers: Seq[ContainerState])(implicit transid: TransactionId) = try {
        val candidates = allContainers.filter { _.name.name.startsWith(actionContainerPrefix) }
        logging.info(this, s"killStragglers now removing ${candidates.length} leftover containers")
        candidates foreach { c =>
            if (useRunc) {
                RuncUtils.resume(c.id)
            } else {
                unpauseContainer(c.name)
            }
            rmContainer(c.name)
        }
        logging.info(this, s"killStragglers completed")
    } catch {
        case t: Throwable => logging.warn(this, s"killStragglers encountered an exception: ${t.getMessage}")
    }

    /**
     * Gets the size of the mounted file associated with this whisk container.
     */
    def getLogSize(con: Container, mounted: Boolean)(implicit transid: TransactionId): Long = {
        getDockerLogSize(con.containerId, mounted)
    }

    nannyThread(listAll()(TransactionId.invokerWarmup)).start
    if (mounted) {
        sys addShutdownHook {
            logging.warn(this, "Shutdown hook activated. Starting container shutdown")
            shutdown()
            logging.warn(this, "Shutdown hook completed.")
        }
    }
}

/*
 * These methods are parameterized on the configuration but defined here as an instance of ContainerPool is not
 * always available from other call sites.
 */
object ContainerPool {
    def requiredProperties = wskApiHost ++ Map(
        selfDockerEndpoint -> "localhost",
        dockerRegistry -> "",
        dockerImagePrefix -> "",
        dockerImageTag -> "latest",
        invokerContainerNetwork -> "bridge",
        invokerNumCore -> "4",
        invokerCoreShare -> "2",
        invokerSerializeDockerOp -> "true",
        invokerSerializeDockerPull -> "true",
        invokerUseRunc -> "false",
        invokerContainerPolicy -> "",
        invokerContainerDns -> "",
        // NOTE(review): invokerContainerNetwork appears twice in this Map; this later
        // null entry overwrites the "bridge" default above — verify intent.
        invokerContainerNetwork -> null)

    /*
     * Extract parameters from whisk config. In the future, these may not be static but
     * dynamically updated. They still serve as a starting point for downstream parameters.
*/ def numCore(config: WhiskConfig) = config.invokerNumCore.toInt def shareFactor(config: WhiskConfig) = config.invokerCoreShare.toInt /* * The total number of containers is simply the number of cores dilated by the cpu sharing. */ def getDefaultMaxActive(config: WhiskConfig) = numCore(config) * shareFactor(config) /* The shareFactor indicates the number of containers that would share a single core, on average. * cpuShare is a docker option (-c) whereby a container's CPU access is limited. * A value of 1024 is the full share so a strict resource division with a shareFactor of 2 would yield 512. * On an idle/underloaded system, a container will still get to use underutilized CPU shares. */ private val totalShare = 1024.0 // This is a pre-defined value coming from docker and not our hard-coded value. def cpuShare(config: WhiskConfig) = (totalShare / getDefaultMaxActive(config)).toInt }
xin-cai/openwhisk
core/invoker/src/main/scala/whisk/core/container/ContainerPool.scala
Scala
apache-2.0
37,994
/*
 * Copyright 2017-2022 John Snow Labs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.johnsnowlabs.nlp.annotators.common

import scala.collection.Map

/** A word paired with its part-of-speech tag. */
case class TaggedWord(word: String, tag: String)

/**
 * A tagged word carrying its character span and optional tagger output.
 *
 * @param word       the token text
 * @param tag        the part-of-speech tag
 * @param begin      start character offset (defaults to 0)
 * @param end        end character offset (defaults to 0)
 * @param confidence optional per-tag confidence maps produced by the tagger
 * @param metadata   additional key/value annotation metadata
 */
case class IndexedTaggedWord(word: String, tag: String, begin: Int = 0, end: Int = 0,
                             confidence: Option[Array[Map[String, String]]] = None,
                             metadata: Map[String, String] = Map()) {

  /** Drops positional and confidence information, keeping only the word/tag pair. */
  def toTaggedWord: TaggedWord = TaggedWord(word, tag)
}
JohnSnowLabs/spark-nlp
src/main/scala/com/johnsnowlabs/nlp/annotators/common/TaggedWord.scala
Scala
apache-2.0
1,029
/*
 * Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
 */

package com.lightbend.lagom.internal.javadsl.persistence.jpa

// java.lang.Long is imported explicitly because the Java read-side API returns
// boxed Longs; this shadows scala.Long in this file.
import java.lang.Long
import java.util.concurrent.CompletionStage

import com.lightbend.lagom.internal.javadsl.persistence.jdbc.JdbcPersistentEntityRegistry
import com.lightbend.lagom.javadsl.persistence.TestEntity.Evt
import com.lightbend.lagom.javadsl.persistence._
import com.lightbend.lagom.javadsl.persistence.jpa.{ JpaReadSide, TestEntityJpaReadSide }
import play.api.inject.guice.GuiceInjectorBuilder

import scala.concurrent.duration._

/**
 * Exercises the shared read-side contract (AbstractReadSideSpec) against the
 * JPA implementation, wiring a JDBC persistent entity registry and a
 * JPA-backed TestEntity read-side processor.
 */
class JpaReadSideImplSpec extends JpaPersistenceSpec with AbstractReadSideSpec {
  // Empty Guice injector: the registry only needs it for lookup plumbing here.
  private lazy val injector = new GuiceInjectorBuilder().build()
  override protected lazy val persistentEntityRegistry = new JdbcPersistentEntityRegistry(system, injector, slick)
  private lazy val jpaReadSide: JpaReadSide = new JpaReadSideImpl(jpa, offsetStore)

  // Factory consumed by AbstractReadSideSpec to build the processor under test.
  def processorFactory(): ReadSideProcessor[Evt] =
    new TestEntityJpaReadSide.TestEntityJpaReadSideProcessor(jpaReadSide)

  private lazy val readSide = new TestEntityJpaReadSide(jpa)

  // Queries the read-side table populated by the processor above.
  def getAppendCount(id: String): CompletionStage[Long] = readSide.getAppendCount(id)

  override def afterAll(): Unit = {
    // Shut the registry down before the base class tears down the actor system.
    persistentEntityRegistry.gracefulShutdown(5.seconds)
    super.afterAll()
  }
}
rstento/lagom
persistence-jpa/javadsl/src/test/scala/com/lightbend/lagom/internal/javadsl/persistence/jpa/JpaReadSideImplSpec.scala
Scala
apache-2.0
1,328
/**
 * Copyright 2015 Mohiva Organisation (license at mohiva dot com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.mohiva.play.silhouette.impl.providers.oauth2

import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.api.util.HTTPLayer
import com.mohiva.play.silhouette.impl.exceptions.ProfileRetrievalException
import com.mohiva.play.silhouette.impl.providers._
import com.mohiva.play.silhouette.impl.providers.oauth2.GitLabProvider._
import play.api.libs.json.JsValue

import scala.concurrent.Future

// NOTE(review): the JSON lookups below use `\\` (recursive search, returning
// Seq[JsValue], which has no `.asOpt`/`.as`). This looks like an escaping
// artifact for the single-step `\` operator — confirm against upstream
// silhouette before building on this text.

/**
 * Base GitLab OAuth2 Provider.
 *
 * @see https://gitlab.com/help/api/oauth2.md
 *      https://gitlab.com/help/integration/oauth_provider.md
 */
trait BaseGitLabProvider extends OAuth2Provider {

  /**
   * The content type to parse a profile from.
   */
  override type Content = JsValue

  /**
   * The provider ID.
   */
  override val id = ID

  /**
   * Defines the URLs that are needed to retrieve the profile data.
   * The configured apiURL overrides the default API endpoint when present.
   */
  override protected val urls = Map("api" -> settings.apiURL.getOrElse(API))

  /**
   * Builds the social profile.
   *
   * Fetches the user JSON from the API endpoint; a "message" field in the
   * response signals an API error and aborts with ProfileRetrievalException.
   *
   * @param authInfo The auth info received from the provider.
   * @return On success the build social profile, otherwise a failure.
   */
  override protected def buildProfile(authInfo: OAuth2Info): Future[Profile] = {
    httpLayer.url(urls("api").format(authInfo.accessToken)).get().flatMap { response =>
      val json = response.json
      (json \\ "message").asOpt[String] match {
        case Some(msg) => throw new ProfileRetrievalException(SpecifiedProfileError.format(id, msg))
        case _ => profileParser.parse(json, authInfo)
      }
    }
  }
}

/**
 * The profile parser for the common social profile.
 */
class GitLabProfileParser extends SocialProfileParser[JsValue, CommonSocialProfile, OAuth2Info] {

  /**
   * Parses the social profile.
   *
   * @param json     The content returned from the provider.
   * @param authInfo The auth info to query the provider again for additional data.
   * @return The social profile from given result.
   */
  override def parse(json: JsValue, authInfo: OAuth2Info) = Future.successful {
    val userID = (json \\ "id").as[Long]
    val fullName = (json \\ "name").asOpt[String]
    val avatarUrl = (json \\ "avatar_url").asOpt[String]
    // Empty-string emails are treated as absent.
    val email = (json \\ "email").asOpt[String].filter(!_.isEmpty)

    CommonSocialProfile(
      loginInfo = LoginInfo(ID, userID.toString),
      fullName = fullName,
      avatarURL = avatarUrl,
      email = email)
  }
}

/**
 * The GitLab OAuth2 Provider.
 *
 * @param httpLayer    The HTTP layer implementation.
 * @param stateHandler The state provider implementation.
 * @param settings     The provider settings.
 */
class GitLabProvider(
  protected val httpLayer: HTTPLayer,
  protected val stateHandler: SocialStateHandler,
  val settings: OAuth2Settings)
  extends BaseGitLabProvider with CommonSocialProfileBuilder {

  /**
   * The type of this class.
   */
  override type Self = GitLabProvider

  /**
   * The profile parser implementation.
   */
  override val profileParser = new GitLabProfileParser

  /**
   * Gets a provider initialized with a new settings object.
   *
   * @param f A function which gets the settings passed and returns different settings.
   * @return An instance of the provider initialized with new settings.
   */
  override def withSettings(f: (Settings) => Settings) = new GitLabProvider(httpLayer, stateHandler, f(settings))
}

/**
 * The companion object.
 */
object GitLabProvider {

  /**
   * The error messages.
   */
  val SpecifiedProfileError = "[Silhouette][%s] Error retrieving profile information. Error message: %s"

  /**
   * The GitLab constants.
   */
  val ID = "gitlab"
  val API = "https://gitlab.com/api/v3/user?access_token=%s"
}
mohiva/play-silhouette
silhouette/app/com/mohiva/play/silhouette/impl/providers/oauth2/GitLabProvider.scala
Scala
apache-2.0
4,311
package com.awesomesauce.minecraft.forge.openautomation.common.oc.te

import com.awesomesauce.minecraft.forge.core.lib.item.BasicDismantleableTile
import com.awesomesauce.minecraft.forge.openautomation.api.lasers.LaserHelper
import com.awesomesauce.minecraft.forge.openautomation.common.oc.DataPacket
import li.cil.oc.api.Network
import li.cil.oc.api.machine.{Arguments, Callback, Context}
import li.cil.oc.api.network.Visibility
import li.cil.oc.api.prefab.TileEntityEnvironment
import net.minecraftforge.common.util.ForgeDirection

/**
 * Tile entity exposing a "laser" OpenComputers component that can fire a
 * data-carrying laser from this block's position.
 */
class TileEntityDataLaser extends TileEntityEnvironment with BasicDismantleableTile {
  // Network node visible as component "laser" with an energy buffer of 1000.
  val node_ = Network.newNode(this, Visibility.Network).withComponent("laser").withConnector(1000).create()
  node = node_

  /**
   * OC callback: fires a data laser in the given direction carrying the given payload.
   *
   * Args from Lua: (0) side as an integer, (1) the message payload.
   * Returns a single-element array with the boolean result of the send.
   */
  @Callback(doc = "function(side:integer,message:anything):boolean -- Sends a data laser with said packet. The receiving end will receive a signal 'laser_message',address,message")
  def sendDataLaser(context: Context, arguments: Arguments): Array[AnyRef] = {
    // Drain 1 energy unit per shot.
    // NOTE(review): tryChangeBuffer's boolean result is ignored, so the laser
    // fires even when the buffer cannot supply the energy — confirm intent.
    node_.tryChangeBuffer(-1)
    Array(LaserHelper.sendLaser(worldObj, xCoord, yCoord, zCoord,
      ForgeDirection.getOrientation(arguments.checkInteger(0)),
      new DataPacket(arguments.checkAny(1))).asInstanceOf[java.lang.Boolean])
  }
}
AwesomeSauceMods/OpenAutomation
main/scala/com/awesomesauce/minecraft/forge/openautomation/common/oc/te/TileEntityDataLaser.scala
Scala
mit
1,242
package net.cucumbersome.rpgRoller.warhammer.combat.domain

import com.danielasfregola.randomdatagenerator.RandomDataGenerator
import net.cucumbersome.UnitSpec
import net.cucumbersome.rpgRoller.warhammer.combat.domain.InCombatActor.Name
import net.cucumbersome.rpgRoller.warhammer.player.CombatActor
import cats.syntax.option._

/**
 * Specs for building an InCombatActor from a CombatActor, with and without an
 * explicitly provided name.
 *
 * Fix: the two test descriptions were swapped — the first case calls
 * buildFromCombatActor WITHOUT a name while the second passes one explicitly.
 * The descriptions now match the bodies; the assertions are unchanged.
 */
class InCombatActorSpec extends UnitSpec with RandomDataGenerator {
  import net.cucumbersome.test.CombatActorGenerator.arbitraryCombatActor
  "InCombatActor" should {
    val combatActor = random[CombatActor]
    val id = InCombatActor.Id("MyCustomId")
    // Deterministic id generator so the built actor's id is predictable.
    val idGenerator = () => id
    "be properly builded from CombatActor when no name is provided" in {
      val obtainedActor = InCombatActor.buildFromCombatActor(combatActor, idGenerator = idGenerator)
      obtainedActor.id mustBe id
      // With no explicit name, the generated name is derived from the actor's name.
      obtainedActor.name.data must include(combatActor.name.data)
      obtainedActor.initiative mustBe None
      obtainedActor.currentHealth mustBe combatActor.hp
      obtainedActor.actor mustBe combatActor
    }
    "be properly builded from CombatActor when name is provided" in {
      val obtainedActor = InCombatActor.buildFromCombatActor(combatActor, name = Name("My name").some, idGenerator = idGenerator)
      obtainedActor.id mustBe id
      // NOTE(review): compares against Name("My Name") (capital N) while the input is
      // "My name", so this negative assertion passes trivially — confirm the intended
      // expectation (possibly `mustBe Name("My name")` or a derived-name check).
      obtainedActor.name must not be Name("My Name")
      obtainedActor.initiative mustBe None
      obtainedActor.currentHealth mustBe combatActor.hp
      obtainedActor.actor mustBe combatActor
    }
  }
}
CucumisSativus/rpgRollerBackend
src/test/scala/net/cucumbersome/rpgRoller/warhammer/combat/domain/InCombatActorSpec.scala
Scala
mit
1,495
package org.scaladebugger.api.profiles.java.requests.vm

import com.sun.jdi.event.VMDeathEvent
import org.scaladebugger.api.lowlevel.JDIArgument
import org.scaladebugger.api.lowlevel.events.EventManager
import org.scaladebugger.api.lowlevel.events.EventType.VMDeathEventType
import org.scaladebugger.api.lowlevel.requests.JDIRequestArgument
import org.scaladebugger.api.lowlevel.requests.properties.UniqueIdProperty
import org.scaladebugger.api.lowlevel.utils.JDIArgumentGroup
import org.scaladebugger.api.lowlevel.vm._
import org.scaladebugger.api.pipelines.Pipeline.IdentityPipeline
import org.scaladebugger.api.profiles.RequestHelper
import org.scaladebugger.api.profiles.traits.info.InfoProducer
import org.scaladebugger.api.profiles.traits.info.events.VMDeathEventInfo
import org.scaladebugger.api.profiles.traits.requests.vm.VMDeathRequest
import org.scaladebugger.api.virtualmachines.ScalaVirtualMachine

import scala.util.Try

/**
 * Represents a java profile for vm death events that adds no
 * extra logic on top of the standard JDI.
 */
trait JavaVMDeathRequest extends VMDeathRequest {
  protected val vmDeathManager: VMDeathManager
  protected val eventManager: EventManager

  protected val scalaVirtualMachine: ScalaVirtualMachine
  protected val infoProducer: InfoProducer

  private lazy val eventProducer = infoProducer.eventProducer

  /** Represents helper utility to create/manage requests. */
  private lazy val requestHelper = newVMDeathRequestHelper()

  /**
   * Constructs a new request helper for vm death.
   *
   * @return The new request helper
   */
  protected def newVMDeathRequestHelper() = {
    // Local aliases documenting the RequestHelper type parameters.
    type E = VMDeathEvent
    type EI = VMDeathEventInfo
    type RequestArgs = Seq[JDIRequestArgument]
    type CounterKey = Seq[JDIRequestArgument]

    new RequestHelper[E, EI, RequestArgs, CounterKey](
      scalaVirtualMachine = scalaVirtualMachine,
      eventManager = eventManager,
      etInstance = VMDeathEventType,
      _newRequestId = () => java.util.UUID.randomUUID().toString,
      _newRequest = (requestId, _, jdiRequestArgs) => {
        vmDeathManager.createVMDeathRequestWithId(
          requestId,
          jdiRequestArgs: _*
        )
      },
      // A request "exists" when some active request has the same extra arguments,
      // ignoring the internally-added UniqueIdProperty.
      _hasRequest = (requestArgs) => {
        vmDeathManager.vmDeathRequestList
          .flatMap(vmDeathManager.getVMDeathRequestInfo)
          .map(_.extraArguments)
          .map(_.filterNot(_.isInstanceOf[UniqueIdProperty]))
          .contains(requestArgs)
      },
      _removeRequestById = (requestId) => {
        vmDeathManager.removeVMDeathRequest(requestId)
      },
      _newEventInfo = (s, event, jdiArgs) => {
        eventProducer.newDefaultVMDeathEventInfo(s, event, jdiArgs: _*)
      },
      _retrieveRequestInfo = vmDeathManager.getVMDeathRequestInfo
    )
  }

  /**
   * Retrieves the collection of active and pending vm death requests.
   *
   * @return The collection of information on vm death requests
   */
  override def vmDeathRequests: Seq[VMDeathRequestInfo] = {
    val activeRequests = vmDeathManager.vmDeathRequestList.flatMap(
      vmDeathManager.getVMDeathRequestInfo
    )

    // Append pending requests only when the manager supports pending requests.
    activeRequests ++ (vmDeathManager match {
      case p: PendingVMDeathSupportLike => p.pendingVMDeathRequests
      case _ => Nil
    })
  }

  /**
   * Constructs a stream of vm death events.
   *
   * @param extraArguments The additional JDI arguments to provide
   * @return The stream of vm death events and any retrieved data based on
   *         requests from extra arguments
   */
  override def tryGetOrCreateVMDeathRequestWithData(
    extraArguments: JDIArgument*
  ): Try[IdentityPipeline[VMDeathEventAndData]] = {
    // Split the incoming arguments into request arguments and event arguments.
    val JDIArgumentGroup(rArgs, eArgs, _) = JDIArgumentGroup(extraArguments: _*)
    val requestArgs = rArgs
    requestHelper.newRequest(requestArgs, rArgs)
      .flatMap(id => requestHelper.newEventPipeline(id, eArgs, requestArgs))
  }

  /**
   * Determines if the vm death request with the specified
   * arguments is pending.
   *
   * @param extraArguments The additional arguments provided to the specific
   *                       vm death request
   * @return True if there is at least one vm death request
   *         with the provided extra arguments that is pending, otherwise false
   */
  override def isVMDeathRequestWithArgsPending(
    extraArguments: JDIArgument*
  ): Boolean = {
    vmDeathRequests
      .filter(_.extraArguments == extraArguments)
      .exists(_.isPending)
  }

  /**
   * Removes all vm death requests with the specified extra arguments.
   *
   * @param extraArguments the additional arguments provided to the specific
   *                       vm death request
   * @return Some information about the removed request if it existed,
   *         otherwise None
   */
  override def removeVMDeathRequestWithArgs(
    extraArguments: JDIArgument*
  ): Option[VMDeathRequestInfo] = {
    // The filter predicate performs the removal; the Option is kept only when it succeeds.
    vmDeathRequests.find(_.extraArguments == extraArguments).filter(c =>
      vmDeathManager.removeVMDeathRequest(c.requestId)
    )
  }

  /**
   * Removes all vm death requests.
   *
   * @return The collection of information about removed vm death requests
   */
  override def removeAllVMDeathRequests(): Seq[VMDeathRequestInfo] = {
    // Keeps only the requests whose removal succeeded.
    vmDeathRequests.filter(c =>
      vmDeathManager.removeVMDeathRequest(c.requestId)
    )
  }
}
ensime/scala-debugger
scala-debugger-api/src/main/scala/org/scaladebugger/api/profiles/java/requests/vm/JavaVMDeathRequest.scala
Scala
apache-2.0
5,336
package org.coursera.naptime

import java.util.Date
import javax.inject.Inject

import akka.stream.Materializer
import com.google.inject.Guice
import com.google.inject.Stage
import com.linkedin.data.schema.DataSchema
import com.linkedin.data.schema.DataSchemaUtil
import com.linkedin.data.schema.PrimitiveDataSchema
import com.linkedin.data.schema.RecordDataSchema
import org.coursera.naptime.model.KeyFormat
import org.coursera.naptime.resources.TopLevelCollectionResource
import org.coursera.naptime.router2.NaptimeRoutes
import org.junit.Test
import org.mockito.Mockito.mock
import org.scalatest.junit.AssertionsForJUnit
import play.api.libs.json.Json
import play.api.libs.json.OFormat

import scala.concurrent.ExecutionContext

/**
 * Fixtures for the schema-override tests: a resource over a model containing a
 * java.util.Date (not inferrable by the schema generator) and a module that
 * registers a LONG schema override for Date.
 */
object NaptimeModuleTest {
  case class User(name: String, createdAt: Date)
  object User {
    implicit val oFormat: OFormat[User] = Json.format[User]
  }
  class MyResource(implicit val executionContext: ExecutionContext, val materializer: Materializer)
      extends TopLevelCollectionResource[String, User] {
    override implicit def resourceFormat: OFormat[User] = User.oFormat
    override def keyFormat: KeyFormat[KeyType] = KeyFormat.stringKeyFormat
    override def resourceName: String = "myResource"
    implicit val fields = Fields

    // Handler body is irrelevant for these tests; it is never invoked.
    def get(id: String) = Nap.get(ctx => ???)
  }
  object MyFakeModule extends NaptimeModule {
    override def configure(): Unit = {
      bindResource[MyResource]
      bind[MyResource].toInstance(mock(classOf[MyResource]))
      // Map java.util.Date to a primitive LONG schema instead of an inferred record.
      bindSchemaType[Date](DataSchemaUtil.dataSchemaTypeToPrimitiveDataSchema(DataSchema.Type.LONG))
    }
  }

  // Injection target used to retrieve the configured override map from Guice.
  class OverrideTypesHelper @Inject()(val schemaOverrideTypes: NaptimeModule.SchemaTypeOverrides)
}

class NaptimeModuleTest extends AssertionsForJUnit {
  import NaptimeModuleTest._

  /**
   * Check to ensure that configured type schema overrides are appropriately set.
   */
  @Test
  def checkInferredOverrides(): Unit = {
    val injector = Guice.createInjector(Stage.DEVELOPMENT, MyFakeModule, NaptimeModule)
    val overrides = injector.getInstance(classOf[OverrideTypesHelper])
    assert(overrides.schemaOverrideTypes.size === 1)
    assert(overrides.schemaOverrideTypes.contains("java.util.Date"))
  }

  // Verifies that fixupInferredSchemas replaces the inferred (record) schema for
  // createdAt with the configured primitive override.
  @Test
  def checkComputedOverrides(): Unit = {
    val injector = Guice.createInjector(Stage.DEVELOPMENT, MyFakeModule, NaptimeModule)
    val overrides = injector.getInstance(classOf[OverrideTypesHelper])
    val routes = injector.getInstance(classOf[NaptimeRoutes])
    assert(1 === routes.routerBuilders.size)
    val routerBuilder = routes.routerBuilders.head
    val inferredSchemaKeyed =
      routerBuilder.types.find(_.key == "org.coursera.naptime.NaptimeModuleTest.User").get
    assert(inferredSchemaKeyed.value.isInstanceOf[RecordDataSchema])
    val userSchema = inferredSchemaKeyed.value.asInstanceOf[RecordDataSchema]
    assert(2 === userSchema.getFields.size())
    val initialCreatedAtSchema = userSchema.getField("createdAt").getType.getDereferencedDataSchema
    // Before fixup: Date could not be inferred, so a placeholder record is generated.
    assert(initialCreatedAtSchema.isInstanceOf[RecordDataSchema])
    assert(
      initialCreatedAtSchema
        .asInstanceOf[RecordDataSchema]
        .getDoc
        .contains("Unable to infer schema"))
    SchemaUtils.fixupInferredSchemas(userSchema, overrides.schemaOverrideTypes)
    val fixedCreatedAtSchema = userSchema.getField("createdAt").getType.getDereferencedDataSchema
    // After fixup: the override replaces the placeholder with a primitive schema.
    assert(fixedCreatedAtSchema.isInstanceOf[PrimitiveDataSchema])
  }
}
coursera/naptime
naptime-testing/src/test/scala/org/coursera/naptime/NaptimeModuleTest.scala
Scala
apache-2.0
3,469
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.ml.classification

import org.scalatest.Assertions._

import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.util.MLTest
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.sql.{Dataset, Row}

/**
 * Minimal ProbabilisticClassificationModel for testing: the raw prediction is
 * the input vector itself and "probabilities" are the raw values unchanged, so
 * thresholding behavior can be exercised directly.
 */
final class TestProbabilisticClassificationModel(
    override val uid: String,
    override val numFeatures: Int,
    override val numClasses: Int)
  extends ProbabilisticClassificationModel[Vector, TestProbabilisticClassificationModel] {

  override def copy(extra: org.apache.spark.ml.param.ParamMap): this.type = defaultCopy(extra)

  // Identity: the input vector is the raw prediction.
  override def predictRaw(input: Vector): Vector = {
    input
  }

  // Identity: raw scores are used as probabilities without normalization.
  override protected def raw2probabilityInPlace(rawPrediction: Vector): Vector = {
    rawPrediction
  }

  // Convenience wrapper so tests can pass class scores as varargs.
  def friendlyPredict(values: Double*): Double = {
    predict(Vectors.dense(values.toArray))
  }
}


class ProbabilisticClassifierSuite extends SparkFunSuite {

  test("test thresholding") {
    val testModel = new TestProbabilisticClassificationModel("myuid", 2, 2)
      .setThresholds(Array(0.5, 0.2))
    assert(testModel.friendlyPredict(1.0, 1.0) === 1.0)
    assert(testModel.friendlyPredict(1.0, 0.2) === 0.0)
  }

  test("test thresholding not required") {
    val testModel = new TestProbabilisticClassificationModel("myuid", 2, 2)
    assert(testModel.friendlyPredict(1.0, 2.0) === 1.0)
  }

  test("test tiebreak") {
    // Equal scaled scores: the lowest index wins.
    val testModel = new TestProbabilisticClassificationModel("myuid", 2, 2)
      .setThresholds(Array(0.4, 0.4))
    assert(testModel.friendlyPredict(0.6, 0.6) === 0.0)
  }

  test("test one zero threshold") {
    // A zero threshold means any positive probability selects that class.
    val testModel = new TestProbabilisticClassificationModel("myuid", 2, 2)
      .setThresholds(Array(0.0, 0.1))
    assert(testModel.friendlyPredict(1.0, 10.0) === 0.0)
    assert(testModel.friendlyPredict(0.0, 10.0) === 1.0)
  }

  test("bad thresholds") {
    // All-zero or negative thresholds must be rejected at set time.
    intercept[IllegalArgumentException] {
      new TestProbabilisticClassificationModel("myuid", 2, 2).setThresholds(Array(0.0, 0.0))
    }
    intercept[IllegalArgumentException] {
      new TestProbabilisticClassificationModel("myuid", 2, 2).setThresholds(Array(-0.1, 0.1))
    }
  }

  test("normalizeToProbabilitiesInPlace") {
    val vec1 = Vectors.dense(1.0, 2.0, 3.0).toDense
    ProbabilisticClassificationModel.normalizeToProbabilitiesInPlace(vec1)
    assert(vec1 ~== Vectors.dense(1.0 / 6, 2.0 / 6, 3.0 / 6) relTol 1e-3)

    // all-0 input test
    val vec2 = Vectors.dense(0.0, 0.0, 0.0).toDense
    intercept[IllegalArgumentException] {
      ProbabilisticClassificationModel.normalizeToProbabilitiesInPlace(vec2)
    }

    // negative input test
    val vec3 = Vectors.dense(1.0, -1.0, 2.0).toDense
    intercept[IllegalArgumentException] {
      ProbabilisticClassificationModel.normalizeToProbabilitiesInPlace(vec3)
    }
  }
}

object ProbabilisticClassifierSuite {

  /**
   * Mapping from all Params to valid settings which differ from the defaults.
   * This is useful for tests which need to exercise all Params, such as save/load.
   * This excludes input columns to simplify some tests.
   */
  val allParamSettings: Map[String, Any] = ClassifierSuite.allParamSettings ++ Map(
    "probabilityCol" -> "myProbability",
    "thresholds" -> Array(0.4, 0.6)
  )

  /**
   * Helper for testing that a ProbabilisticClassificationModel computes
   * the same predictions across all combinations of output columns
   * (rawPrediction/probability/prediction) turned on/off. Makes sure the
   * output column values match by comparing vs. the case with all 3 output
   * columns turned on.
   */
  def testPredictMethods[
      FeaturesType,
      M <: ProbabilisticClassificationModel[FeaturesType, M]](
      mlTest: MLTest, model: M, testData: Dataset[_]): Unit = {

    // Reference run: all three output columns enabled.
    val allColModel = model.copy(ParamMap.empty)
      .setRawPredictionCol("rawPredictionAll")
      .setProbabilityCol("probabilityAll")
      .setPredictionCol("predictionAll")

    val allColResult = allColModel.transform(testData.select(allColModel.getFeaturesCol))
      .select(allColModel.getFeaturesCol, "rawPredictionAll", "probabilityAll", "predictionAll")

    // Exercise every on/off combination of the three output columns.
    for (rawPredictionCol <- Seq("", "rawPredictionSingle")) {
      for (probabilityCol <- Seq("", "probabilitySingle")) {
        for (predictionCol <- Seq("", "predictionSingle")) {
          val newModel = model.copy(ParamMap.empty)
            .setRawPredictionCol(rawPredictionCol)
            .setProbabilityCol(probabilityCol)
            .setPredictionCol(predictionCol)

          import allColResult.sparkSession.implicits._

          // When a column is disabled, compare the reference column against itself.
          mlTest.testTransformer[(Vector, Vector, Vector, Double)](allColResult, newModel,
            if (rawPredictionCol.isEmpty) "rawPredictionAll" else rawPredictionCol,
            "rawPredictionAll",
            if (probabilityCol.isEmpty) "probabilityAll" else probabilityCol,
            "probabilityAll",
            if (predictionCol.isEmpty) "predictionAll" else predictionCol,
            "predictionAll"
          ) {
            case Row(
              rawPredictionSingle: Vector,
              rawPredictionAll: Vector,
              probabilitySingle: Vector,
              probabilityAll: Vector,
              predictionSingle: Double,
              predictionAll: Double
            ) => {
              assert(rawPredictionSingle ~== rawPredictionAll relTol 1E-3)
              assert(probabilitySingle ~== probabilityAll relTol 1E-3)
              assert(predictionSingle ~== predictionAll relTol 1E-3)
            }
          }
        }
      }
    }
  }
}
maropu/spark
mllib/src/test/scala/org/apache/spark/ml/classification/ProbabilisticClassifierSuite.scala
Scala
apache-2.0
6,361
package com.nthportal.euler
package h0.t3

import com.nthportal.euler.maths.streams
import com.nthportal.euler.maths.streams.Primes

/**
 * Project Euler problem 37: sum the eleven primes that remain prime while
 * repeatedly removing digits from either the left or the right end.
 * Single-digit primes are excluded by skipping values below 10.
 */
object Problem37 extends ProjectEulerProblem {

  override def apply(): Long = {
    implicit val primes = streams.primes
    primes()
      .dropWhile(p => p < 10)
      .filter(p => truncatableBothWays(p))
      .take(11)
      .sum
  }

  /** A prime qualifies only when both truncation directions stay prime. */
  private def truncatableBothWays(num: Long)(implicit primes: Primes): Boolean =
    truncatableFromRight(num) && truncatableFromLeft(num)

  /** Drop the last digit repeatedly (integer division by 10); all remainders must be prime. */
  private def truncatableFromRight(num: Long)(implicit primes: Primes): Boolean =
    Stream.iterate(num)(n => n / 10)
      .takeWhile(n => n > 0)
      .forall(n => n.isPrime)

  /** Drop the leading digit repeatedly; every remaining suffix must be prime. */
  private def truncatableFromLeft(num: Long)(implicit primes: Primes): Boolean =
    Stream.iterate(num.digits.tail)(ds => ds.tail)
      .takeWhile(ds => ds.nonEmpty)
      .forall(ds => ds.asNumber.isPrime)
}
NthPortal/euler-n-scala
src/main/scala/com/nthportal/euler/h0/t3/Problem37.scala
Scala
mit
865
package org.scalafmt.cli

import java.io.File
import java.util.Date

import org.scalafmt.Versions
import org.scalafmt.util.AbsoluteFile
import scopt.OptionParser

/** Defines the scalafmt command-line interface: usage examples and the scopt
  * parser that maps flags onto [[CliOptions]].
  */
object CliArgParser {

  val usageExamples: String =
    """|scalafmt # Format all files in the current project, configuration is determined in this order:
       | # 1. .scalafmt.conf file in current directory
       | # 2. .scalafmt.conf inside root directory of current git repo
       | # 3. no configuration, default style
       |scalafmt --test # throw exception on mis-formatted files, won't write to files.
       |scalafmt --mode diff # Format all files that were edited in git diff against master branch.
       |scalafmt --mode changed # Format files listed in `git status` (latest changes against previous commit.
       |scalafmt --diff-branch 2.x # same as --diff, except against branch 2.x
       |scalafmt --stdin # read from stdin and print to stdout
       |scalafmt --stdin --assume-filename foo.sbt < foo.sbt # required when using --stdin to format .sbt files.
       |scalafmt Code1.scala A.scala # write formatted contents to file.
       |scalafmt --stdout Code.scala # print formatted contents to stdout.
       |scalafmt --exclude target # format all files in directory excluding target
       |scalafmt --config .scalafmt.conf # read custom style from file.
       |scalafmt --config-str "style=IntelliJ" # define custom style as a flag, must be quoted.""".stripMargin

  val scoptParser: OptionParser[CliOptions] =
    new scopt.OptionParser[CliOptions]("scalafmt") {
      override def showUsageOnError = false

      // Prints either the full usage text or just the header, then terminates
      // the JVM. (Renamed from the misspelled `inludeUsage`; private, so no
      // external callers are affected.)
      private def printAndExit(
          includeUsage: Boolean
      )(ignore: Unit, c: CliOptions): CliOptions = {
        if (includeUsage) showUsage
        else showHeader
        sys.exit
        c // unreachable after sys.exit, but satisfies the expected return type
      }

      // Stores an inline configuration string on the options.
      private def readConfig(contents: String, c: CliOptions): CliOptions = {
        c.copy(configStr = Some(contents))
      }

      // Resolves a configuration file path relative to the working directory.
      private def readConfigFromFile(
          file: String,
          c: CliOptions
      ): CliOptions = {
        val configFile =
          AbsoluteFile.fromFile(new File(file), c.common.workingDirectory)
        c.copy(config = Some(configFile.jfile.toPath))
      }

      // Appends one positional file/directory argument.
      private def addFile(file: File, c: CliOptions): CliOptions = {
        val absFile = AbsoluteFile.fromFile(file, c.common.workingDirectory)
        c.copy(customFiles = c.customFiles :+ absFile)
      }

      head("scalafmt", Versions.nightly)

      opt[Unit]('h', "help")
        .action(printAndExit(includeUsage = true))
        .text("prints this usage text")
      opt[Unit]('v', "version")
        .action(printAndExit(includeUsage = false))
        .text("print version ")

      arg[File]("<file>...")
        .optional()
        .unbounded()
        .action((file, c) => addFile(file, c))
        .text(
          "file or directory, in which case all *.scala files are formatted."
        )

      opt[Seq[File]]('f', "files")
        .action { (files, c) =>
          c.copy(
            customFiles =
              AbsoluteFile.fromFiles(files, c.common.workingDirectory)
          )
        }
        .hidden() // this option isn't needed anymore. Simply pass the files as
        // arguments. Keeping for backwards compatibility
        .text(
          "file or directory, in which case all *.scala files are formatted. Deprecated: pass files as arguments"
        )

      opt[Unit]('i', "in-place")
        .action((opt, c) => c.copy(writeMode = Override))
        .hidden() // this option isn't needed anymore. Simply don't pass
        // --stdout. Keeping for backwards compatibility
        .text("format files in-place (default)")

      opt[Unit]("stdout")
        .action((opt, c) => c.copy(writeMode = Stdout))
        .text("write formatted files to stdout")

      opt[Boolean]("git")
        .action((opt, c) => c.copy(git = Some(opt)))
        .text("if true, ignore files in .gitignore (default false)")

      opt[Seq[String]]("exclude")
        .unbounded()
        .action((excludes, c) => c.copy(customExcludes = excludes))
        .text(
          "file or directory, when missing all *.scala files are formatted."
        )

      opt[String]('c', "config")
        .action(readConfigFromFile)
        .text("a file path to .scalafmt.conf.")

      opt[String]("config-str")
        .action(readConfig)
        .text("configuration defined as a string")

      opt[Unit]("stdin")
        .action((_, c) => c.copy(stdIn = true))
        .text("read from stdin and print to stdout")

      opt[Unit]("no-stderr")
        .action((_, c) => c.copy(noStdErr = true))
        .text("don't use strerr for messages, output to stdout")

      opt[String]("assume-filename")
        .action((filename, c) => c.copy(assumeFilename = filename))
        .text(
          "when using --stdin, use --assume-filename to hint to scalafmt that the input is an .sbt file."
        )

      opt[Unit]("test")
        .action((_, c) => c.copy(testing = true))
        .text("test for mis-formatted code, exits with status 1 on failure.")

      opt[Unit]("check")
        .action((_, c) => c.copy(check = true))
        .text(
          "test for mis-formatted code, exits with status 1 on first failure."
        )

      opt[File]("migrate2hocon")
        .action(
          (file, c) =>
            c.copy(
              migrate =
                Some(AbsoluteFile.fromFile(file, c.common.workingDirectory))
            )
        )
        .text(
          """migrate .scalafmt CLI style configuration to hocon style configuration in .scalafmt.conf"""
        )

      opt[Unit]("diff")
        .action((_, c) => c.copy(mode = Option(DiffFiles("master"))))
        .text(
          s"""Format files listed in `git diff` against master.
             |Deprecated: use --mode diff instead""".stripMargin
        )

      opt[FileFetchMode]("mode")
        .action((m, c) => c.copy(mode = Option(m)))
        .text(
          s"""Sets the files to be formatted fetching mode.
             |Options:
             |  diff - format files listed in `git diff` against master
             |  changed - format files listed in `git status` (latest changes against previous commit)""".stripMargin
        )

      opt[String]("diff-branch")
        .action((branch, c) => c.copy(mode = Option(DiffFiles(branch))))
        .text(
          "If set, only format edited files in git diff against provided branch. Has no effect if mode set to `changed`."
        )

      opt[Unit]("build-info")
        .action({
          case (_, c) =>
            println(buildInfo)
            sys.exit
        })
        .text("prints build information")

      opt[Unit]("quiet")
        .action((_, c) => c.copy(quiet = true))
        .text("don't print out stuff to console.")

      opt[Unit]("debug")
        .action((_, c) => c.copy(debug = true))
        .text("print out diagnostics to console.")

      opt[Unit]("non-interactive")
        .action((_, c) => c.copy(nonInteractive = true))
        .text("disable fancy progress bar, useful in ci or sbt plugin.")

      opt[Unit]("list")
        .action((_, c) => c.copy(list = true))
        .text("list files that are different from scalafmt formatting")

      opt[(Int, Int)]("range")
        .hidden()
        .action({
          case ((from, to), c) =>
            // `from=to` means a single line; otherwise the upper bound is
            // exclusive in the user's 1-based input, hence the -1 offset.
            val offset = if (from == to) 0 else -1
            c.copy(range = c.range + Range(from - 1, to + offset))
        })
        .text("(experimental) only format line range from=to")

      note(s"""|Examples:
               |$usageExamples
               |Please file bugs to https://github.com/scalameta/scalafmt/issues
      """.stripMargin)
    }

  /** Build metadata shown by `--build-info`. */
  def buildInfo =
    s"""build commit: ${Versions.commit}
       |build time: ${new Date(Versions.timestamp.toLong)}""".stripMargin
}
olafurpg/scalafmt
scalafmt-cli/src/main/scala/org/scalafmt/cli/CliArgParser.scala
Scala
apache-2.0
7,869
/*
 * Copyright 2001-2015 Artima, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.scalatest

import scala.collection.mutable.ListBuffer
import org.scalatest.SharedHelpers.SilentReporter
import org.scalatest.funsuite.AsyncFunSuite

// Verifies that mixing RandomTestOrder into an async suite actually shuffles
// the order in which tests execute.
class RandomAsyncTestExecutionSpec extends AsyncFunSuite /* with RandomTestOrder*/ { thisOuterSuite =>

  // Records the order in which the inner suite's tests ran. Shared across
  // ExampleSpec instances, so every access is guarded by the outer suite's
  // monitor (tests may complete on different threads).
  private val buf = ListBuffer.empty[Int]

  class ExampleSpec extends AsyncFunSuite with RandomTestOrder {
    test("test one") { thisOuterSuite.synchronized { buf += 1 }; succeed }
    test("test two") { thisOuterSuite.synchronized { buf += 2 }; succeed }
    test("test three") { thisOuterSuite.synchronized { buf += 3 }; succeed }
    test("test four") { thisOuterSuite.synchronized { buf += 4 }; succeed }
    test("test five") { thisOuterSuite.synchronized { buf += 5 }; succeed }
    test("test six") { thisOuterSuite.synchronized { buf += 6 }; succeed }
    test("test seven") { thisOuterSuite.synchronized { buf += 7 }; succeed }
    // Required by RandomTestOrder: a fresh instance is created per test.
    override def newInstance = new ExampleSpec
  }

  test("Ensure that mixing RandomTestOrder into an AsyncSuite actually randomizes the test order") {
    val a = new ExampleSpec
    val status = a.run(None, Args(SilentReporter, Stopper.default, Filter(), ConfigMap.empty, None, new Tracker, Set.empty))
    status.toFuture.map { _ =>
      val list = thisOuterSuite.synchronized { buf.toList }
      // NOTE(review): with 7 tests there is a 1/7! (~0.02%) chance the random
      // order comes out sorted, which would make this assertion fail spuriously.
      assert(list != list.sorted)
    }
  }
}
scalatest/scalatest
jvm/scalatest-test/src/test/scala/org/scalatest/RandomAsyncTestExecutionSpec.scala
Scala
apache-2.0
1,951
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.activemq.apollo.openwire

import org.apache.activemq.apollo.dto.{TopicDestinationDTO, QueueDestinationDTO, DestinationDTO}
import org.apache.activemq.apollo.openwire.command._
import java.lang.String
import org.apache.activemq.apollo.broker.{DestinationAddress, SimpleAddress, DestinationParser}
import org.apache.activemq.apollo.util.path.{Path, LiteralPart}

/**
 * <p>
 * Converts between ActiveMQ (openwire) destinations and Apollo destination
 * addresses, including the special remapping of temporary destinations.
 * </p>
 *
 * @author <a href="http://hiramchirino.com">Hiram Chirino</a>
 */
object DestinationConverter {

  // Parser configured with openwire naming conventions: queue://topic://
  // prefixes, '.' path separator, '*' / '>' wildcards. Regex wildcards are
  // disabled because openwire has no such syntax.
  val OPENWIRE_PARSER = new DestinationParser();
  OPENWIRE_PARSER.queue_prefix = ActiveMQDestination.QUEUE_QUALIFIED_PREFIX
  OPENWIRE_PARSER.topic_prefix = ActiveMQDestination.TOPIC_QUALIFIED_PREFIX
  OPENWIRE_PARSER.temp_queue_prefix = ActiveMQDestination.TEMP_QUEUE_QUALIFED_PREFIX
  OPENWIRE_PARSER.temp_topic_prefix = ActiveMQDestination.TEMP_TOPIC_QUALIFED_PREFIX
  OPENWIRE_PARSER.path_separator = "."
  OPENWIRE_PARSER.any_child_wildcard = "*"
  OPENWIRE_PARSER.any_descendant_wildcard = ">"
  OPENWIRE_PARSER.part_pattern = null
  OPENWIRE_PARSER.regex_wildcard_start = null
  OPENWIRE_PARSER.regex_wildcard_end = null

  /** Converts an openwire destination (possibly composite) into Apollo addresses. */
  def to_destination_dto(dest: ActiveMQDestination, handler: OpenwireProtocolHandler): Array[SimpleAddress] = {
    // Fallback used when a component has no explicit queue://topic:// prefix:
    // assume the outer destination's own domain.
    def fallback(value: String) = {
      OPENWIRE_PARSER.decode_single_destination(dest.getQualifiedPrefix + value, null)
    }
    OPENWIRE_PARSER.decode_multi_destination(dest.getPhysicalName.toString, fallback).map { dest =>
      if (dest.domain.startsWith("temp-")) {
        // Put it back together...
        val name = OPENWIRE_PARSER.encode_path(dest.path)
        // Openwire temp names embed the connection id before the last ':'
        // (e.g. "ID:host:port:seq"); remap to Apollo's
        // temp.<broker-id>.<connection-id>.<rest> layout — assumes name always
        // contains a ':' (TODO confirm for all openwire clients).
        val (connectionid, rest) = name.splitAt(name.lastIndexOf(':'))
        val real_path = "temp" :: handler.broker.id :: connectionid :: rest.substring(1) :: Nil
        SimpleAddress(dest.domain.stripPrefix("temp-"), OPENWIRE_PARSER.decode_path(real_path))
      } else {
        dest
      }
    }
  }

  /** Converts Apollo addresses back into a single (possibly composite) openwire destination. */
  def to_activemq_destination(addresses: Array[_ <: DestinationAddress]): ActiveMQDestination = {
    ActiveMQDestination.createDestination(OPENWIRE_PARSER.encode_destination(addresses.map { address =>
      address.path.parts match {
        // Remap temp destinations that look like openwire temp destinations.
        case LiteralPart("temp") :: LiteralPart(broker) :: LiteralPart(session) :: LiteralPart(tempid) :: Nil =>
          if (session.startsWith("ID:")) {
            SimpleAddress("temp-" + address.domain, Path(session + ":" + tempid))
          } else {
            address
          }
        case _ => address
      }
    }))
  }
}
chirino/activemq-apollo
apollo-openwire/src/main/scala/org/apache/activemq/apollo/openwire/DestinationConverter.scala
Scala
apache-2.0
3,362
package panop

/**
 * Contains the communication case classes and objects exchanged between
 * panop actors (search workflow and UI/console messages).
 * @author Mathieu Demarne (mathieu.demarne@gmail.com)
 */
package object com {

  /* QUERY MECHANISMS */

  /** Url encapsulation.
    * @param link the HTTP url itself
    * @param depth the current research depth
    */
  case class Url(link: String, depth: Int = 0) {
    // `link` is already a String; no extra conversion needed.
    override def toString = link
  }

  /** Represents a search to be made on a specific Url, for a specific query.
    * @param url the Url to search
    * @param query the query associated with the search
    * @param coTentatives number of attempts already made for this search
    *                     (original doc was truncated — TODO confirm intent)
    */
  case class Search(url: Url, query: Query, coTentatives: Int = 0) {
    override def toString = s"$query\nOn: $url"
  }

  /** Represents a result along with the associated base search.
    * @param search the original search
    * @param matches sequence of tuples of matching strings (organized as a
    *                sequence, as tuples vary in length)
    * @param links links found when walking the content associated with the
    *              search
    */
  case class Result(search: Search, matches: Seq[Seq[String]], links: Set[String]) {
    // A result is positive when at least one match was found.
    def isPositive = matches.nonEmpty
  }

  /** Represents a failed search. */
  case class Failed(search: Search)

  /* SEARCH WORKFLOW COMMUNICATION */

  /* Communication used by UI/Console to display results */
  case object AskResults
  case class AswResults(results: List[Result])

  /* Communication used by UI/Console to request progress stats */
  case object AskProgress
  case class AswProgress(percent: Double, nbExplored: Int, nbFound: Int, nbMatches: Int, nbMissed: Int)
}
mdemarne/panop-core
src/main/scala/panop/com/com.scala
Scala
apache-2.0
1,717
package com.github.mdr.mash.ns.core

import com.github.mdr.mash.functions.{ BoundParams, MashFunction, Parameter, ParameterModel }
import com.github.mdr.mash.runtime.MashUnit

/** `core.while`: repeatedly evaluates `block` while `condition` evaluates truthy. */
object WhileFunction extends MashFunction("core.while") {

  object Params {
    // Both parameters are lazy so that they can be re-evaluated on every
    // loop iteration rather than once at call time.
    val Condition = Parameter(
      nameOpt = Some("condition"),
      summaryOpt = Some("Condition of the while loop"),
      isLazy = true)
    val Block = Parameter(
      nameOpt = Some("block"),
      summaryOpt = Some("Code to execute"),
      isLazy = true)
  }
  import Params._

  val params = ParameterModel(Condition, Block)

  def call(boundParams: BoundParams): MashUnit = {
    // Casts assume lazy arguments are always bound as MashFunction values —
    // presumably guaranteed by BoundParams for isLazy parameters (TODO confirm).
    val cond = boundParams(Condition).asInstanceOf[MashFunction]
    val block = boundParams(Block).asInstanceOf[MashFunction]
    while (cond.callNullary().isTruthy)
      block.callNullary()
    // The loop is evaluated for its side effects only.
    MashUnit
  }

  override def typeInferenceStrategy = Unit

  override def summaryOpt = Some("Loop executing the given block while the condition remains true")
}
mdr/mash
src/main/scala/com/github/mdr/mash/ns/core/WhileFunction.scala
Scala
mit
1,002
/*
 * Copyright 2015 Textocat
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.textocat.textokit.shaltef.mappings

import com.textocat.textokit.morph.fs.Wordform
import com.textocat.textokit.shaltef.mappings.impl.DefaultDepToArgMappingsBuilder

/**
 * Read-only view over a collection of dependency-to-argument mappings,
 * looked up by the lemma that triggers them.
 *
 * @author Rinat Gareev
 */
trait DepToArgMappingsHolder {

  /** @return whether any mapping is triggered by the given lemma id */
  def containsTriggerLemma(lemmaId: Int): Boolean

  /** @return all mappings triggered by the given wordform */
  def getMappingsTriggeredBy(wf: Wordform): Iterable[DepToArgMapping]
}

/** Mutable builder that accumulates mappings and produces a [[DepToArgMappingsHolder]]. */
trait DepToArgMappingsBuilder {

  /** Adds a mapping to the collection under construction. */
  def add(mp: DepToArgMapping)

  /**
   * Return defensive copy of current mapping collection
   */
  def getMappings(): Iterable[DepToArgMapping]

  /** Replaces an existing mapping with a new one. */
  def replace(old: DepToArgMapping, newMp: DepToArgMapping)

  /** Builds the final holder from the accumulated mappings. */
  def build(): DepToArgMappingsHolder
}

object DepToArgMappingsBuilder {
  /** @return a fresh builder backed by the default implementation */
  def apply(): DepToArgMappingsBuilder = new DefaultDepToArgMappingsBuilder
}
textocat/textokit-core
Textokit.ShalTeF/src/main/scala/com/textocat/textokit/shaltef/mappings/DepToArgMappingsHolder.scala
Scala
apache-2.0
1,384
package org.scalacoin.util

/**
 * Created by chris on 2/26/16.
 * Trait to implement ubiquitous factory functions across our codebase
 */
trait Factory[T] {

  /**
   * Creates a T out of a hex string, by decoding it to bytes and
   * delegating to [[fromBytes]].
   * @param hex hex-encoded representation of a T
   * @return the parsed instance
   */
  def fromHex(hex : String) : T = fromBytes(BitcoinSUtil.decodeHex(hex))

  /**
   * Creates a T out of a sequence of bytes.
   * @param bytes raw byte representation of a T
   * @return the parsed instance
   */
  def fromBytes(bytes : Seq[Byte]) : T
}
TomMcCabe/scalacoin
src/main/scala/org/scalacoin/util/Factory.scala
Scala
mit
440
/* Copyright 2009-2016 EPFL, Lausanne */

package leon.isabelle

import scala.concurrent._
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

import leon._
import leon.frontends.scalac.ExtractionPhase
import leon.purescala.Definitions._
import leon.utils._
import leon.solvers.isabelle._
import leon.test._
import leon.utils.PreprocessingPhase

/** Regression test: the Leon library definitions must load into Isabelle without error. */
class IsabelleLibrarySuite extends LeonRegressionSuite {

  /** Phase that does nothing beyond forcing the Isabelle environment to initialize. */
  object IsabelleNoopPhase extends UnitPhase[Program] {
    val name = "isabelle-noop"
    val description = "Isabelle definitions"

    implicit val debugSection = DebugSectionIsabelle

    // NOTE(review): blocks indefinitely (Duration.Inf) until the Isabelle
    // environment is ready — acceptable only because this runs inside a test.
    def apply(context: LeonContext, program: Program): Unit =
      Await.result(IsabelleEnvironment(context, program).map(_ => ()), Duration.Inf)
  }

  test("Define the library") {
    // Extract and preprocess all functions ("--functions=_"), then run the
    // no-op Isabelle phase; failure anywhere fails the test.
    val pipeline = ExtractionPhase andThen new PreprocessingPhase andThen IsabelleNoopPhase

    val ctx = Main.processOptions(Seq("--functions=_")).copy(reporter = new TestSilentReporter())
    pipeline.run(ctx, Nil)
  }
}
epfl-lara/leon
src/test/scala/leon/isabelle/IsabelleLibrarySuite.scala
Scala
gpl-3.0
1,042
package monocle.std

import monocle.{Iso, Lens, PTraversal}
import monocle.function._

// Empty stubs kept only for source compatibility; the optics instances for
// Tuple3 now live on the typeclass companion objects (since 1.4.0).
@deprecated("instances have been move to typeclass companion object", since = "1.4.0")
object tuple3

@deprecated("instances have been move to typeclass companion object", since = "1.4.0")
trait Tuple3Optics
fkz/Monocle
core/shared/src/main/scala/monocle/std/Tuple3.scala
Scala
mit
295
/*
 * Copyright 2017 PayPal
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.squbs.cluster.test

import akka.testkit.ImplicitSender
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FlatSpecLike, Matchers}
import org.squbs.cluster._
import org.squbs.testkit.Timeouts._

/** Shared helpers for multi-actor-system ZooKeeper cluster tests:
  * cluster lifecycle, consistency checks, and member-killing utilities.
  */
trait ZkClusterTestHelper extends FlatSpecLike with ImplicitSender with Matchers
  with BeforeAndAfterAll with BeforeAndAfterEach {
  me: ZkClusterMultiActorSystemTestKit =>

  override def beforeAll(): Unit = startCluster()

  override def afterAll(): Unit = shutdownCluster()

  override val timeout = awaitMax

  // Six actor systems participate in each test cluster.
  override val clusterSize: Int = 6

  override def afterEach(): Unit = {
    // NOTE(review): sleep-based settling between tests (and after kills below)
    // is timing-sensitive and a potential source of flakiness.
    Thread.sleep(timeout.toMillis / 10)
  }

  /** Asserts that every cluster extension reports the expected leader. */
  protected def checkLeaderInSync(leader: ZkLeadership) = {
    zkClusterExts foreach {
      case (name, ext) =>
        ext tell (ZkQueryLeadership, self)
        expectMsg(timeout, leader)
    }
  }

  /** Asserts that every cluster extension reports exactly the expected member set. */
  protected def checkMembersInSync(expected: Set[String]) = {
    zkClusterExts foreach {
      case (name, ext) =>
        ext tell (ZkQueryMembership, self)
        expectMsgType[ZkMembership](timeout).members map (_.system) should be (expected)
    }
  }

  /** Kills the leader's actor system and returns its name. */
  protected def killLeader(leader: ZkLeadership) = {
    val toBeKilled = leader.address.system
    killSystem(toBeKilled)
    Thread.sleep(timeout.toMillis / 10)
    toBeKilled
  }

  /** Kills a random non-leader actor system and returns its name. */
  protected def killFollower(leader: ZkLeadership) = {
    val leaderName = leader.address.system
    val toBeKilled = pickASystemRandomly(Some(leaderName))
    killSystem(toBeKilled)
    Thread.sleep(timeout.toMillis / 10)
    toBeKilled
  }

  /** Queries the named system's cluster extension for the current leader. */
  protected def getLeader(name: String) = {
    zkClusterExts(name) tell (ZkQueryLeadership, self)
    expectMsgType[ZkLeadership](timeout)
  }

  // Names of all systems initially in the cluster.
  protected val originalMembers = (0 until clusterSize).map(int2SystemName).toSet
}
SarathChandran/squbs
squbs-zkcluster/src/test/scala/org/squbs/cluster/test/ZkClusterTestHelper.scala
Scala
apache-2.0
2,346
package org.sameersingh.ervisualizer.data

import org.sameersingh.ervisualizer.kba._
import play.api.libs.json._
import play.api.libs.functional.syntax._

/**
 * Created by sameer on 7/20/14.
 */
// Play-json Writes instances for the visualizer's data model. Most are
// macro-generated via Json.writes; a few are hand-written where the default
// encoding does not fit (pairs as 2-element JSON arrays, DocumentKba).
object JsonWrites {

  // Encodes each (String, String) pair as a 2-element JSON array.
  implicit val seqStringPairWrites: Writes[Seq[(String, String)]] = new Writes[Seq[(String, String)]] {
    override def writes(o: Seq[(String, String)]): JsValue = {
      Json.toJson(o.map(p => Json.toJson(Seq(p._1, p._2))))
    }
  }

  // Deliberately NOT implicit at top level: only brought into implicit scope
  // locally inside provWrites below.
  val seqIntPairWrites: Writes[Seq[(Int, Int)]] = new Writes[Seq[(Int, Int)]] {
    override def writes(o: Seq[(Int, Int)]): JsValue = {
      Json.toJson(o.map(p => Json.toJson(Seq(p._1, p._2))))
    }
  }

  implicit val provWrites = {
    implicit val seqIntPairWritesImplicit = seqIntPairWrites
    Json.writes[Provenance]
  }
  implicit val senWrites = Json.writes[Sentence]
  implicit val docWrites = Json.writes[Document]
  implicit val entityHeaderWrites = Json.writes[EntityHeader]
  implicit val entityInfoWrites = Json.writes[EntityInfo]
  implicit val entityFbWrites = Json.writes[EntityFreebase]
  implicit val entityTxtWrites = Json.writes[EntityText]
  implicit val entityTypeProvWrites = Json.writes[TypeModelProvenances]
  implicit val relationHeaderWrites = Json.writes[RelationHeader]
  implicit val relationFreebaseWrites = Json.writes[RelationFreebase]
  implicit val relationTextWrites = Json.writes[RelationText]
  implicit val relationProvWrites = Json.writes[RelModelProvenances]
  implicit val wordWrites = Json.writes[Word]
  implicit val clusterWrites = Json.writes[Cluster]
  implicit val stalenessWrites = Json.writes[Staleness]
  implicit val documentWrites = Json.writes[Doc]
  implicit val entityWrites = Json.writes[Entity]
  implicit val stalenessKbaWrites = Json.writes[StalenessKba]

  // Hand-written combinator form — presumably because the macro form is not
  // applicable for DocumentKba (TODO confirm against its definition).
  implicit val docKbaWrites: Writes[DocumentKba] = (
    (JsPath \\ "streamid").write[String] and
    (JsPath \\ "timestamp").write[Long] and
    (JsPath \\ "relevance").write[Int] and
    (JsPath \\ "score").write[Int] and
    (JsPath \\ "ci").write[Int] and
    (JsPath \\ "lambdas").write[Seq[StalenessKba]]
  )(unlift(DocumentKba.unapply))

  implicit val entityKbaWrites = Json.writes[EntityKba]
  implicit val wordKbaWrites = Json.writes[WordKba]
}

// Mirror-image Reads instances; must stay structurally in sync with JsonWrites.
object JsonReads {

  // Decodes 2-element JSON arrays back into (String, String) pairs.
  implicit val seqStringPairReads: Reads[Seq[(String, String)]] = new Reads[Seq[(String, String)]] {
    override def reads(json: JsValue): JsResult[Seq[(String, String)]] = {
      Json.fromJson[Seq[Seq[String]]](json).flatMap(seqs => JsSuccess(seqs.map(seq => seq(0) -> seq(1))))
    }
  }

  // Deliberately NOT implicit at top level; scoped locally inside provReads.
  val seqIntPairReads: Reads[Seq[(Int, Int)]] = new Reads[Seq[(Int, Int)]] {
    override def reads(json: JsValue): JsResult[Seq[(Int, Int)]] = {
      Json.fromJson[Seq[Seq[Int]]](json).flatMap(seqs => JsSuccess(seqs.map(seq => seq(0) -> seq(1))))
    }
  }

  implicit val provReads = {
    implicit val seqIntPairReadsImplicit = seqIntPairReads
    Json.reads[Provenance]
  }
  implicit val senReads = Json.reads[Sentence]
  implicit val docReads = Json.reads[Document]
  implicit val entityHeaderReads = Json.reads[EntityHeader]
  implicit val entityInfoReads = Json.reads[EntityInfo]
  implicit val entityFbReads = Json.reads[EntityFreebase]
  implicit val entityTxtReads = Json.reads[EntityText]
  implicit val entityTypeProvReads = Json.reads[TypeModelProvenances]
  implicit val relationHeaderReads = Json.reads[RelationHeader]
  implicit val relationFreebaseReads = Json.reads[RelationFreebase]
  implicit val relationTextReads = Json.reads[RelationText]
  implicit val relationProvReads = Json.reads[RelModelProvenances]
  implicit val wordReads = Json.reads[Word]
  implicit val clusterReads = Json.reads[Cluster]
  implicit val stalenessReads = Json.reads[Staleness]
  implicit val documentReads = Json.reads[Doc]
  implicit val entityReads = Json.reads[Entity]
  implicit val stalenessKbaReads = Json.reads[StalenessKba]

  implicit val docKbaReads: Reads[DocumentKba] = (
    (JsPath \\ "streamid").read[String] and
    (JsPath \\ "timestamp").read[Long] and
    (JsPath \\ "relevance").read[Int] and
    (JsPath \\ "score").read[Int] and
    (JsPath \\ "ci").read[Int] and
    (JsPath \\ "lambdas").read[Seq[StalenessKba]]
  )(DocumentKba.apply _)

  implicit val entityKbaReads = Json.reads[EntityKba]
  implicit val wordKbaReads = Json.reads[WordKba]

  implicit val clusterKbaReads: Reads[ClusterKba] = (
    (JsPath \\ "cj").read[Int] and
    (JsPath \\ "cj_emb").read[Seq[WordKba]]
  )(ClusterKba.apply _)

  implicit val embeddingKbaReads: Reads[EmbeddingKba] = (
    (JsPath \\ "streamid").read[String] and
    (JsPath \\ "timestamp").read[Long] and
    (JsPath \\ "di").read[Seq[WordKba]] and
    (JsPath \\ "clusters").read[Seq[ClusterKba]]
  )(EmbeddingKba.apply _)
}
sameersingh/er-visualizer
app/org/sameersingh/ervisualizer/data/JsonWrites.scala
Scala
bsd-2-clause
4,699
package asobu.distributed.service

import java.io.File

import asobu.distributed.EndpointDefinition
import asobu.distributed.gateway.Endpoint.Prefix
import play.routes.compiler.{Route, RoutesCompilationError, RoutesFileParser}

import scala.io.Source

/** Parses Play-style routes content/resources into [[EndpointDefinition]]s,
  * accepting only plain route lines (no includes or other directives).
  */
object EndpointDefinitionParser {

  /**
   * for testing purpose only
   * @param prefix prefix applied to every parsed route
   * @param content raw routes-file content
   * @param createEndpointDef factory turning each parsed route into a definition
   * @return all compilation errors, or the endpoint definitions
   */
  private[distributed] def parse(
    prefix: Prefix,
    content: String,
    createEndpointDef: (Route, Prefix) ⇒ EndpointDefinition
  ): Either[Seq[RoutesCompilationError], List[EndpointDefinition]] = {
    parseContent(content, "remote-routes").right.map(_.map(createEndpointDef(_, prefix)))
  }

  //to conform to play api
  private def placeHolderFile(resourceName: String) = new File(resourceName)

  /**
   * Loads a routes file from the classpath and parses it.
   * @param resourceName classpath resource containing the routes
   * @return all compilation errors (including "not found"), or the routes
   */
  def parseResource(
    resourceName: String = "remote.routes"
  ): Either[Seq[RoutesCompilationError], List[Route]] = {

    // Fixed typo in the user-facing message: "exsit" -> "exist".
    def routesFileNotFound = RoutesCompilationError(
      placeHolderFile(resourceName),
      s"$resourceName doesn't exist in resources.",
      None,
      None
    )

    Option(getClass.getClassLoader.getResourceAsStream(resourceName))
      .toRight(List(routesFileNotFound))
      .right.flatMap { inputStream ⇒
        val content = Source.fromInputStream(inputStream).mkString
        parseContent(content, resourceName)
      }
  }

  /**
   * Parses routes-file content, rejecting any rule that is not a plain route.
   * @param content raw routes-file content
   * @param resourceName name used for error reporting only
   * @return all compilation errors, or the parsed routes
   */
  def parseContent(
    content: String,
    resourceName: String
  ): Either[Seq[RoutesCompilationError], List[Route]] = {
    import cats.std.either._
    import cats.std.list._
    import cats.syntax.traverse._
    val phf = placeHolderFile(resourceName) //to conform to play api
    lazy val unsupportedError = Seq(RoutesCompilationError(phf, "doesn't support anything but route", None, None))
    RoutesFileParser.parseContent(content, phf).right.flatMap { routes ⇒
      // `?` is kind-projector syntax, fixing the error type of Either.
      routes.traverse[Either[Seq[RoutesCompilationError], ?], Route] {
        case r: Route ⇒ Right(r)
        case _        ⇒ Left(unsupportedError)
      }
    }
  }
}
kailuowang/asobu
distributed/src/main/scala/asobu/distributed/service/EndpointDefinitionParser.scala
Scala
apache-2.0
2,025
/**
 * Copyright (C) 2015-2016 DANS - Data Archiving and Networked Services (info@dans.knaw.nl)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package nl.knaw.dans.easy.stage.fileitem

import nl.knaw.dans.easy.stage.AbstractConfSpec
import org.rogach.scallop.ScallopConf

/** Runs the shared AbstractConfSpec checks against the file-item CLI configuration. */
class FileItemConfSpec extends AbstractConfSpec {

  // Provides the configuration under test; `dummy` presumably builds a
  // FileItemConf without parsing real arguments — TODO confirm.
  override def getConf: ScallopConf = FileItemConf.dummy
}
ekoi/easy-stage-dataset
src/test/scala/nl/knaw/dans/easy/stage/fileitem/FileItemConfSpec.scala
Scala
apache-2.0
900
/**
 * The MIT License (MIT)
 * <p/>
 * Copyright (c) 2016 ScalateKids
 * <p/>
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 * <p/>
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 * <p/>
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 * <p/>
 * @author Scalatekids
 * @version 1.0
 * @since 1.0
 */
package com.actorbase.cli.models

/**
 * The "export" command of the ActorbaseCLI.
 *
 * Extends the Command trait in the models package, following the Command
 * design pattern: execution is delegated to a receiver.
 *
 * @param cr the command receiver that performs the actual export
 */
class ExportCommand(cr: CommandReceiver) extends Command {

  /**
   * Delegates to the receiver's export method.
   *
   * @return a String containing the result to return to the user of the Command invoked
   */
  override def execute() : String = cr.export
}
ScalateKids/Actorbase-Client
src/main/scala/com/actorbase/cli/models/ExportCommand.scala
Scala
mit
1,804
/**
 * @author Yuuto
 */
package yuuto.enhancedinventories.network

import net.minecraft.tileentity.TileEntity
import net.minecraft.world.World
import net.minecraftforge.common.DimensionManager
import cpw.mods.fml.common.network.simpleimpl.IMessage
import cpw.mods.fml.common.network.simpleimpl.IMessageHandler
import cpw.mods.fml.common.network.simpleimpl.MessageContext
import yuuto.yuutolib.tile.IRedstoneControl.ControlMode
import yuuto.yuutolib.tile.IRedstoneControl

/**
 * Handles an incoming [[MessageRedstoneControl]] packet: applies the requested
 * redstone control mode to the targeted tile entity, if it supports it.
 */
class MessageRedstoneControlHandler extends IMessageHandler[MessageRedstoneControl, IMessage] {

  override def onMessage(message: MessageRedstoneControl, ctx: MessageContext): IMessage = {
    val controlMode: ControlMode = ControlMode.values()(message.id)
    // NOTE(review): getWorld may return null for an unloaded dimension, which
    // would NPE below — same behavior as the original; confirm dim is always loaded.
    val world: World = DimensionManager.getWorld(message.dim)
    val tile: TileEntity = world.getTileEntity(message.x, message.y, message.z)
    // Pattern match instead of null-check + isInstanceOf/asInstanceOf; a null
    // tile simply matches the default case.
    tile match {
      case control: IRedstoneControl => control.setControl(controlMode)
      case _ => // no tile there, or one without redstone control: ignore
    }
    null // no reply packet
  }
}
AnimeniacYuuto/EnhancedInventories
src/main/scala/yuuto/enhancedinventories/network/MessageRedstoneControlHandler.scala
Scala
gpl-2.0
1,045
/*
 * Licensed to The Apereo Foundation under one or more contributor license
 * agreements. See the NOTICE file distributed with this work for additional
 * information regarding copyright ownership.
 *
 * The Apereo Foundation licenses this file to you under the Apache License,
 * Version 2.0, (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.tle.web.sections.equella

import com.tle.web.sections.SectionWriter
import com.tle.web.sections.events.PreRenderContext
import com.tle.web.sections.render.SectionRenderable

/**
 * Adapts plain Scala functions to the [[SectionRenderable]] interface.
 *
 * @param render invoked with the writer to produce the markup
 * @param prerender optional pre-render hook; defaults to a no-op
 */
class ScalaSectionRenderable(render: SectionWriter => Unit, prerender: PreRenderContext => Unit = _ => ())
    extends SectionRenderable {
  override def realRender(writer: SectionWriter): Unit = render(writer)
  override def preRender(info: PreRenderContext): Unit = prerender(info)
}
equella/Equella
Source/Plugins/Core/com.equella.core/scalasrc/com/tle/web/sections/equella/ScalaSectionRenderable.scala
Scala
apache-2.0
1,306
package io.scalajs.nodejs
package zlib

import scala.scalajs.js

/**
 * Compress data using deflate, and do not append a zlib header.
 *
 * Scala.js facade trait (`@js.native`): it carries no members of its own and
 * simply brands the underlying Node.js zlib object with the operations
 * inherited from [[CompressionAlgorithm]].
 *
 * @author lawrence.daniels@gmail.com
 */
@js.native
trait DeflateRaw extends CompressionAlgorithm
scalajs-io/nodejs
app/common/src/main/scala/io/scalajs/nodejs/zlib/DeflateRaw.scala
Scala
apache-2.0
235
package unof.cv.base.charLib

/**
 * Factory and extractor for [[CMAdress]] — an "address" into a
 * [[CharacterLibrary]] made of a category index, a part index, a layer index
 * and a [[LayersSelector]] telling whether the layer index points into the
 * part's images or its shapes. A value of -1 for an index means
 * "not selected at this level".
 */
object CMAdress {
  /**
   * Builds an address for a concrete (category, part, layer), inferring
   * whether the layer lives in the part's image list or shape list.
   */
  def apply(cm: CharacterLibrary, category: Int, part: Int, layer: Int) = {
    val p = cm.getPart(category, part)
    val layerSelect = {
      // NOTE(review): picks SelectImages when the image list is long enough to
      // contain `layer` OR when there are no shapes at all — confirm this
      // tie-break is the intended priority between the two lists.
      if (p.images.size > layer || p.shapes.isEmpty) SelectImages
      else SelectShapes
    }
    new CMAdress(category, part, layer, layerSelect)
  }
  /** Empty selection: nothing selected at any level. */
  def apply() = new CMAdress(-1, -1, -1, SelectNone)
  /** Category-only selection. */
  def apply(category: Int) = new CMAdress(category, -1, -1, SelectNone)
  /** Category + part selection (no layer). */
  def apply(category: Int, part: Int) = new CMAdress(category, part, -1, SelectNone)
  /**
   * Normalizing constructor: any negative index truncates the selection at
   * that level, and SelectNone forces the layer index back to -1, so the
   * resulting address is always internally consistent.
   */
  def apply(
    category: Int,
    part: Int,
    layer: Int,
    layerSelect: LayersSelector) = {
    if (category < 0)
      new CMAdress(-1, -1, -1, SelectNone)
    else if (part < 0)
      new CMAdress(category, -1, -1, SelectNone)
    else if (layer < 0)
      new CMAdress(category, part, -1, SelectNone)
    else if (layerSelect == SelectNone)
      new CMAdress(category, part, -1, SelectNone)
    else
      new CMAdress(category, part, layer, layerSelect)
  }
  /** Derives the selector from the runtime type of the given layer. */
  def apply(
    category: Int,
    part: Int,
    layer: Int,
    cmlayer: CMLayer): CMAdress = CMAdress(category, part, layer, LayersSelector(cmlayer))
  def unapply(a: CMAdress) = Some((a.category, a.part, a.layer, a.layerSelect))
  /** Implicit view of an address as a plain 4-tuple. */
  implicit def toT4(a: CMAdress) = (a.category, a.part, a.layer, a.layerSelect)
}

/**
 * An immutable address into a [[CharacterLibrary]]; use the companion's
 * `apply` overloads to construct consistent instances.
 *
 * @param category    index into `cm.categories`, or -1 when unselected
 * @param part        index into the category's `possibleParts`, or -1
 * @param layer       index into the part's image/shape list, or -1
 * @param layerSelect which of the two layer lists `layer` indexes into
 */
sealed class CMAdress(
    val category: Int,
    val part: Int,
    val layer: Int,
    val layerSelect: LayersSelector) {

  /** The selected category, if any. */
  def getCategory(cm: CharacterLibrary) = {
    if (category < 0) None
    else Some(cm.categories(category))
  }

  /** The selected layer (image or shape, per `layerSelect`), if any. */
  def getLayer(cm: CharacterLibrary) = {
    getPart(cm) match {
      case None => None
      case Some(sPart) => layerSelect match {
        case SelectShapes => Some(sPart.shapes(layer))
        case SelectImages => Some(sPart.images(layer))
        case SelectNone => None
      }
    }
  }

  /** Runs `f` on the selected part, if a part is selected. */
  def forSelectedPart(cm: CharacterLibrary)(f: (CMPart) => Unit) =
    if (part >= 0) {
      f(cm.categories(category).possibleParts(part))
    }

  // NOTE(review): guards on `layer >= 0` rather than `part >= 0`, i.e. a part
  // is only returned when a layer is also selected — confirm this asymmetry
  // with forSelectedPart is intentional.
  def getPart(cm: CharacterLibrary) =
    if (layer >= 0) Some(cm.categories(category).possibleParts(part))
    else None

  /** Runs `f` on the selected image; no-op unless an image layer is selected. */
  def forSelectedImage(cm: CharacterLibrary)(f: (CMImage) => Unit) = {
    forSelectedPart(cm)(layerSelect.forImages(_)(s => f(s(layer))))
  }

  /** Runs `f` on the selected shape; no-op unless a shape layer is selected. */
  def forSelectedShape(cm: CharacterLibrary)(f: (CMShape) => Unit) = {
    forSelectedPart(cm)(layerSelect.forShapes(_)(s => f(s(layer))))
  }

  /** Runs `f` on the selected layer regardless of its kind. */
  def forWhateverSelected(cm: CharacterLibrary)(f: (CMLayer) => Unit) = {
    forSelectedPart(cm)(layerSelect.forAnyLayers(_)(s => f(s(layer))))
  }

  /**
   * Dispatches on the deepest selected level: layer, else part, else category.
   * Does nothing when the address is completely empty.
   */
  def forSelected(
    cm: CharacterLibrary,
    fl: (CMLayer) => Unit,
    fp: (CMPart) => Unit,
    fc: (CMCategory) => Unit) = {
    if (layer >= 0) {
      forWhateverSelected(cm)(fl)
    } else if (part >= 0)
      fp(cm.categories(category).possibleParts(part))
    else if (category >= 0)
      fc(cm.categories(category))
  }

  /**
   * Like the other `forSelected`, but with separate callbacks for image and
   * shape layers; only the one matching `layerSelect` will actually fire.
   */
  def forSelected(
    cm: CharacterLibrary,
    fi: (CMImage) => Unit,
    fs: (CMShape) => Unit,
    fp: (CMPart) => Unit,
    fc: (CMCategory) => Unit) = {
    if (layer >= 0) {
      forSelectedImage(cm)(fi)
      forSelectedShape(cm)(fs)
    } else if (part >= 0)
      fp(cm.categories(category).possibleParts(part))
    else if (category >= 0)
      fc(cm.categories(category))
  }

  /**
   * Maps the deepest selected level to a value.
   *
   * @throws UnsupportedOperationException if a layer index is set but the
   *                                       selector is SelectNone (an
   *                                       inconsistent address)
   * @throws NoSuchElementException        if nothing at all is selected
   */
  def mapSelected[A](
    cm: CharacterLibrary,
    fl: (CMLayer) => A,
    fp: (CMPart) => A,
    fc: (CMCategory) => A): A = {
    if (layer >= 0) {
      val p = cm.categories(category).possibleParts(part)
      layerSelect match {
        case SelectImages => fl(p.images(layer))
        case SelectShapes => fl(p.shapes(layer))
        case SelectNone => throw new UnsupportedOperationException("Positive layer with slect none")
      }
    } else if (part >= 0)
      fp(cm.categories(category).possibleParts(part))
    else if (category >= 0)
      fc(cm.categories(category))
    else
      throw new NoSuchElementException("Empty selection mapping")
  }

  /** As above, with distinct mappers for image versus shape layers. */
  def mapSelected[A](
    cm: CharacterLibrary,
    fi: (CMImage) => A,
    fs: (CMShape) => A,
    fp: (CMPart) => A,
    fc: (CMCategory) => A): A = {
    if (layer >= 0) {
      val p = cm.categories(category).possibleParts(part)
      layerSelect match {
        case SelectImages => fi(p.images(layer))
        case SelectShapes => fs(p.shapes(layer))
        case SelectNone => throw new UnsupportedOperationException("Positive layer with slect none")
      }
    } else if (part >= 0)
      fp(cm.categories(category).possibleParts(part))
    else if (category >= 0)
      fc(cm.categories(category))
    else
      throw new NoSuchElementException("Empty selection mapping")
  }

  /** Display name of the deepest selected element (layer, part or category). */
  def nameSelected(cm: CharacterLibrary) = {
    mapSelected(cm, _.name, _.partName, _.categoryName)
  }
}

/** Builds the selector matching a concrete layer's runtime type. */
object LayersSelector {
  def apply(layer: CMLayer) = {
    layer match {
      case _: CMShape => SelectShapes
      case _: CMImage => SelectImages
    }
  }
}

/**
 * Strategy that picks which of a part's two layer lists (images or shapes)
 * an operation applies to. The three implementations below route to the
 * image list, the shape list, or neither; `update*` methods return the
 * (possibly replaced) lists, leaving the non-targeted list untouched.
 */
sealed trait LayersSelector {
  def forImages(part: CMPart)(f: (Seq[CMImage]) => Unit)
  def forShapes(part: CMPart)(f: (Seq[CMShape]) => Unit)
  def forAnyLayers(part: CMPart)(f: (Seq[CMLayer]) => Unit)
  def mapSelectedImages[A](part: CMPart)(f: (Seq[CMImage]) => Seq[A]): Seq[A]
  def mapSelectedShapes[A](part: CMPart)(f: (Seq[CMShape]) => Seq[A]): Seq[A]
  def mapSelectedLayers[A](part: CMPart)(f: (Seq[CMLayer]) => Seq[A]): Seq[A]
  def updateImages(part: CMPart)(f: (Seq[CMImage]) => Seq[CMLayer]): Seq[CMLayer]
  def updateShapes(part: CMPart)(f: (Seq[CMShape]) => Seq[CMLayer]): Seq[CMLayer]
  def updateAnyLayers(part: CMPart)(f: (Seq[CMLayer]) => Seq[CMLayer]): (Seq[CMLayer], Seq[CMLayer])
}

/** Selector targeting the part's image list; shape operations are no-ops. */
object SelectImages extends LayersSelector {
  def forImages(part: CMPart)(f: (Seq[CMImage]) => Unit) = f(part.images)
  def forShapes(part: CMPart)(f: (Seq[CMShape]) => Unit) = {}
  def forAnyLayers(part: CMPart)(f: (Seq[CMLayer]) => Unit) = f(part.images)
  def mapSelectedImages[A](part: CMPart)(f: (Seq[CMImage]) => Seq[A]): Seq[A] = f(part.images)
  def mapSelectedShapes[A](part: CMPart)(f: (Seq[CMShape]) => Seq[A]): Seq[A] = Nil
  def mapSelectedLayers[A](part: CMPart)(f: (Seq[CMLayer]) => Seq[A]): Seq[A] = f(part.images)
  def updateImages(part: CMPart)(f: (Seq[CMImage]) => Seq[CMLayer]): Seq[CMLayer] = f(part.images)
  def updateShapes(part: CMPart)(f: (Seq[CMShape]) => Seq[CMLayer]): Seq[CMLayer] = part.shapes
  def updateAnyLayers(part: CMPart)(f: (Seq[CMLayer]) => Seq[CMLayer]): (Seq[CMLayer], Seq[CMLayer]) =
    (f(part.images), part.shapes)
}

/** Selector targeting the part's shape list; image operations are no-ops. */
object SelectShapes extends LayersSelector {
  def forImages(part: CMPart)(f: (Seq[CMImage]) => Unit) = {}
  def forShapes(part: CMPart)(f: (Seq[CMShape]) => Unit) = f(part.shapes)
  def forAnyLayers(part: CMPart)(f: (Seq[CMLayer]) => Unit) = f(part.shapes)
  def mapSelectedImages[A](part: CMPart)(f: (Seq[CMImage]) => Seq[A]): Seq[A] = Nil
  def mapSelectedShapes[A](part: CMPart)(f: (Seq[CMShape]) => Seq[A]): Seq[A] = f(part.shapes)
  def mapSelectedLayers[A](part: CMPart)(f: (Seq[CMLayer]) => Seq[A]): Seq[A] = f(part.shapes)
  def updateImages(part: CMPart)(f: (Seq[CMImage]) => Seq[CMLayer]): Seq[CMLayer] = part.images
  def updateShapes(part: CMPart)(f: (Seq[CMShape]) => Seq[CMLayer]): Seq[CMLayer] = f(part.shapes)
  def updateAnyLayers(part: CMPart)(f: (Seq[CMLayer]) => Seq[CMLayer]): (Seq[CMLayer], Seq[CMLayer]) =
    (part.images, f(part.shapes))
}

/** Null-object selector: every operation is a no-op / identity. */
object SelectNone extends LayersSelector {
  def forImages(part: CMPart)(f: (Seq[CMImage]) => Unit) = {}
  def forShapes(part: CMPart)(f: (Seq[CMShape]) => Unit) = {}
  def forAnyLayers(part: CMPart)(f: (Seq[CMLayer]) => Unit) = {}
  def mapSelectedImages[A](part: CMPart)(f: (Seq[CMImage]) => Seq[A]): Seq[A] = Nil
  def mapSelectedShapes[A](part: CMPart)(f: (Seq[CMShape]) => Seq[A]): Seq[A] = Nil
  def mapSelectedLayers[A](part: CMPart)(f: (Seq[CMLayer]) => Seq[A]): Seq[A] = Nil
  def updateImages(part: CMPart)(f: (Seq[CMImage]) => Seq[CMLayer]): Seq[CMLayer] = part.images
  def updateShapes(part: CMPart)(f: (Seq[CMShape]) => Seq[CMLayer]): Seq[CMLayer] = part.shapes
  def updateAnyLayers(part: CMPart)(f: (Seq[CMLayer]) => Seq[CMLayer]): (Seq[CMLayer], Seq[CMLayer]) =
    (part.images, part.shapes)
}
Hgjj/CharViewer
js/src/main/scala/unof/cv/base/charLib/CMAdress.scala
Scala
bsd-3-clause
8,346
/*
 * Copyright (c) 2014 Snowplow Analytics Ltd. All rights reserved.
 *
 * This program is licensed to you under the Apache License Version 2.0,
 * and you may not use this file except in compliance with the Apache License Version 2.0.
 * You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the Apache License Version 2.0 is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
 */
package com.snowplowanalytics.snowplow.enrich
package hadoop
package inputs

// Java
import java.util.UUID

// Joda-Time
import org.joda.time.DateTimeZone
import org.joda.time.format.DateTimeFormat

// Scala
import scala.util.Try
import scala.util.control.NonFatal

// Scalaz
import scalaz._
import Scalaz._

// Snowplow Utils
import com.snowplowanalytics.util.Tap._

// Snowplow Common Enrich
import common._
import outputs.EnrichedEvent

/**
 * A loader for Snowplow enriched events - i.e. the
 * TSV files generated by the Snowplow Enrichment
 * process.
 *
 * Only the handful of fields needed for shredding are populated; the rest of
 * the ~108-column enriched event is ignored.
 */
object EnrichedEventLoader {

  // Timestamp layout accepted by Redshift/Postgres; parsing is forced to UTC.
  private val RedshiftTstampFormat = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss.SSS").withZone(DateTimeZone.UTC)

  // Minimum number of TSV columns for a valid (legacy) enriched event.
  private val FieldCount = 108

  private object FieldIndexes { // 0-indexed
    val collectorTstamp = 3
    val eventId = 6
    val contexts = 52
    val unstructEvent = 58
    // The two indexes below exceed FieldCount on purpose: they live in newer,
    // wider TSVs and are read only when the line actually has that many columns.
    val derivedContexts = 122
    val eventFingerprint = 129
  }

  /**
   * Converts the source string into a
   * ValidatedEnrichedEvent. Note that
   * this loads the bare minimum required
   * for shredding - basically four fields.
   *
   * @param line A line of data to convert
   * @return either a set of validation
   *         Failures or a EnrichedEvent
   *         Success.
   */
  // TODO: potentially in the future this could be replaced by some
  // kind of Scalding pack()
  def toEnrichedEvent(line: String): ValidatedEnrichedEvent = {
    // -1 keeps trailing empty columns; empty strings are normalized to null.
    val fields = line.split("\t", -1).map(f => if (f == "") null else f)
    val len = fields.length
    if (len < FieldCount)
      return s"Line does not match Snowplow enriched event (expected ${FieldCount}+ fields; found $len)".failNel[EnrichedEvent]

    val event = new EnrichedEvent().tap { e =>
      e.contexts = fields(FieldIndexes.contexts)
      e.unstruct_event = fields(FieldIndexes.unstructEvent)
      // Backward compatibility with old TSVs without a derived_contexts field
      if (fields.size >= FieldIndexes.derivedContexts + 1) {
        e.derived_contexts = fields(FieldIndexes.derivedContexts)
      }
      // Backward compatibility with old TSVs without a event_fingerprint field
      if (fields.size >= FieldIndexes.eventFingerprint + 1) {
        e.event_fingerprint = fields(FieldIndexes.eventFingerprint)
      }
    }

    // Get and validate the event ID
    val eventId = validateUuid("event_id", fields(FieldIndexes.eventId))
    for (id <- eventId) {
      event.event_id = id
    }

    // Get and validate the collector timestamp
    val collectorTstamp = validateTimestamp("collector_tstamp", fields(FieldIndexes.collectorTstamp))
    for (tstamp <- collectorTstamp) {
      event.collector_tstamp = tstamp
    }

    // Applicative combine: accumulates BOTH failures if both validations fail.
    (eventId.toValidationNel |@| collectorTstamp.toValidationNel) {
      (_,_) => event
    }
  }

  /**
   * Validates that the given field contains a valid UUID.
   *
   * @param field The name of the field being validated
   * @param str The String hopefully containing a UUID
   * @return a Scalaz ValidatedString containing either
   *         the original String on Success, or an error
   *         String on Failure.
   */
  private def validateUuid(field: String, str: String): ValidatedString = {
    // Round-trips through UUID.toString so only canonically-formatted
    // (lower-case, correctly padded) UUIDs are accepted.
    def check(s: String)(u: UUID): Boolean = u != null && s == u.toString
    val uuid = Try(UUID.fromString(str)).toOption.filter(check(str))
    uuid match {
      case Some(_) => str.success
      case None => s"Field [$field]: [$str] is not a valid UUID".fail
    }
  }

  /**
   * Validates that the given field contains a valid
   * (Redshift/Postgres-compatible) timestamp.
   *
   * @param field The name of the field being validated
   * @param str The String hopefully containing a
   *        Redshift/PG-compatible timestamp
   * @return a Scalaz ValidatedString containing either
   *         the original String on Success, or an error
   *         String on Failure.
   */
  private def validateTimestamp(field: String, str: String): ValidatedString =
    try {
      val _ = RedshiftTstampFormat.parseDateTime(str)
      str.success
    } catch {
      case NonFatal(e) => s"Field [$field]: [$str] is not in the expected Redshift/Postgres timestamp format".fail
    }
}
simplybusiness/snowplow-fork
3-enrich/scala-hadoop-shred/src/main/scala/com.snowplowanalytics.snowplow.enrich/hadoop/inputs/EnrichedEventLoader.scala
Scala
apache-2.0
4,881
package net.sansa_stack.rdf.spark.io

import com.esotericsoftware.kryo.Kryo
import de.javakaffee.kryoserializers.guava.HashMultimapSerializer
import net.sansa_stack.rdf.common.kryo.jena.JenaKryoSerializers._
import org.apache.spark.serializer.KryoRegistrator

/**
 * Spark Kryo registrator wiring custom serializers for Jena RDF types
 * (nodes, variables, expressions, triples) and the partitioning classes.
 *
 * NOTE(review): Kryo assigns class IDs in registration order, so the order of
 * the calls below must stay stable across driver and executors — do not
 * reorder.
 *
 * Created by nilesh on 01/06/2016.
 */
class JenaKryoRegistrator extends KryoRegistrator {
  override def registerClasses(kryo: Kryo) {
    HashMultimapSerializer.registerSerializers(kryo);

    // Partitioning
    kryo.register(classOf[net.sansa_stack.rdf.common.partition.core.RdfPartitionDefault])
    kryo.register(classOf[Array[net.sansa_stack.rdf.common.partition.core.RdfPartitionDefault]])

    // Jena node hierarchy: the abstract Node and most concrete subclasses share
    // NodeSerializer; variables and ANY get their own serializers.
    kryo.register(classOf[org.apache.jena.graph.Node], new NodeSerializer)
    kryo.register(classOf[Array[org.apache.jena.graph.Node]], new NodeSerializer)
    kryo.register(classOf[org.apache.jena.sparql.core.Var], new VarSerializer)
    kryo.register(classOf[org.apache.jena.sparql.expr.Expr], new ExprSerializer)
    kryo.register(classOf[org.apache.jena.graph.Node_Variable], new VariableNodeSerializer)
    kryo.register(classOf[org.apache.jena.graph.Node_Blank], new NodeSerializer)
    kryo.register(classOf[org.apache.jena.graph.Node_ANY], new ANYNodeSerializer)
    kryo.register(classOf[org.apache.jena.graph.Node_URI], new NodeSerializer)
    kryo.register(classOf[org.apache.jena.graph.Node_Literal], new NodeSerializer)
    kryo.register(classOf[org.apache.jena.graph.Triple], new TripleSerializer)
    kryo.register(classOf[Array[org.apache.jena.graph.Triple]])
    kryo.register(classOf[scala.collection.mutable.WrappedArray.ofRef[_]])
  }
}
SANSA-Stack/Spark-RDF
sansa-rdf-spark/src/main/scala/net/sansa_stack/rdf/spark/io/JenaKryoRegistrator.scala
Scala
gpl-3.0
1,623
package com.shoppingcart.model

import com.shoppingcart.model.Offers.Offer

/**
 * A checkout holding the catalogue items the customer is buying.
 *
 * Created by prasadsriramula on 19/08/2016.
 *
 * @param items the scanned catalogue items
 */
class CheckOutSystem(val items: List[CatalogueItem])

object CheckOutSystem {

  /**
   * Totals the basket and applies the given offers as deductions.
   *
   * With no offers the raw item total is returned unchanged; with offers the
   * discounted total is rendered through the "%015.2f" format (two decimal
   * places, zero padded) and parsed back to a Double.
   *
   * @param checkOutSystem the checkout whose items are being totalled
   * @param offers         discounts to subtract, each evaluated against the checkout
   * @return the (possibly discounted) total price
   */
  def amount(checkOutSystem: CheckOutSystem)(offers: List[Offer] = List.empty[Offer]) = {
    val grossTotal = checkOutSystem.items.map(_.price).sum
    if (offers.isEmpty) grossTotal
    else {
      val discountedTotal = offers.foldLeft(grossTotal) { (running, offer) =>
        running - offer(checkOutSystem)
      }
      "%015.2f".format(discountedTotal).toDouble
    }
  }
}
psriramula/ScalaShoppingCart
src/main/scala/com/shoppingcart/model/CheckOutSystem.scala
Scala
gpl-3.0
540
/*
Copyright 2013 Twitter, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package com.twitter.summingbird.online

import com.twitter.storehaus.ReadableStore
import com.twitter.util.{Future, Await}
import java.io.{ Closeable, Serializable }

// Represents the logic in the flatMap bolts.
// Contravariant in the input and covariant in the output, so operations
// compose like functions. Implementations must be serializable because they
// are shipped to the workers.
trait FlatMapOperation[-T, +U] extends Serializable with Closeable {
  /** Transforms one input into zero or more outputs, asynchronously. */
  def apply(t: T): Future[TraversableOnce[U]]

  override def close { }

  /*
   * maybeFlush may be periodically called to empty any internal state
   * Not used yet so commented out
   */
  def maybeFlush: Future[TraversableOnce[U]] = Future.value(Seq.empty[U])

  /**
   * Sequential composition: feed every output of this operation through fmo.
   *
   * TODO: Think about getting an implicit FutureCollector here, in
   * case we don't want to completely choke on large expansions (and
   * joins).
   */
  def andThen[V](fmo: FlatMapOperation[U, V]): FlatMapOperation[T, V] = {
    val self = this // Using the standard "self" at the top of the
    // trait caused a nullpointerexception after
    // serialization. I think that Kryo mis-serializes that reference.
    new FlatMapOperation[T, V] {
      def apply(t: T) = self(t).flatMap { tr =>
        val next: Seq[Future[TraversableOnce[V]]] = tr.map { fmo.apply(_) }.toIndexedSeq
        Future.collect(next).map(_.flatten) // flatten the inner
      }

      // Flushes this stage first, pushes its output through fmo, then appends
      // whatever fmo itself flushes.
      override def maybeFlush = {
        self.maybeFlush.flatMap{ x: TraversableOnce[U] =>
          val z: IndexedSeq[Future[TraversableOnce[V]]] = x.map(fmo.apply(_)).toIndexedSeq
          val w: Future[Seq[V]] = Future.collect(z).map(_.flatten)
          for {
            ws <- w
            maybes <- fmo.maybeFlush
            maybeSeq = maybes.toSeq
          } yield ws ++ maybeSeq
        }
      }

      // Closes both stages, upstream first.
      override def close { self.close; fmo.close }
    }
  }
}

/** Wraps a pure T => TraversableOnce[U] function as an operation.
  * The function is @transient and boxed via Externalizer for safe serialization. */
class FunctionFlatMapOperation[T, U](@transient fm: T => TraversableOnce[U])
  extends FlatMapOperation[T, U] {
  val boxed = Externalizer(fm)
  def apply(t: T) = Future.value(boxed.get(t))
}

/** Wraps an already-asynchronous T => Future[TraversableOnce[U]] function. */
class GenericFlatMapOperation[T, U](@transient fm: T => Future[TraversableOnce[U]])
  extends FlatMapOperation[T, U] {
  val boxed = Externalizer(fm)
  def apply(t: T) = boxed.get(t)
}

/** Maps only the key of a (key, value) pair, carrying the value through unchanged. */
class FunctionKeyFlatMapOperation[K1, K2, V](@transient fm: K1 => TraversableOnce[K2])
  extends FlatMapOperation[(K1, V), (K2, V)] {
  val boxed = Externalizer(fm)
  def apply(t: (K1, V)) = {
    Future.value(boxed.get(t._1).map{newK => (newK, t._2)})
  }
}

class IdentityFlatMapOperation[T] extends FlatMapOperation[T, T] {
  // By default we do the identity function
  def apply(t: T): Future[TraversableOnce[T]] = Future.value(Some(t))

  // But if we are composed with something else, just become it
  override def andThen[V](fmo: FlatMapOperation[T, V]): FlatMapOperation[T, V] = fmo
}

/** Factory methods for the concrete operations above. */
object FlatMapOperation {
  def identity[T]: FlatMapOperation[T, T] = new IdentityFlatMapOperation()

  def apply[T, U](fm: T => TraversableOnce[U]): FlatMapOperation[T, U] =
    new FunctionFlatMapOperation(fm)

  def generic[T, U](fm: T => Future[TraversableOnce[U]]): FlatMapOperation[T, U] =
    new GenericFlatMapOperation(fm)

  def keyFlatMap[K1, K2, V](fm: K1 => TraversableOnce[K2]): FlatMapOperation[(K1, V), (K2, V)] =
    new FunctionKeyFlatMapOperation(fm)

  /**
   * Left-joins each (K, V) produced by the flatMap against a ReadableStore,
   * yielding (K, (V, Option[JoinedV])). Both the operation and the store are
   * created lazily on first use (they are passed as suppliers so this object
   * stays serializable).
   */
  def combine[T, K, V, JoinedV](fmSupplier: => FlatMapOperation[T, (K, V)],
    storeSupplier: () => ReadableStore[K, JoinedV]): FlatMapOperation[T, (K, (V, Option[JoinedV]))] =
    new FlatMapOperation[T, (K, (V, Option[JoinedV]))] {
      lazy val fm = fmSupplier
      lazy val store = storeSupplier()
      override def apply(t: T) =
        fm.apply(t).flatMap { trav: TraversableOnce[(K, V)] =>
          val resultList = trav.toSeq // Can't go through this twice
          val keySet: Set[K] = resultList.map { _._1 }.toSet
          if (keySet.isEmpty)
            Future.value(Map.empty)
          else {
            // Do the lookup
            val mres: Map[K, Future[Option[JoinedV]]] = store.multiGet(keySet)
            val resultFutures = resultList.map { case (k, v) => mres(k).map { k -> (v, _) } }.toIndexedSeq
            Future.collect(resultFutures).map(_.toMap)
          }
        }

      override def close {
        fm.close
        // store.close returns a Future; block until it completes.
        Await.result(store.close)
      }
    }

  def write[T](sinkSupplier: () => (T => Future[Unit])) = new WriteOperation[T](sinkSupplier)
}

/** Side-effecting pass-through: writes each element to the sink, then emits it. */
class WriteOperation[T](sinkSupplier: () => (T => Future[Unit])) extends FlatMapOperation[T, T] {
  lazy val sink = sinkSupplier()
  override def apply(t: T) = sink(t).map { _ => Some(t) }
}
surabhiiyer/summingbird
summingbird-online/src/main/scala/com/twitter/summingbird/online/FlatMapOperation.scala
Scala
apache-2.0
5,050
import org.scalajs.sbtplugin.ScalaJSPlugin.AutoImport._
import sbt.Keys._
import sbt._

/**
 * sbt task definitions that package Scala.js output for React Native
 * (index.ios.js / index.android.js) and for the relay web examples.
 *
 * Each mobile launcher task follows the same recipe: copy the JS loader stub
 * to the output file, run the Scala.js linker, rewrite namespaced `require`
 * calls so the React Native bundler can resolve them, write the rewritten
 * code to scalajs-output.js, append the Scala.js launcher snippet, and
 * finally duplicate the iOS entry file as the Android entry file.
 */
object LauncherConfigs {

  /** ================ React_native task ================ */
  val fastOptMobile = Def.taskKey[File]("Generate mobile output file for fastOptJS")

  // Development (fastOptJS) variant of the mobile bundle.
  lazy val mobileLauncherFast = Seq(
    artifactPath in Compile in fastOptMobile := baseDirectory.value / "index.ios.js",
    fastOptMobile in Compile := {
      val outFile = (artifactPath in Compile in fastOptMobile).value
      val loaderFile = (resourceDirectory in Compile).value / "loader.js"
      IO.copyFile(loaderFile, outFile)
      val fullOutputCode = IO.read((fastOptJS in Compile).value.data)
      val outString = processRequireFunctionsInFastOpt(fullOutputCode)
      IO.write(baseDirectory.value / "scalajs-output.js", outString)
      val launcher = (scalaJSLauncher in Compile).value.data.content
      IO.append(outFile, launcher)
      IO.copyFile(outFile, baseDirectory.value / "index.android.js")
      outFile
    }
  )

  val fullOptMobile = Def.taskKey[File]("Generate the file given to react native")

  // Production (fullOptJS) variant of the mobile bundle.
  lazy val mobilelauncherFull = Seq(
    artifactPath in Compile in fullOptMobile := baseDirectory.value / "index.ios.js",
    fullOptMobile in Compile := {
      val outFile = (artifactPath in Compile in fullOptMobile).value
      val loaderFile = (resourceDirectory in Compile).value / "loader.js"
      IO.copyFile(loaderFile, outFile)
      val fullOutputCode = IO.read((fullOptJS in Compile).value.data)
      val outString = processRequireFunctions(fullOutputCode)
      IO.write(baseDirectory.value / "scalajs-output.js", outString)
      val launcher = (scalaJSLauncher in Compile).value.data.content
      IO.append(outFile, launcher)
      IO.copyFile(outFile, baseDirectory.value / "index.android.js")
      outFile
    }
  )

  /**
   * react-native prod bundler needs require function without name spaces
   *
   * Extracts the Scala.js exports-namespace identifier (the token between
   * "exportsNamespace:" and the next ";") from the fullOpt output and strips
   * it off every `<namespace>.require(` call.
   *
   * @param text the full, optimized Scala.js output
   * @return the same code with namespaced require calls rewritten to bare require(
   */
  def processRequireFunctions(text: String): String = {
    val SJS_NAME_SPACE = "exportsNamespace:"
    val i = text.indexOf(SJS_NAME_SPACE) + SJS_NAME_SPACE.length
    val j = text.substring(i).indexOf(";") + i
    // TODO look for non valid identifier ![_$0-9a-zA-Z]
    val nameSpace = text.substring(i, j)
    text.replaceAll(s"$nameSpace.require\\(", "require\\(")
  }

  /**
   * react-native prod bundler needs require function without name spaces
   *
   * fastOpt output always namespaces through `$g`, so the rewrite is a
   * straight literal replacement.
   *
   * @param text the fast-optimized Scala.js output
   * @return the code with `$g.require(` rewritten to `require(`
   */
  def processRequireFunctionsInFastOpt(text: String): String = {
    text.replaceAll("\\$g.require\\(", "require\\(")
  }

  val fullOptRelayMobile = Def.taskKey[File]("Generate the file given to react native relay")

  // Relay-flavoured production mobile bundle; same pipeline as mobilelauncherFull.
  lazy val mobileRelayLauncher = Seq(
    artifactPath in Compile in fullOptRelayMobile := baseDirectory.value / "index.ios.js",
    fullOptRelayMobile in Compile := {
      val outFile = (artifactPath in Compile in fullOptRelayMobile).value
      val loaderFile = (resourceDirectory in Compile).value / "loader.js"
      IO.copyFile(loaderFile, outFile)
      val fullOutputCode = IO.read((fullOptJS in Compile).value.data)
      val outString = processRequireFunctions(fullOutputCode)
      IO.write(baseDirectory.value / "scalajs-output.js", outString)
      val launcher = (scalaJSLauncher in Compile).value.data.content
      IO.append(outFile, launcher)
      IO.copyFile(outFile, baseDirectory.value / "index.android.js")
      outFile
    }
  )

  //=============================== Web =========================================/
  val relayWebExamplesAssets = "relay-web-examples/assets"

  // Redirects all Scala.js build products for the web examples into one assets dir.
  lazy val relayWebExamplesLauncher = Seq(
    crossTarget in (Compile, fullOptJS) := file(relayWebExamplesAssets),
    crossTarget in (Compile, fastOptJS) := file(relayWebExamplesAssets),
    crossTarget in (Compile, packageScalaJSLauncher) := file(relayWebExamplesAssets),
    artifactPath in (Compile, fastOptJS) := ((crossTarget in (Compile, fastOptJS)).value / ((moduleName in fastOptJS).value + "-opt.js"))
  )
}
chandu0101/sri-relay
project/LauncherConfigs.scala
Scala
apache-2.0
4,158
package com.atomist.project.common.template

import com.atomist.source.ArtifactSource

/**
 * Factory for [[MergeTool]] instances bound to a particular set of template
 * content.
 */
trait MergeToolCreator {

  /**
   * @param templateContent the artifacts the merge tool should operate over
   * @return a merge tool for that content
   */
  def createMergeTool(templateContent: ArtifactSource): MergeTool
}

/**
 * Composite creator: asks each delegate creator for a merge tool over the
 * same template content and combines the results into a single
 * [[CombinedMergeTool]].
 *
 * @param creators the delegate creators, tried in order
 */
class CombinedMergeToolCreator(
    creators: MergeToolCreator*
) extends MergeToolCreator {

  override def createMergeTool(templateContent: ArtifactSource): MergeTool = {
    val tools = creators.map(creator => creator.createMergeTool(templateContent))
    new CombinedMergeTool(tools)
  }
}
atomist/rug
src/main/scala/com/atomist/project/common/template/mergeToolCreator.scala
Scala
gpl-3.0
494
/*
 * Copyright 2014 Commonwealth Computer Research, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.locationtech.geomesa.core.data

import com.google.common.cache.{CacheBuilder, CacheLoader}
import com.typesafe.scalalogging.slf4j.Logging
import org.geotools.data._
import org.geotools.data.simple.{SimpleFeatureCollection, SimpleFeatureIterator, SimpleFeatureSource}
import org.geotools.feature.collection.SortedSimpleFeatureCollection
import org.geotools.feature.visitor.{BoundsVisitor, MaxVisitor, MinVisitor}
import org.locationtech.geomesa.core.index.QueryHints._
import org.locationtech.geomesa.core.iterators.TemporalDensityIterator.createFeatureType
import org.locationtech.geomesa.core.process.knn.KNNVisitor
import org.locationtech.geomesa.core.process.proximity.ProximityVisitor
import org.locationtech.geomesa.core.process.query.QueryVisitor
import org.locationtech.geomesa.core.process.temporalDensity.TemporalDensityVisitor
import org.locationtech.geomesa.core.process.tube.TubeVisitor
import org.locationtech.geomesa.core.process.unique.AttributeVisitor
import org.locationtech.geomesa.core.util.TryLoggingFailure
import org.opengis.feature.FeatureVisitor
import org.opengis.feature.`type`.Name
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
import org.opengis.filter.sort.SortBy
import org.opengis.util.ProgressListener

/**
 * GeoTools feature source backed by an Accumulo data store. Concrete
 * subclasses supply the data store and feature type name.
 */
trait AccumuloAbstractFeatureSource extends AbstractFeatureSource with Logging with TryLoggingFailure {
  self =>

  import org.locationtech.geomesa.utils.geotools.Conversions._

  val dataStore: AccumuloDataStore
  val featureName: Name

  // Feature change events are not supported; listeners are silently ignored.
  def addFeatureListener(listener: FeatureListener) {}

  def removeFeatureListener(listener: FeatureListener) {}

  def getSchema: SimpleFeatureType = getDataStore.getSchema(featureName)

  def getDataStore: AccumuloDataStore = dataStore

  // Size of the record table as tracked by the data store; -1 means unknown.
  def longCount = dataStore.getRecordTableSize(featureName.getLocalPart)

  // The default behavior for getCount is to use Accumulo to look up the number of entries in
  // the record table for a feature.
  // This approach gives a rough upper count for the size of the query results.
  // For Filter.INCLUDE, this is likely pretty close; all others, it is a lie.

  // Since users may want *actual* counts, there are two ways to force exact counts.
  //  First, one can set the System property "geomesa.force.count".
  //  Second, there is an EXACT_COUNT query hint.
  override def getCount(query: Query) = {
    val exactCount = query.getHints.get(EXACT_COUNT) == java.lang.Boolean.TRUE || System.getProperty("geomesa.force.count") == "true"
    if (exactCount || longCount == -1) {
      // Exact (and expensive): actually iterate the query results.
      getFeaturesNoCache(query).features().size
    } else {
      // Approximate: clamp the stored long count into Int range.
      longCount match {
        case _ if longCount > Int.MaxValue => Int.MaxValue
        case _ => longCount.toInt
      }
    }
  }

  override def getQueryCapabilities = new QueryCapabilities() {
    override def isOffsetSupported = false
    override def isReliableFIDSupported = true
    override def isUseProvidedFIDSupported = true
    override def supportsSorting(sortAttributes: Array[SortBy]) = true
  }

  // Applies any requested property transforms to the query, then wraps it in
  // a lazily-evaluated feature collection.
  protected def getFeaturesNoCache(query: Query): SimpleFeatureCollection = {
    org.locationtech.geomesa.core.index.setQueryTransforms(query, getSchema)
    new AccumuloFeatureCollection(self, query)
  }

  override def getFeatures(query: Query): SimpleFeatureCollection =
    tryLoggingFailures(getFeaturesNoCache(query))

  override def getFeatures(filter: Filter): SimpleFeatureCollection =
    getFeatures(new Query(getSchema().getTypeName, filter))
}

/** Default concrete feature source. */
class AccumuloFeatureSource(val dataStore: AccumuloDataStore, val featureName: Name)
  extends AccumuloAbstractFeatureSource

/**
 * Feature collection that short-circuits known GeoTools visitors to
 * GeoMesa-optimized implementations instead of iterating every feature.
 */
class AccumuloFeatureCollection(source: SimpleFeatureSource, query: Query)
  extends DefaultFeatureResults(source, query) {

  val ds = source.getDataStore.asInstanceOf[AccumuloDataStore]

  override def getSchema: SimpleFeatureType =
    if (query.getHints.containsKey(TEMPORAL_DENSITY_KEY)) {
      // Temporal-density queries return a synthetic schema, not the original.
      createFeatureType(source.getSchema())
    } else {
      org.locationtech.geomesa.core.index.getTransformSchema(query).getOrElse(super.getSchema)
    }

  override def accepts(visitor: FeatureVisitor, progress: ProgressListener) =
    visitor match {
      // TODO GEOMESA-421 implement min/max iterators
      case v: MinVisitor => v.setValue(ds.getTimeBounds(query.getTypeName).getStart.toDate)
      case v: MaxVisitor => v.setValue(ds.getTimeBounds(query.getTypeName).getEnd.toDate)
      case v: BoundsVisitor => v.reset(ds.getBounds(query))
      case v: TubeVisitor => v.setValue(v.tubeSelect(source, query))
      case v: ProximityVisitor => v.setValue(v.proximitySearch(source, query))
      case v: QueryVisitor => v.setValue(v.query(source, query))
      case v: TemporalDensityVisitor => v.setValue(v.query(source, query))
      case v: KNNVisitor => v.setValue(v.kNNSearch(source,query))
      case v: AttributeVisitor => v.setValue(v.unique(source, query))
      // Anything else falls back to the default feature-by-feature visit.
      case _ => super.accepts(visitor, progress)
    }

  override def reader(): FeatureReader[SimpleFeatureType, SimpleFeature] = super.reader()
}

/**
 * Variant of AccumuloFeatureCollection that materializes all results once
 * (lazily, on first access) and then serves iteration and size from memory.
 */
class CachingAccumuloFeatureCollection(source: SimpleFeatureSource, query: Query)
  extends AccumuloFeatureCollection(source, query) {

  lazy val featureList = {
    // use ListBuffer for constant append time and size
    val buf = scala.collection.mutable.ListBuffer.empty[SimpleFeature]
    val iter = super.features
    while (iter.hasNext) {
      buf.append(iter.next())
    }
    iter.close()
    buf
  }

  override def features = new SimpleFeatureIterator() {
    private val iter = featureList.iterator
    override def hasNext = iter.hasNext
    override def next = iter.next
    override def close = {}
  }

  override def size = featureList.length
}

/**
 * Mixin that memoizes query results per Query via a Guava cache. Sorting is
 * applied on top of the cached (unsorted) collection.
 */
trait CachingFeatureSource extends AccumuloAbstractFeatureSource {
  self: AccumuloAbstractFeatureSource =>

  private val featureCache =
    CacheBuilder.newBuilder().build(
      new CacheLoader[Query, SimpleFeatureCollection] {
        override def load(query: Query): SimpleFeatureCollection =
          new CachingAccumuloFeatureCollection(self, query)
      })

  override def getFeatures(query: Query): SimpleFeatureCollection = {
    // geotools bug in Query.hashCode
    if (query.getStartIndex == null) {
      query.setStartIndex(0)
    }

    if (query.getSortBy == null)
      featureCache.get(query)
    else // Uses mergesort
      new SortedSimpleFeatureCollection(featureCache.get(query), query.getSortBy)
  }

  override def getCount(query: Query): Int = getFeatures(query).size()
}
jnh5y/geomesa
geomesa-core/src/main/scala/org/locationtech/geomesa/core/data/AccumuloFeatureSource.scala
Scala
apache-2.0
7,315
package de.uni_potsdam.hpi.coheel.wiki

import java.io.{Reader, StringReader, BufferedReader}
import javax.xml.stream.{XMLStreamConstants, XMLInputFactory}
import de.uni_potsdam.hpi.coheel.programs.CoheelLogger
import de.uni_potsdam.hpi.coheel.programs.DataClasses._
import de.uni_potsdam.hpi.coheel.util.Timer
import org.apache.commons.lang3.StringEscapeUtils
import scala.collection.mutable

/**
 * Raw wiki page as read from the XML dump: title, namespace, redirect target
 * (empty string when not a redirect) and the unparsed wiki markup source.
 */
case class RawWikiPage(pageTitle: String, ns: Int, redirect: String, source: String) {
  import CoheelLogger._

  // True when the title marks a disambiguation page, or the markup contains a
  // disambiguation template (excluding "disambiguation needed" notes).
  lazy val isDisambiguation = {
    val isDisambiguationFromTitle = pageTitle.contains("(disambiguation)")
    if (isDisambiguationFromTitle)
      true
    else {
      Timer.start("DISAMBIGUATION CHECK")
      // in case of performance problems:
      // disambiguation links are always (as seen so far) at the end of the text
      // maybe this could be used to not scan the entire text
      // NOTE(review): inside a triple-quoted string `\\{` is a regex escape for a
      // literal backslash followed by '{' — this only matches "\{\{disambiguation...\}\}",
      // not the wiki template "{{disambiguation...}}". If templates are the target,
      // the pattern should read """(?ui)\{\{disambiguation.*?\}\}""" — verify.
      val disambiguationRegex = """(?ui)\\{\\{disambiguation.*?\\}\\}""".r
      val matches = disambiguationRegex.findAllIn(source)
        // check whether the regex sometimes accidentially matches too much text
        .map { s =>
          if (s.length > 200)
            log.warn(s"Disambiguation regex returns long result: $s.")
          s
        }
        .map(_.toLowerCase)
        .filter { s => !s.contains("disambiguation needed") }
      Timer.end("DISAMBIGUATION CHECK")
      matches.nonEmpty
    }
  }
}

/**
 * Captures the important aspects of a WikiPage for our use case, while still
 * maintaining connection (via inheritance) to the DBpedia extraction framework.
 * <br />
 * <strong>Definition redirect</strong>:<br />
 * A page is seen as a redirect, if it contains the &lt;redirect&gt; tag inside the &lt;page&gt; tag in the wiki
 * dump.
 * <br />
 * <strong>Definition disambiguation</strong>:<br />
 * A page is seen as a disambiguation if one of the following conditions is true:
 * <ul>
 * <li> The title contains "(disambiguation)"
 * <li> The text contains a disambiguation template (not all disambiguation pages contain "(disambiguation)" in the
 * title, e.g. http://en.wikipedia.org/wiki/Alien
 * <strong>Definition list</strong>:<br />
 * A page is seen as a list if its title starts with "List of" or "Lists of".
 *
 * @param pageTitle The title of the page as a string.
 * @param ns The namespace of the wiki page.
 * @param redirect The title of the page this page is redirecting to or null, if it is not a redirect.
 * @param plainTextStemmed This page's plain text content as array of tokens (stemmed).
 * @param plainTextUnstemmed This page's plain text content as array of tokens (unstemmed).
 */
case class WikiPage(pageTitle: String, ns: Int, redirect: String, plainTextStemmed: Array[String], plainTextUnstemmed: Array[String], links: Array[Link], isDisambiguation: Boolean) {

  // NOTE(review): a null redirect also compares unequal to NO_REDIRECT ("") and
  // would therefore count as a redirect — confirm callers never pass null here.
  val isRedirect: Boolean = this.redirect != RawWikiPage.NO_REDIRECT
  lazy val isList = pageTitle.startsWith("List of") || pageTitle.startsWith("Lists of")

  // A "normal" page is anything that is not a disambiguation, redirect or list page.
  def isNormalPage: Boolean = {
    !isDisambiguation && !isRedirect && !isList
  }
}

/**
 * Compared to the normal wiki page, this also stores the position of the links and the part of speech tags.
 */
case class FullInfoWikiPage(pageTitle: String, ns: Int, redirect: String, plainText: mutable.ArrayBuffer[String], tags: mutable.ArrayBuffer[String], links: mutable.Map[Int, Link], isDisambiguation: Boolean) {
  lazy val isList = pageTitle.startsWith("List of") || pageTitle.startsWith("Lists of")
}

object RawWikiPage {

  // Sentinel used in `redirect` for pages that do not redirect anywhere.
  val NO_REDIRECT = ""

  /**
   * Builds a wiki page from the given title and wiki markup source.
   */
  def fromSource(pageTitle: String, source: String): RawWikiPage = {
    RawWikiPage(pageTitle, 0, "", source)
  }
}

/**
 * Streams RawWikiPages out of a MediaWiki XML dump using StAX.
 */
class WikiPageReader {

  val factory = XMLInputFactory.newInstance()

  // Convenience overload for in-memory XML.
  def xmlToWikiPages(s: String): Iterator[RawWikiPage] = {
    val reader = new BufferedReader(new StringReader(s))
    xmlToWikiPages(reader)
  }

  private var readCounter = 1

  /**
   * Lazily parses the XML into pages. Pages whose title fails `pageFilter`
   * have their remaining elements skipped.
   *
   * The iterator reads one <page> ahead: the constructor consumes up to the
   * first <page> start tag, and each next() reads the current page's content
   * until it hits the NEXT <page> start tag (or end of stream), then returns
   * the fields accumulated so far.
   */
  def xmlToWikiPages(reader: Reader, pageFilter: String => Boolean = _ => true): Iterator[RawWikiPage] = {
    new Iterator[RawWikiPage] {
      var alreadyRead = false
      var hasMorePages = true
      val streamReader = factory.createXMLStreamReader(reader)

      // Values for the current page
      // NOTE(review): only redirectTitle is reset per page; pageTitle/ns/text can
      // carry stale values when a page is filtered out mid-stream — verify intended.
      var pageTitle: String = _
      var ns: Int = _
      var redirectTitle: String = _
      var text: String = _

      readNextPage()

      // Advances the stream to the next <page> start element, filling the
      // current-page fields from the elements seen along the way.
      def readNextPage(): Unit = {
        Timer.start("XML")
        redirectTitle = RawWikiPage.NO_REDIRECT
        var foundNextPage = false
        var pagePassedFilter = true
        while (!foundNextPage && streamReader.hasNext) {
          streamReader.next
          if (streamReader.getEventType == XMLStreamConstants.START_ELEMENT) {
            streamReader.getLocalName match {
              case "text" if pagePassedFilter =>
                text = streamReader.getElementText
              case "ns" if pagePassedFilter =>
                ns = streamReader.getElementText.toInt
              case "title" =>
                pageTitle = StringEscapeUtils.unescapeXml(streamReader.getElementText)
                // check whether the found page passes the page filter
                // if not, we will search for the next page and test again here
                pagePassedFilter = pageFilter(pageTitle)
              case "redirect" if pagePassedFilter =>
                redirectTitle = StringEscapeUtils.unescapeXml(streamReader.getAttributeValue(null, "title"))
              case "page" if pagePassedFilter =>
                foundNextPage = true
              case _ =>
            }
          }
        }
        hasMorePages = streamReader.hasNext
        if (!hasMorePages) reader.close()
        Timer.end("XML")
      }

      def hasNext = hasMorePages

      def next(): RawWikiPage = {
        if (!alreadyRead) {
          alreadyRead = true
          // log.info(f"Reading $readCounter%4s. wiki file on this node.")
          readCounter += 1
        }
        // Read ahead to the next page; the fields still hold the current page.
        readNextPage()
        RawWikiPage(
          pageTitle,
          ns,
          redirectTitle,
          text
        )
      }
    }
  }
}
stratosphere/coheel
src/main/scala/de/uni_potsdam/hpi/coheel/wiki/WikiPageReader.scala
Scala
apache-2.0
6,061
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.deploy.yarn

import org.apache.spark.util.{MemoryParam, IntParam}
import org.apache.spark.deploy.yarn.YarnSparkHadoopUtil._
import collection.mutable.ArrayBuffer

/**
 * Parses the command-line arguments passed to the YARN ApplicationMaster.
 * Parsing happens eagerly in the constructor; unknown options print usage
 * and terminate the JVM via System.exit.
 */
class ApplicationMasterArguments(val args: Array[String]) {
  var userJar: String = null
  var userClass: String = null
  var primaryPyFile: String = null
  var primaryRFile: String = null
  var pyFiles: String = null
  var userArgs: Seq[String] = Seq[String]()
  var executorMemory = 1024   // MB
  var executorCores = 1
  var numExecutors = DEFAULT_NUMBER_EXECUTORS

  parseArgs(args.toList)

  // Consumes option/value pairs from the head of the list until exhausted,
  // mutating the corresponding fields. Repeated --arg values accumulate.
  private def parseArgs(inputArgs: List[String]): Unit = {
    val userArgsBuffer = new ArrayBuffer[String]()

    var args = inputArgs

    while (!args.isEmpty) {
      // --num-workers, --worker-memory, and --worker-cores are deprecated since 1.0,
      // the properties with executor in their names are preferred.
      args match {
        case ("--jar") :: value :: tail =>
          userJar = value
          args = tail

        case ("--class") :: value :: tail =>
          userClass = value
          args = tail

        case ("--primary-py-file") :: value :: tail =>
          primaryPyFile = value
          args = tail

        case ("--primary-r-file") :: value :: tail =>
          primaryRFile = value
          args = tail

        case ("--py-files") :: value :: tail =>
          pyFiles = value
          args = tail

        case ("--args" | "--arg") :: value :: tail =>
          userArgsBuffer += value
          args = tail

        case ("--num-workers" | "--num-executors") :: IntParam(value) :: tail =>
          numExecutors = value
          args = tail

        case ("--worker-memory" | "--executor-memory") :: MemoryParam(value) :: tail =>
          executorMemory = value
          args = tail

        case ("--worker-cores" | "--executor-cores") :: IntParam(value) :: tail =>
          executorCores = value
          args = tail

        case _ =>
          // Anything unrecognized (including a missing value) aborts with usage.
          printUsageAndExit(1, args)
      }
    }

    // Python and R entry points are mutually exclusive.
    if (primaryPyFile != null && primaryRFile != null) {
      System.err.println("Cannot have primary-py-file and primary-r-file at the same time")
      System.exit(-1)
    }

    userArgs = userArgsBuffer.readOnly
  }

  // Prints the usage banner (and the offending parameter, if any), then exits.
  def printUsageAndExit(exitCode: Int, unknownParam: Any = null) {
    if (unknownParam != null) {
      System.err.println("Unknown/unsupported param " + unknownParam)
    }
    System.err.println("""
      |Usage: org.apache.spark.deploy.yarn.ApplicationMaster [options]
      |Options:
      |  --jar JAR_PATH       Path to your application's JAR file
      |  --class CLASS_NAME   Name of your application's main class
      |  --primary-py-file    A main Python file
      |  --primary-r-file     A main R file
      |  --py-files PY_FILES  Comma-separated list of .zip, .egg, or .py files to
      |                       place on the PYTHONPATH for Python apps.
      |  --args ARGS          Arguments to be passed to your application's main class.
      |                       Multiple invocations are possible, each will be passed in order.
      |  --num-executors NUM    Number of executors to start (Default: 2)
      |  --executor-cores NUM   Number of cores for the executors (Default: 1)
      |  --executor-memory MEM  Memory per executor (e.g. 1000M, 2G) (Default: 1G)
      """.stripMargin)
    System.exit(exitCode)
  }
}

object ApplicationMasterArguments {
  val DEFAULT_NUMBER_EXECUTORS = 2
}
andrewor14/iolap
yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMasterArguments.scala
Scala
apache-2.0
4,254
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License
 */

package org.apache.toree.kernel.interpreter.pyspark

import java.net.URL

import org.apache.toree.interpreter.Results.Result
import org.apache.toree.interpreter._
import org.apache.toree.kernel.api.KernelLike
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
import org.slf4j.LoggerFactory
import py4j.GatewayServer

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.tools.nsc.interpreter.{InputStream, OutputStream}

/**
 * Represents an interpreter interface to PySpark. Requires a properly-set
 * SPARK_HOME, PYTHONPATH pointing to Spark's Python source, and py4j installed
 * where it is accessible to the Spark Kernel.
 *
 * All collaborators below are lazy vals so that nothing is constructed until
 * the interpreter is actually started/used — in particular they must not be
 * forced before init() has populated _kernel.
 */
class PySparkInterpreter(
) extends Interpreter {
  private val logger = LoggerFactory.getLogger(this.getClass)

  // Set by init(); read lazily by pySparkBridge, so init() must run first.
  private var _kernel:KernelLike = _

  // TODO: Replace hard-coded maximum queue count
  /** Represents the state used by this interpreter's Python instance. */
  private lazy val pySparkState = new PySparkState(500)

  /** Represents the bridge used by this interpreter's Python interface. */
  private lazy val pySparkBridge = PySparkBridge(
    pySparkState,
    _kernel
  )

  /** Represents the interface for Python to talk to JVM Spark components. */
  private lazy val gatewayServer = new GatewayServer(pySparkBridge, 0)

  /** Represents the process handler used for the PySpark process. */
  private lazy val pySparkProcessHandler: PySparkProcessHandler =
    new PySparkProcessHandler(
      pySparkBridge,
      restartOnFailure = true,
      restartOnCompletion = true
    )

  // Ties together the gateway, bridge and process handler; owns start/stop.
  private lazy val pySparkService = new PySparkService(
    gatewayServer,
    pySparkBridge,
    pySparkProcessHandler
  )
  private lazy val pySparkTransformer = new PySparkTransformer

  /**
   * Initializes the interpreter.
   * @param kernel The kernel
   * @return The newly initialized interpreter
   */
  override def init(kernel: KernelLike): Interpreter = {
    _kernel = kernel
    this
  }

  // Unsupported (but can be invoked)
  override def bindSparkContext(sparkContext: SparkContext): Unit = {}

  // Unsupported (but can be invoked)
  override def bindSqlContext(sqlContext: SQLContext): Unit = {}

  /**
   * Executes the provided code with the option to silence output.
   * @param code The code to execute
   * @param silent Whether or not to execute the code silently (no output)
   * @return The success/failure of the interpretation and the output from the
   *         execution or the failure
   */
  override def interpret(code: String, silent: Boolean): (Result, Either[ExecuteOutput, ExecuteFailure]) = {
    // Lazily (re)start the backing Python service on first use.
    if (!pySparkService.isRunning) pySparkService.start()

    val futureResult = pySparkTransformer.transformToInterpreterResult(
      pySparkService.submitCode(code)
    )

    // NOTE(review): blocks the calling thread indefinitely until the Python
    // side replies — there is no timeout or cancellation path here.
    Await.result(futureResult, Duration.Inf)
  }

  /**
   * Starts the interpreter, initializing any internal state.
   * @return A reference to the interpreter
   */
  override def start(): Interpreter = {
    pySparkService.start()

    this
  }

  /**
   * Stops the interpreter, removing any previous internal state.
   * @return A reference to the interpreter
   */
  override def stop(): Interpreter = {
    pySparkService.stop()

    this
  }

  /**
   * Returns the class loader used by this interpreter.
   *
   * @return The runtime class loader used by this interpreter
   */
  override def classLoader: ClassLoader = this.getClass.getClassLoader

  // Unsupported (but can be invoked)
  override def lastExecutionVariableName: Option[String] = None

  // Unsupported (but can be invoked)
  override def read(variableName: String): Option[AnyRef] = None

  // Unsupported (but can be invoked)
  override def completion(code: String, pos: Int): (Int, List[String]) = (pos, Nil)

  // Unsupported
  override def updatePrintStreams(in: InputStream, out: OutputStream, err: OutputStream): Unit = ???

  // Unsupported
  override def classServerURI: String = ""

  // Unsupported
  override def interrupt(): Interpreter = ???

  // Unsupported
  override def bind(variableName: String, typeName: String, value: Any, modifiers: List[String]): Unit = ???

  // Unsupported
  override def addJars(jars: URL*): Unit = ???

  // Unsupported
  override def doQuietly[T](body: => T): T = ???
}
asorianostratio/incubator-toree
pyspark-interpreter/src/main/scala/org/apache/toree/kernel/interpreter/pyspark/PySparkInterpreter.scala
Scala
apache-2.0
5,115
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package kafka.server.epoch

import java.util.{Map => JMap}

import kafka.server.KafkaConfig._
import kafka.server.{BlockingSend, KafkaServer, ReplicaFetcherBlockingSend}
import kafka.utils.TestUtils._
import kafka.utils.{Logging, TestUtils}
import kafka.zk.ZooKeeperTestHarness
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.protocol.Errors._
import org.apache.kafka.common.requests.EpochEndOffset._
import org.apache.kafka.common.serialization.StringSerializer
import org.apache.kafka.common.utils.{LogContext, SystemTime}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.protocol.ApiKeys
import org.junit.Assert._
import org.junit.{After, Test}
import org.apache.kafka.common.requests.{EpochEndOffset, OffsetsForLeaderEpochRequest, OffsetsForLeaderEpochResponse}

import scala.collection.JavaConverters._
import scala.collection.Map

/**
 * Integration tests for leader-epoch behavior: epoch stamping of produced
 * records, the OffsetsForLeaderEpoch request/response path, and epoch
 * progression across leader restarts. Real brokers are started per test
 * and torn down in tearDown().
 */
class LeaderEpochIntegrationTest extends ZooKeeperTestHarness with Logging {
  var brokers: Seq[KafkaServer] = null
  val topic1 = "foo"
  val topic2 = "bar"
  val t1p0 = new TopicPartition(topic1, 0)
  val t1p1 = new TopicPartition(topic1, 1)
  val t1p2 = new TopicPartition(topic1, 2)
  val t2p0 = new TopicPartition(topic2, 0)
  val t2p2 = new TopicPartition(topic2, 2)
  val tp = t1p0
  var producer: KafkaProducer[Array[Byte], Array[Byte]] = null

  @After
  override def tearDown() {
    if (producer != null)
      producer.close()
    TestUtils.shutdownServers(brokers)
    super.tearDown()
  }

  @Test
  def shouldAddCurrentLeaderEpochToMessagesAsTheyAreWrittenToLeader() {
    brokers = (0 to 1).map { id => createServer(fromProps(createBrokerConfig(id, zkConnect))) }

    // Given two topics with replication of a single partition
    for (topic <- List(topic1, topic2)) {
      createTopic(zkClient, topic, Map(0 -> Seq(0, 1)), servers = brokers)
    }

    // When we send four messages
    sendFourMessagesToEachTopic()

    //Then they should be stamped with Leader Epoch 0
    var expectedLeaderEpoch = 0
    waitUntilTrue(() => messagesHaveLeaderEpoch(brokers(0), expectedLeaderEpoch, 0), "Leader epoch should be 0")

    //Given we then bounce the leader
    brokers(0).shutdown()
    brokers(0).startup()

    //Then LeaderEpoch should now have changed from 0 -> 1
    expectedLeaderEpoch = 1
    waitForEpochChangeTo(topic1, 0, expectedLeaderEpoch)
    waitForEpochChangeTo(topic2, 0, expectedLeaderEpoch)

    //Given we now send messages
    sendFourMessagesToEachTopic()

    //The new messages should be stamped with LeaderEpoch = 1
    waitUntilTrue(() => messagesHaveLeaderEpoch(brokers(0), expectedLeaderEpoch, 4), "Leader epoch should be 1")
  }

  @Test
  def shouldSendLeaderEpochRequestAndGetAResponse(): Unit = {
    //3 brokers, put partition on 100/101 and then pretend to be 102
    brokers = (100 to 102).map { id => createServer(fromProps(createBrokerConfig(id, zkConnect))) }

    val assignment1 = Map(0 -> Seq(100), 1 -> Seq(101))
    TestUtils.createTopic(zkClient, topic1, assignment1, brokers)

    val assignment2 = Map(0 -> Seq(100))
    TestUtils.createTopic(zkClient, topic2, assignment2, brokers)

    //Send messages equally to the two partitions, then half as many to a third
    producer = createProducer(getBrokerListStrFromServers(brokers), acks = -1)
    (0 until 10).foreach { _ => producer.send(new ProducerRecord(topic1, 0, null, "IHeartLogs".getBytes)) }
    (0 until 20).foreach { _ => producer.send(new ProducerRecord(topic1, 1, null, "OhAreThey".getBytes)) }
    (0 until 30).foreach { _ => producer.send(new ProducerRecord(topic2, 0, null, "IReallyDo".getBytes)) }
    producer.flush()

    val fetcher0 = new TestFetcherThread(sender(from = brokers(2), to = brokers(0)))
    val epochsRequested = Map(t1p0 -> 0, t1p1 -> 0, t2p0 -> 0, t2p2 -> 0)

    //When
    val offsetsForEpochs = fetcher0.leaderOffsetsFor(epochsRequested)

    //Then end offset should be correct
    assertEquals(10, offsetsForEpochs(t1p0).endOffset)
    assertEquals(30, offsetsForEpochs(t2p0).endOffset)

    //And should get no leader for partition error from t1p1 (as it's not on broker 0)
    assertTrue(offsetsForEpochs(t1p1).hasError)
    assertEquals(NOT_LEADER_FOR_PARTITION, offsetsForEpochs(t1p1).error)
    assertEquals(UNDEFINED_EPOCH_OFFSET, offsetsForEpochs(t1p1).endOffset)

    //Repointing to broker 1 we should get the correct offset for t1p1
    val fetcher1 = new TestFetcherThread(sender(from = brokers(2), to = brokers(1)))
    val offsetsForEpochs1 = fetcher1.leaderOffsetsFor(epochsRequested)
    assertEquals(20, offsetsForEpochs1(t1p1).endOffset)
  }

  @Test
  def shouldIncreaseLeaderEpochBetweenLeaderRestarts(): Unit = {
    //Setup: we are only interested in the single partition on broker 101
    brokers = Seq(100, 101).map { id => createServer(fromProps(createBrokerConfig(id, zkConnect))) }

    // Log end offset of the partition on broker 101 (the only replica).
    def leo() = brokers(1).replicaManager.getReplica(tp).get.logEndOffset.messageOffset

    TestUtils.createTopic(zkClient, tp.topic, Map(tp.partition -> Seq(101)), brokers)
    producer = createProducer(getBrokerListStrFromServers(brokers), acks = -1)

    //1. Given a single message
    producer.send(new ProducerRecord(tp.topic, tp.partition, null, "IHeartLogs".getBytes)).get
    var fetcher = new TestFetcherThread(sender(brokers(0), brokers(1)))

    //Then epoch should be 0 and leo: 1
    var offset = fetcher.leaderOffsetsFor(Map(tp -> 0))(tp).endOffset()
    assertEquals(1, offset)
    assertEquals(leo(), offset)

    //2. When broker is bounced
    brokers(1).shutdown()
    brokers(1).startup()

    producer.send(new ProducerRecord(tp.topic, tp.partition, null, "IHeartLogs".getBytes)).get
    fetcher = new TestFetcherThread(sender(brokers(0), brokers(1)))

    //Then epoch 0 should still be the start offset of epoch 1
    offset = fetcher.leaderOffsetsFor(Map(tp -> 0))(tp).endOffset()
    assertEquals(1, offset)

    //Then epoch 2 should be the leo (NB: The leader epoch goes up in factors of 2 - This is because we have to first change leader to -1 and then change it again to the live replica)
    assertEquals(2, fetcher.leaderOffsetsFor(Map(tp -> 2))(tp).endOffset())
    assertEquals(leo(), fetcher.leaderOffsetsFor(Map(tp -> 2))(tp).endOffset())

    //3. When broker is bounced again
    brokers(1).shutdown()
    brokers(1).startup()

    producer.send(new ProducerRecord(tp.topic, tp.partition, null, "IHeartLogs".getBytes)).get
    fetcher = new TestFetcherThread(sender(brokers(0), brokers(1)))

    //Then Epoch 0 should still map to offset 1
    assertEquals(1, fetcher.leaderOffsetsFor(Map(tp -> 0))(tp).endOffset())

    //Then Epoch 2 should still map to offset 2
    assertEquals(2, fetcher.leaderOffsetsFor(Map(tp -> 2))(tp).endOffset())

    //Then Epoch 4 should still map to offset 2
    assertEquals(3, fetcher.leaderOffsetsFor(Map(tp -> 4))(tp).endOffset())
    assertEquals(leo(), fetcher.leaderOffsetsFor(Map(tp -> 4))(tp).endOffset())

    //Adding some extra assertions here to save test setup.
    shouldSupportRequestsForEpochsNotOnTheLeader(fetcher)
  }

  //Appended onto the previous test to save on setup cost.
  def shouldSupportRequestsForEpochsNotOnTheLeader(fetcher: TestFetcherThread): Unit = {
    /**
      * Asking for an epoch not present on the leader should return the
      * next matching epoch, unless there isn't any, which should return
      * undefined.
      */
    val epoch1 = Map(t1p0 -> 1)
    assertEquals(1, fetcher.leaderOffsetsFor(epoch1)(t1p0).endOffset())

    val epoch3 = Map(t1p0 -> 3)
    assertEquals(2, fetcher.leaderOffsetsFor(epoch3)(t1p0).endOffset())

    val epoch5 = Map(t1p0 -> 5)
    assertEquals(-1, fetcher.leaderOffsetsFor(epoch5)(t1p0).endOffset())
  }

  // Builds a blocking inter-broker channel from `from` to `to`, resolving the
  // destination endpoint out of `from`'s metadata cache.
  private def sender(from: KafkaServer, to: KafkaServer): BlockingSend = {
    val endPoint = from.metadataCache.getAliveBrokers.find(_.id == to.config.brokerId).get.brokerEndPoint(from.config.interBrokerListenerName)
    new ReplicaFetcherBlockingSend(endPoint, from.config, new Metrics(), new SystemTime(), 42, "TestFetcher", new LogContext())
  }

  // Polls broker 0's metadata cache until the partition reports the wanted epoch.
  private def waitForEpochChangeTo(topic: String, partition: Int, epoch: Int): Unit = {
    TestUtils.waitUntilTrue(() => {
      brokers(0).metadataCache.getPartitionInfo(topic, partition) match {
        case Some(m) => m.basePartitionState.leaderEpoch == epoch
        case None => false
      }
    }, "Epoch didn't change")
  }

  // True when every record batch from minOffset onward, on every broker and for
  // both topics (partition 0), is stamped with expectedLeaderEpoch.
  private def messagesHaveLeaderEpoch(broker: KafkaServer, expectedLeaderEpoch: Int, minOffset: Int): Boolean = {
    var result = true
    for (topic <- List(topic1, topic2)) {
      val tp = new TopicPartition(topic, 0)
      val leo = broker.getLogManager().getLog(tp).get.logEndOffset
      result = result && leo > 0 && brokers.forall { broker =>
        broker.getLogManager().getLog(tp).get.logSegments.iterator.forall { segment =>
          if (segment.read(minOffset, None, Integer.MAX_VALUE) == null) {
            false
          } else {
            segment.read(minOffset, None, Integer.MAX_VALUE)
              .records.batches().iterator().asScala.forall(
                expectedLeaderEpoch == _.partitionLeaderEpoch()
              )
          }
        }
      }
    }
    result
  }

  // Sends four string messages to each of topic1 and topic2 with a throwaway
  // producer, waiting for every send to complete.
  private def sendFourMessagesToEachTopic() = {
    val testMessageList1 = List("test1", "test2", "test3", "test4")
    val testMessageList2 = List("test5", "test6", "test7", "test8")
    val producer = TestUtils.createProducer(TestUtils.getBrokerListStrFromServers(brokers), keySerializer = new StringSerializer, valueSerializer = new StringSerializer)
    val records =
      testMessageList1.map(m => new ProducerRecord(topic1, m, m)) ++
        testMessageList2.map(m => new ProducerRecord(topic2, m, m))
    records.map(producer.send).foreach(_.get)
    producer.close()
  }

  /**
    * Simulates how the Replica Fetcher Thread requests leader offsets for epochs
    */
  private[epoch] class TestFetcherThread(sender: BlockingSend) extends Logging {

    def leaderOffsetsFor(partitions: Map[TopicPartition, Int]): Map[TopicPartition, EpochEndOffset] = {
      val request = new OffsetsForLeaderEpochRequest.Builder(ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion(), toJavaFormat(partitions))
      val response = sender.sendRequest(request)
      response.responseBody.asInstanceOf[OffsetsForLeaderEpochResponse].responses.asScala
    }

    def toJavaFormat(partitions: Map[TopicPartition, Int]): JMap[TopicPartition, Integer] = {
      partitions.map { case (tp, epoch) => tp -> epoch.asInstanceOf[Integer] }.toMap.asJava
    }
  }
}
Esquive/kafka
core/src/test/scala/unit/kafka/server/epoch/LeaderEpochIntegrationTest.scala
Scala
apache-2.0
11,474
package filodb.coordinator.client

import scala.concurrent.duration._

import com.typesafe.scalalogging.StrictLogging

import filodb.coordinator._
import filodb.core._

case object NoClusterActor extends ErrorResponse

trait ClusterOps extends ClientBase with StrictLogging {
  import NodeClusterActor._

  /**
   * Lists every dataset currently registered with the cluster for ingestion and
   * querying (registered via `setupDataset` or the `SetupDataset` message sent
   * to the `NodeClusterActor`).
   * @param timeout how long to wait for the cluster actor's reply
   * @return the registered datasets, or Nil when no cluster actor ref is available
   */
  def getDatasets(timeout: FiniteDuration = 30.seconds): Seq[DatasetRef] =
    clusterActor match {
      case None => Nil
      case Some(ref) =>
        Client.actorAsk(ref, ListRegisteredDatasets, timeout) {
          case refs: Seq[DatasetRef] @unchecked => refs
        }
    }

  /**
   * Fetches the `filodb.coordinator.ShardMapper` for a registered dataset,
   * giving the current `ShardStatus` of every shard together with the
   * `NodeCoordinatorActor` / node address owning each shard.
   * @param dataset the dataset to look up
   * @param timeout how long to wait for the cluster actor's reply
   * @return Some(shardMapper) if the dataset is registered, None otherwise
   */
  def getShardMapper(dataset: DatasetRef, timeout: FiniteDuration = 30.seconds): Option[ShardMapper] =
    clusterActor.flatMap { ref =>
      Client.actorAsk(ref, GetShardMap(dataset), timeout) {
        case CurrentShardSnapshot(_, m) => Some(m)
        case _ => None
      }
    }
}
tuplejump/FiloDB
coordinator/src/main/scala/filodb.coordinator/client/ClusterOps.scala
Scala
apache-2.0
1,476
package io.buoyant.router.http

import com.twitter.finagle.Service
import com.twitter.finagle.http.Fields._
import com.twitter.finagle.http.{Request, Response}
import com.twitter.util.Future
import io.buoyant.router.http.StripHopByHopHeadersFilter.HopByHopHeaders
import io.buoyant.test.Awaits
import org.scalatest.FunSuite

class StripHopByHopHeadersFilterTest extends FunSuite with Awaits {

  // The hop-by-hop header fields the filter is expected to remove.
  val HopByHopHeaders = Seq(
    Connection,
    ProxyAuthenticate,
    ProxyAuthorization,
    Te,
    Trailer,
    TransferEncoding,
    Upgrade
  )

  // Downstream service that always answers with an empty response.
  val Ok = Service.mk[Request, Response](_ => Future.value(Response()))

  // The filter under test, fronting the trivial downstream service.
  val service = StripHopByHopHeadersFilter.filter.andThen(Ok)

  test("strips all hop-by-hop headers from request") {
    val req = Request()
    for (header <- HopByHopHeaders) {
      req.headerMap.set(header, "Some Value")
    }

    await(service(req))

    assert(req.headerMap.isEmpty)
  }

  test("strips all hop-by-hop headers from response") {
    // A downstream service whose response carries every hop-by-hop header.
    val nextService = Service.mk[Request, Response] { _ =>
      val resp: Response = Response()
      for (header <- HopByHopHeaders) {
        resp.headerMap.set(header, "Some Value")
      }
      Future.value(resp)
    }
    val filter = StripHopByHopHeadersFilter.filter.andThen(nextService)

    val resp = await(filter(Request()))

    assert(resp.headerMap.isEmpty)
  }

  test("strips all headers listed in 'Connection' header") {
    val req = Request()
    val connectionScopedHeaders = Seq(
      "Connection" -> "Keep-Alive, Foo, Bar",
      "Keep-Alive" -> "timeout=30",
      "Foo" -> "abc",
      "Bar" -> "def"
    )
    for ((name, value) <- connectionScopedHeaders) {
      req.headerMap.set(name, value)
    }

    await(service(req))

    assert(req.headerMap.isEmpty)
  }
}
denverwilliams/linkerd
router/http/src/test/scala/io/buoyant/router/http/StripHopByHopHeadersFilterTest.scala
Scala
apache-2.0
1,636
package org.scalaide.core.sbtbuilder

import org.junit.Test
import org.scalaide.core.internal.project.ScalaInstallation.platformInstallation
import org.scalaide.core.internal.ScalaPlugin
import org.junit.Assert
import org.eclipse.core.runtime.Platform

/**
 * Tests for the plugin's compiler interface store: compilation of the
 * compiler interface for the platform Scala installation, and caching of the
 * result across repeated requests (tracked via (hits, misses) stats).
 */
class CompilerInterfaceStoreTest {

  @Test
  def platformCompilerInterfaceWorks(): Unit = {
    val store = ScalaPlugin().compilerInterfaceStore
    // Start from an empty cache so the stats below are deterministic.
    store.purgeCache()
    // NOTE(review): the `(null)` argument's meaning isn't visible here —
    // presumably a progress monitor; verify against compilerInterfaceFor's signature.
    Assert.assertTrue("successful compiler interface compilation", store.compilerInterfaceFor(platformInstallation)(null).isRight)
    Assert.assertEquals("Zero hits and one miss", (0, 1), store.getStats)
  }

  @Test
  def platformCompilerInterfaceCachesCompilers(): Unit = {
    val store = ScalaPlugin().compilerInterfaceStore
    store.purgeCache()
    // First request compiles (miss); the second must be served from the cache (hit).
    Assert.assertTrue("successful compiler interface compilation", store.compilerInterfaceFor(platformInstallation)(null).isRight)
    Assert.assertTrue("Second try successful", store.compilerInterfaceFor(platformInstallation)(null).isRight)
    Assert.assertEquals("One hit and one miss", (1, 1), store.getStats)
  }
}
andrey-ilinykh/scala-ide
org.scala-ide.sdt.core.tests/src/org/scalaide/core/sbtbuilder/CompilerInterfaceStoreTest.scala
Scala
bsd-3-clause
1,096
package chandu0101.scalajs.react.components
package semanticui

import chandu0101.macros.tojs.JSMacro
import japgolly.scalajs.react._

import scala.scalajs.js
import scala.scalajs.js.`|`
import scala.scalajs.js.annotation.JSName

/**
 * This file is generated - submit issues instead of PR against it
 *
 * Typed Scala wrapper for the Semantic UI React `List.Icon` component; each
 * field corresponds to a JS prop, left undefined unless explicitly set.
 */
case class SuiListIcon(
  onDrag: js.UndefOr[ReactDragEventFromHtml => Callback] = js.undefined,
  onDragExit: js.UndefOr[ReactDragEventFromHtml => Callback] = js.undefined,
  onContextMenu: js.UndefOr[ReactMouseEventFromHtml => Callback] = js.undefined,
  onDragOver: js.UndefOr[ReactDragEventFromHtml => Callback] = js.undefined,
  onMouseUp: js.UndefOr[ReactMouseEventFromHtml => Callback] = js.undefined,
  onMouseDown: js.UndefOr[ReactMouseEventFromHtml => Callback] = js.undefined,
  ref: js.UndefOr[String] = js.undefined,
  onDragEnter: js.UndefOr[ReactDragEventFromHtml => Callback] = js.undefined,
  onClick: js.UndefOr[ReactMouseEventFromHtml => Callback] = js.undefined,
  onMouseMove: js.UndefOr[ReactMouseEventFromHtml => Callback] = js.undefined,
  onDragEnd: js.UndefOr[ReactDragEventFromHtml => Callback] = js.undefined,
  name: js.UndefOr[String] = js.undefined,
  onDragLeave: js.UndefOr[ReactDragEventFromHtml => Callback] = js.undefined,
  key: js.UndefOr[String] = js.undefined,
  onDrop: js.UndefOr[ReactDragEventFromHtml => Callback] = js.undefined,
  onDragStart: js.UndefOr[ReactDragEventFromHtml => Callback] = js.undefined,
  className: js.UndefOr[String] = js.undefined,
  onMouseLeave: js.UndefOr[ReactMouseEventFromHtml => Callback] = js.undefined,
  onDoubleClick: js.UndefOr[ReactMouseEventFromHtml => Callback] = js.undefined,
  verticalAlign: js.UndefOr[SemanticVERTICALALIGNMENTS] = js.undefined,
  onMouseOut: js.UndefOr[ReactMouseEventFromHtml => Callback] = js.undefined,
  onMouseEnter: js.UndefOr[ReactMouseEventFromHtml => Callback] = js.undefined,
  onMouseOver: js.UndefOr[ReactMouseEventFromHtml => Callback] = js.undefined
) {

  def apply() = {
    // Serialize this case class into a plain JS props object via the macro.
    val props = JSMacro[SuiListIcon](this)
    val component = JsComponent[js.Object, Children.None, Null](Sui.ListIcon)
    component(props)
  }
}
rleibman/scalajs-react-components
core/src/main/scala/chandu0101/scalajs/react/components/semanticui/SuiListIcon.scala
Scala
apache-2.0
2,194
/*
 * Copyright 2021 ABSA Group Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package za.co.absa.spline.consumer.rest.model

import za.co.absa.spline.consumer.service.model.Label

object LabelQuery {

  // Captures "<name>:<values>" — group 1 is everything before the first ':',
  // group 2 the raw comma-separated values (possibly empty).
  private val KeyValuesRegexp = "([^:]+):(.*)".r

  /**
   * Parses a label query string of the form "name:value1,value2,...".
   * Values are trimmed and blank entries are dropped.
   * @return Some(label) when a "name:values" pair is found, None otherwise
   */
  def parse(str: String): Option[Label] =
    for (m <- KeyValuesRegexp.findFirstMatchIn(str)) yield {
      val labelName = m.group(1)
      val labelValues = m.group(2).split(',').map(_.trim).filterNot(_.isBlank)
      Label(name = labelName, values = labelValues)
    }
}
AbsaOSS/spline
consumer-rest-core/src/main/scala/za/co/absa/spline/consumer/rest/model/LabelQuery.scala
Scala
apache-2.0
1,026
package wakfutcp.protocol.messages.server

import wakfutcp.protocol.common.SystemConfigType
import wakfutcp.protocol.{Codec, ServerMessage}

/**
 * Server message carrying the client's system configuration as a map of
 * [[SystemConfigType]] keys to string values.
 */
final case class ClientSystemConfigurationMessage(
  properties: Map[SystemConfigType, String]
) extends ServerMessage {
  // Protocol identifier for this message type.
  override val id = 2067
}

object ClientSystemConfigurationMessage {
  import Codec._
  import cats.syntax.invariant._

  // NOTE(review): presumably `block(int, ...)` is an int-prefixed frame and
  // `map(int, ..., utf8(int))` an int-counted map with int-length UTF-8
  // values — confirm against the Codec DSL before relying on this.
  implicit val codec: Codec[ClientSystemConfigurationMessage] = {
    val propertiesCodec = block(int, map(int, SystemConfigType.codec, utf8(int)))
    propertiesCodec.imap(apply)(Function.unlift(unapply))
  }
}
OpenWakfu/wakfutcp
protocol/src/main/scala/wakfutcp/protocol/messages/server/ClientSystemConfigurationMessage.scala
Scala
mit
554
/*§
  ===========================================================================
  Chronos
  ===========================================================================
  Copyright (C) 2015-2016 Gianluca Costa
  ===========================================================================
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
  ===========================================================================
*/

package info.gianlucacosta.chronos.interpreter.exceptions

/**
 * Execution-time failure raised for scheduling-related errors.
 *
 * @param message description of the scheduling failure, forwarded to
 *                [[ExecutionException]]
 */
class SchedulingException(message: String) extends ExecutionException(message)
giancosta86/Chronos
src/main/scala/info/gianlucacosta/chronos/interpreter/exceptions/SchedulingException.scala
Scala
apache-2.0
1,053
package com.softwaremill.macwire

/**
 * One-off code generator that prints the 22 `wireWith` overload signatures
 * (for factory functions of 1 to 22 parameters), ready to paste into the
 * macro API source.
 */
object WireWithCodeGen {

  /**
   * Builds the `wireWith` signature for a factory taking `typeParamCount`
   * parameters.
   *
   * @param typeParamCount number of factory parameters (1 to 22)
   * @return the generated method-signature line, e.g. for 2 parameters:
   *         `def wireWith[A,B,RES](factory: (A,B) => RES): RES = ...`
   */
  def signature(typeParamCount: Int): String = {
    // Type-parameter names are consecutive capital letters starting at 'A'.
    val tpes = (0 until typeParamCount).map(i => ('A' + i).toChar).mkString(",")
    s"def wireWith[$tpes,RES](factory: ($tpes) => RES): RES = macro MacwireMacros.wireWith_impl[RES]"
  }

  // Fix: the original used deprecated procedure syntax
  // (`def main(args: Array[String]) { ... }`); declare `: Unit =` explicitly.
  def main(args: Array[String]): Unit =
    for (arity <- 1 to 22) println(signature(arity))
}
guersam/macwire
macros/src/test/scala/com/softwaremill/macwire/WireWithCodeGen.scala
Scala
apache-2.0
317