code
stringlengths
5
1M
repo_name
stringlengths
5
109
path
stringlengths
6
208
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
5
1M
/*
 * TRILLION-G
 *
 * Copyright (C) 2017 Himchan Park (chan150@dgist.ac.kr)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package kr.acon.util

import it.unimi.dsi.fastutil.longs.LongOpenHashBigSet
import kr.acon.parser.Parser
import org.apache.spark.rdd.RDD
import org.apache.spark.rdd.RDD.rddToPairRDDFunctions
import org.apache.spark.sql.SparkSession

/**
 * Post-processing actions that can be applied to a generated graph,
 * represented as an RDD of (vertexId, adjacencySet) pairs: Parquet export,
 * materialisation, and in/out/undirected degree-distribution plotting
 * via gnuplot.
 */
object PredefinedFunctions {

  /** Base type of every post-processing action. */
  abstract class Function {
    type t1 = RDD[(Long, LongOpenHashBigSet)]
    type t2 = Parser

    /** Consumes the adjacency-list RDD `e` according to the parsed options. */
    def f(e: t1, parser: t2): Unit
  }

  // Writes the degree-distribution data files and gnuplot script, then invokes
  // gnuplot on the script.
  // FIX(review): the source as dumped contained corrupted `$(unknown)`
  // interpolations throughout; they are restored to `${filename}`, the only
  // String parameter in scope consistent with the surrounding usage.
  private def plotDegree(filename: String, logScaleAxis: String, multipleData: Array[String]*): Unit = {
    import scala.sys.process._
    createDatafile(s"${filename}", logScaleAxis, multipleData: _*)
    createScript(s"${filename}", logScaleAxis, multipleData: _*)
    s"gnuplot ${filename}.plt" !;
  }

  // Writes one "<filename>.<i>.dat" file per data series, one record per line.
  private def createDatafile(filename: String, logScaleAxis: String, multipleData: Array[String]*): Unit = {
    import java.io._
    multipleData.zipWithIndex.foreach {
      case (data, index) =>
        val writer = new PrintWriter(new FileOutputStream(s"${filename}.${index}.dat", false))
        data.foreach(x => writer.println(x))
        writer.close()
    }
  }

  // Emits "<filename>.plt", a gnuplot script that renders every data file as a
  // points series into "<filename>.eps"; axes named in `logScaleAxis` (e.g.
  // "y", "xy") get log scaling, the sentinel "none" disables it.
  private def createScript(filename: String, logScaleAxis: String, multipleData: Array[String]*): Unit = {
    import java.io._
    val writer = new PrintWriter(new FileOutputStream(s"${filename}.plt", false))
    val plotLineInScript = multipleData.zipWithIndex.map {
      case (data, index) =>
        val dataFilename = s"${filename}.${index}.dat"
        s""""${dataFilename}" using 1:2 with points pointtype 5 pointsize 0.5"""
    }.mkString(", ")
    // FIX(review): the tic-label format string previously ended inside the
    // quoted gnuplot token ("10^{%L} had no closing quote because the Scala
    // triple-quoted literal terminated one character too early).
    val formatInPlot = if (logScaleAxis != "none") s"""set format ${logScaleAxis} "10^{%L}"""" else ""
    val logscaleInPlot = if (logScaleAxis != "none") s"""set logscale ${logScaleAxis}""" else ""
    val script =
      s"""
set terminal postscript eps enhanced color size 3in,1.5in
set output '${filename}.eps'
set nokey
set yrange [0.75:*]
${formatInPlot}
${logscaleInPlot}
plot ${plotLineInScript}
""".split("\\n+")
    script.foreach(x => writer.println(x))
    writer.close()
  }

  /** Exports the edge list to Parquet with columns (src, dst). */
  class PARQUET extends Function {
    override def f(e: t1, parser: t2): Unit = {
      val sparkSession = SparkSession.getActiveSession.get
      import sparkSession.implicits._
      e.flatMap {
        case (v, adj) =>
          // Stream (src, dst) pairs without materialising the adjacency set.
          new Iterator[(Long, Long)] {
            val it = adj.iterator()
            override def hasNext: Boolean = it.hasNext
            override def next(): (Long, Long) = (v, it.nextLong())
          }
      }.toDF("src", "dst").write.parquet(parser.hdfs + parser.file)
    }
  }

  /** Forces full evaluation of the RDD; the count itself is discarded. */
  class COUNT extends Function {
    override def f(e: t1, parser: t2): Unit = { e.count }
  }

  /** No-op action. */
  class NONE extends Function {
    override def f(e: t1, parser: t2): Unit = { println("do nothing") }
  }

  /** Plots the out-degree distribution (adjacency-set sizes per vertex). */
  class OUT extends Function {
    override def f(e: t1, parser: t2): Unit = {
      val d = e.flatMap { x => Iterator((x._1, x._2.size64())) }.reduceByKey(_ + _).
        map(x => (x._2, 1L)).reduceByKey(_ + _).
        map(x => s"${x._1}\t${x._2}").collect
      plotDegree(parser.file, parser.xy, d)
    }
  }

  /** Plots the in-degree distribution (occurrences as a destination vertex). */
  class IN extends Function {
    override def f(e: t1, parser: t2): Unit = {
      val d = e.flatMap(x => x._2.toLongArray.map { y => (y, 1L) }).reduceByKey(_ + _).
        map(x => (x._2, 1L)).reduceByKey(_ + _).
        map(x => s"${x._1}\t${x._2}").collect
      plotDegree(parser.file, parser.xy, d)
    }
  }

  /** Plots both distributions, caching the RDD so it is computed once. */
  class BOTH extends Function {
    override def f(e: t1, parser: t2): Unit = {
      val ec = e.cache()
      (new IN).f(ec, parser)
      (new OUT).f(ec, parser)
    }
  }

  /** Plots the combined degree distribution, treating edges as undirected. */
  class UNDIRECTED extends Function {
    override def f(e: t1, parser: t2): Unit = {
      val d = e.flatMap { x =>
        x._2.toLongArray.map { y => (y, 1L) }.++(List((x._1, x._2.size64())))
      }.reduceByKey(_ + _).map(x => (x._2, 1L)).
        reduceByKey(_ + _).map(x => s"${x._1}\t${x._2}").collect
      plotDegree(parser.file, parser.xy, d)
    }
  }
}
chan150/TrillionG
src/main/scala/kr/acon/util/PredefinedFunctions.scala
Scala
apache-2.0
4,884
package org.template.similar

import io.prediction.controller.PPreparator
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD

/** Identity preparator: hands the training data to the algorithm unchanged. */
class Preparator extends PPreparator[TrainingData, PreparedData] {

  /** Repackages users, items and view events into a [[PreparedData]]
    * without any transformation or filtering. */
  def prepare(sc: SparkContext, trainingData: TrainingData): PreparedData =
    new PreparedData(
      users = trainingData.users,
      items = trainingData.items,
      viewEvents = trainingData.viewEvents)
}

/** Immutable, serializable bundle of the prepared RDDs consumed downstream. */
class PreparedData(
  val users: RDD[(String, User)],
  val items: RDD[(String, Item)],
  val viewEvents: RDD[ViewEvent]
) extends Serializable
TheDataShed/PredictionIO
templates/scala-parallel-similar/src/main/scala/Preparator.scala
Scala
apache-2.0
617
// NOTE(review): this suite verifies commit ORDERING via a shared Mockito
// InOrder over (writeSupport, query); the sequence of commitPartitionEpoch /
// reportPartitionOffset calls inside each test is load-bearing — do not
// reorder. Each test sends async messages and then makeSynchronousCall()
// (askSync on GetCurrentEpoch) to flush the endpoint's mailbox before the
// mocks are inspected.
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.streaming.continuous import org.mockito.ArgumentMatchers.{any, eq => eqTo} import org.mockito.InOrder import org.mockito.Mockito.{inOrder, never, verify} import org.scalatest.BeforeAndAfterEach import org.scalatest.mockito.MockitoSugar import org.apache.spark._ import org.apache.spark.rpc.RpcEndpointRef import org.apache.spark.sql.LocalSparkSession import org.apache.spark.sql.execution.streaming.continuous._ import org.apache.spark.sql.sources.v2.reader.streaming.{ContinuousReadSupport, PartitionOffset} import org.apache.spark.sql.sources.v2.writer.WriterCommitMessage import org.apache.spark.sql.sources.v2.writer.streaming.StreamingWriteSupport import org.apache.spark.sql.test.TestSparkSession class EpochCoordinatorSuite extends SparkFunSuite with LocalSparkSession with MockitoSugar with BeforeAndAfterEach { private var epochCoordinator: RpcEndpointRef = _ private var writeSupport: StreamingWriteSupport = _ private var query: ContinuousExecution = _ private var orderVerifier: InOrder = _ override def beforeEach(): Unit = { val reader = mock[ContinuousReadSupport] writeSupport = mock[StreamingWriteSupport] query = mock[ContinuousExecution] orderVerifier = 
inOrder(writeSupport, query) spark = new TestSparkSession() epochCoordinator = EpochCoordinatorRef.create(writeSupport, reader, query, "test", 1, spark, SparkEnv.get) } test("single epoch") { setWriterPartitions(3) setReaderPartitions(2) commitPartitionEpoch(0, 1) commitPartitionEpoch(1, 1) commitPartitionEpoch(2, 1) reportPartitionOffset(0, 1) reportPartitionOffset(1, 1) // Here and in subsequent tests this is called to make a synchronous call to EpochCoordinator // so that mocks would have been acted upon by the time verification happens makeSynchronousCall() verifyCommit(1) } test("single epoch, all but one writer partition has committed") { setWriterPartitions(3) setReaderPartitions(2) commitPartitionEpoch(0, 1) commitPartitionEpoch(1, 1) reportPartitionOffset(0, 1) reportPartitionOffset(1, 1) makeSynchronousCall() verifyNoCommitFor(1) } test("single epoch, all but one reader partition has reported an offset") { setWriterPartitions(3) setReaderPartitions(2) commitPartitionEpoch(0, 1) commitPartitionEpoch(1, 1) commitPartitionEpoch(2, 1) reportPartitionOffset(0, 1) makeSynchronousCall() verifyNoCommitFor(1) } test("consequent epochs, messages for epoch (k + 1) arrive after messages for epoch k") { setWriterPartitions(2) setReaderPartitions(2) commitPartitionEpoch(0, 1) commitPartitionEpoch(1, 1) reportPartitionOffset(0, 1) reportPartitionOffset(1, 1) commitPartitionEpoch(0, 2) commitPartitionEpoch(1, 2) reportPartitionOffset(0, 2) reportPartitionOffset(1, 2) makeSynchronousCall() verifyCommitsInOrderOf(List(1, 2)) } test("consequent epochs, a message for epoch k arrives after messages for epoch (k + 1)") { setWriterPartitions(2) setReaderPartitions(2) commitPartitionEpoch(0, 1) commitPartitionEpoch(1, 1) reportPartitionOffset(0, 1) commitPartitionEpoch(0, 2) commitPartitionEpoch(1, 2) reportPartitionOffset(0, 2) reportPartitionOffset(1, 2) // Message that arrives late reportPartitionOffset(1, 1) makeSynchronousCall() verifyCommitsInOrderOf(List(1, 2)) } 
test("several epochs, messages arrive in order 1 -> 3 -> 4 -> 2") { setWriterPartitions(1) setReaderPartitions(1) commitPartitionEpoch(0, 1) reportPartitionOffset(0, 1) commitPartitionEpoch(0, 3) reportPartitionOffset(0, 3) commitPartitionEpoch(0, 4) reportPartitionOffset(0, 4) commitPartitionEpoch(0, 2) reportPartitionOffset(0, 2) makeSynchronousCall() verifyCommitsInOrderOf(List(1, 2, 3, 4)) } test("several epochs, messages arrive in order 1 -> 3 -> 5 -> 4 -> 2") { setWriterPartitions(1) setReaderPartitions(1) commitPartitionEpoch(0, 1) reportPartitionOffset(0, 1) commitPartitionEpoch(0, 3) reportPartitionOffset(0, 3) commitPartitionEpoch(0, 5) reportPartitionOffset(0, 5) commitPartitionEpoch(0, 4) reportPartitionOffset(0, 4) commitPartitionEpoch(0, 2) reportPartitionOffset(0, 2) makeSynchronousCall() verifyCommitsInOrderOf(List(1, 2, 3, 4, 5)) } private def setWriterPartitions(numPartitions: Int): Unit = { epochCoordinator.askSync[Unit](SetWriterPartitions(numPartitions)) } private def setReaderPartitions(numPartitions: Int): Unit = { epochCoordinator.askSync[Unit](SetReaderPartitions(numPartitions)) } private def commitPartitionEpoch(partitionId: Int, epoch: Long): Unit = { val dummyMessage: WriterCommitMessage = mock[WriterCommitMessage] epochCoordinator.send(CommitPartitionEpoch(partitionId, epoch, dummyMessage)) } private def reportPartitionOffset(partitionId: Int, epoch: Long): Unit = { val dummyOffset: PartitionOffset = mock[PartitionOffset] epochCoordinator.send(ReportPartitionOffset(partitionId, epoch, dummyOffset)) } private def makeSynchronousCall(): Unit = { epochCoordinator.askSync[Long](GetCurrentEpoch) } private def verifyCommit(epoch: Long): Unit = { orderVerifier.verify(writeSupport).commit(eqTo(epoch), any()) orderVerifier.verify(query).commit(epoch) } private def verifyNoCommitFor(epoch: Long): Unit = { verify(writeSupport, never()).commit(eqTo(epoch), any()) verify(query, never()).commit(epoch) } private def verifyCommitsInOrderOf(epochs: 
Seq[Long]): Unit = { epochs.foreach(verifyCommit) } }
hhbyyh/spark
sql/core/src/test/scala/org/apache/spark/sql/streaming/continuous/EpochCoordinatorSuite.scala
Scala
apache-2.0
6,574
package ml.combust.mleap.runtime.transformer.feature

import ml.combust.mleap.core.feature.PcaModel
import ml.combust.mleap.core.types._
import ml.combust.mleap.runtime.frame.{DefaultLeapFrame, Row}
import ml.combust.mleap.tensor.Tensor
import org.apache.spark.ml.linalg.{DenseMatrix, Vectors}
import org.scalatest.FunSpec

/**
 * Created by hollinwilkins on 10/12/16.
 *
 * Exercises the Pca transformer: projecting a single dense 3-vector onto two
 * principal components, failing on a missing input column, and advertising
 * the expected input/output schema.
 */
class PcaSpec extends FunSpec {
  // One-row leap frame holding a single dense 3-vector column "test_vec".
  val schema = StructType(Seq(StructField("test_vec", TensorType(BasicType.Double)))).get
  val dataset = Seq(Row(Tensor.denseVector(Array(2.0, 1.0, 0.0))))
  val frame = DefaultLeapFrame(schema, dataset)

  // 3x2 column-major principal-component matrix.
  val pc = new DenseMatrix(3, 2, Array(1d, -1, 2, 0, -3, 1))
  val input = Vectors.dense(Array(2d, 1, 0))
  val pca = Pca(
    shape = NodeShape.feature(inputCol = "test_vec", outputCol = "test_pca"),
    model = PcaModel(pc))

  describe("#transform") {
    it("extracts the principal components from the input column") {
      val transformed = pca.transform(frame).get
      // Column 1 is the appended output column "test_pca".
      val projected = transformed.dataset(0).getTensor[Double](1).toArray
      assert(projected sameElements Array[Double](1, -3))
    }

    describe("with invalid input column") {
      val pca2 = pca.copy(shape = NodeShape.feature(inputCol = "bad_input"))
      it("returns a Failure") {
        assert(pca2.transform(frame).isFailure)
      }
    }
  }

  describe("input/output schema") {
    it("has the correct inputs and outputs") {
      assert(pca.schema.fields == Seq(
        StructField("test_vec", TensorType.Double()),
        StructField("test_pca", TensorType.Double())))
    }
  }
}
combust/mleap
mleap-runtime/src/test/scala/ml/combust/mleap/runtime/transformer/feature/PcaSpec.scala
Scala
apache-2.0
1,560
package org.acme; import org.springframework.web.bind.annotation.\{GetMapping, RequestMapping, RestController} @RestController @RequestMapping(Array[String]("{resource.path}")) class {resource.class-name} { @GetMapping def hello() = "{resource.response}" }
quarkusio/quarkus
independent-projects/tools/base-codestarts/src/main/resources/codestarts/quarkus/extension-codestarts/spring-web-codestart/scala/src/main/scala/org/acme/{resource.class-name}.tpl.qute.scala
Scala
apache-2.0
268
// NOTE(review): fluent DSL — each nested case class is one step of the chain
// consumer -> provider -> (optional) state -> request -> response -> fragment.
// The java-DSL overloads convert java.util.Map via the deprecated implicit
// collection.JavaConversions; presumably kept for binary compatibility with
// existing Java callers — confirm before migrating to JavaConverters.
// `optional(...)` comes from the Optionals trait mixed into DescribingRequest.
package au.com.dius.pact.model import au.com.dius.pact.model.HttpMethod._ import org.json4s._ import au.com.dius.pact.consumer.{ConsumerTestVerification, VerificationResult} import org.json.JSONObject object PactFragmentBuilder { def apply(consumer: Consumer) = { WithConsumer(consumer) } case class WithConsumer(consumer: Consumer) { def hasPactWith(provider: String) = { WithProvider(Provider(provider)) } case class WithProvider(provider: Provider) { def given(state: String) = { InState(Some(state)) } def uponReceiving(description: String) = { InState(None).uponReceiving(description) } case class InState(state: Option[String]) { def uponReceiving(description: String) = { DescribingRequest(consumer, provider, state, description) } } } } case class DescribingRequest(consumer: Consumer, provider: Provider, state: Option[String], description: String, builder: CanBuildPactFragment.Builder = CanBuildPactFragment.firstBuild) extends Optionals { /** * supports java DSL */ def matching(path: String, method: String, query: String, headers: java.util.Map[String, String], body: String, matchers: java.util.Map[String, Any]): DescribingResponse = { import collection.JavaConversions._ matching(path, method, Option(query), optional(headers.toMap), optional(body), optional(matchers.toMap.asInstanceOf[Map[String, Map[String, String]]])) } def matching(path: String, method: String = Get, query: Option[String] = None, headers: Option[Map[String, String]] = None, body: Option[String] = None, matchers: Option[Map[String, Map[String, String]]] = None): DescribingResponse = { DescribingResponse(Request(method, path, query, headers, body, matchers)) } case class DescribingResponse(request: Request) { /** * supports java DSL */ def willRespondWith(status: Int, headers: java.util.Map[String, String], body: String, matchers: JSONObject): PactWithAtLeastOneRequest = { import collection.JavaConversions._ willRespondWith(status, headers.toMap, body, matchers) } def willRespondWith(status:Int = 
200, headers: Map[String,String] = Map(), body: String = "", matchers: Map[String, Map[String, String]] = Map()): PactWithAtLeastOneRequest = { builder( consumer, provider, state, Seq(Interaction( description, state, request, Response(status, headers, body, matchers)))) } } } case class PactWithAtLeastOneRequest(consumer: Consumer, provider:Provider, state: Option[String], interactions: Seq[Interaction]) { def uponReceiving(description: String) = { DescribingRequest(consumer, provider, state, description, CanBuildPactFragment.additionalBuild(this)) } def duringConsumerSpec[T](config: MockProviderConfig)(test: => T, verification: ConsumerTestVerification[T]): VerificationResult = { PactFragment(consumer, provider, interactions).duringConsumerSpec(config)(test, verification) } } object CanBuildPactFragment { type Builder = (Consumer, Provider, Option[String], Seq[Interaction]) => PactWithAtLeastOneRequest val firstBuild: Builder = PactWithAtLeastOneRequest.apply def additionalBuild(existing: PactWithAtLeastOneRequest): Builder = (_,_,_,i) => existing.copy(interactions = existing.interactions ++ i) } }
caoquendo/pact-jvm
pact-jvm-consumer/src/main/scala/au/com/dius/pact/model/PactFragmentBuilder.scala
Scala
apache-2.0
3,725
package piecewise

import org.specs2._
import piecewise.intervaltree._

/** Specification for IntervalTree construction and folding.
  *
  * FIX(review): the ten build cases were verbatim copies differing only in
  * the element count; they now delegate to one private helper. The spec's
  * public interface (method names referenced from the s2 text) is unchanged.
  */
class IntervalTreeSpecs extends Specification{def is = s2"""
  Build empty interval tree ${buildEmpty}
  Build interval tree with one mebmer ${buildOne}
  Build interval tree with two members ${buildTwo}
  Build interval tree with three members ${buildThree}
  Build interval tree with four members ${buildFour}
  Build interval tree with five members ${buildFive}
  Build interval tree with six members ${buildSix}
  Build interval tree with seven members ${buildSeven}
  Build interval tree with eight members ${buildEight}
  Build interval tree with nine members ${buildNine}
  Build interval tree with ten members ${buildTen}
  Ten integers fold ${fold}
  """

  /** Endless stream of unit-width intervals (i, i + 1) paired with index i. */
  def iterator: Iterator[((Double, Double), Int)] = {
    Iterator.iterate((0.0, 1.0))(t => {val (x0, x1) = t; (x0 + 1.0, x1 + 1.0)})
      .zip(
        Iterator.iterate(0)(i => i + 1)
      )
  }

  /** Shared check: a tree built from `n` intervals reports size `n` and its
    * iterator yields exactly `n` entries. */
  private def buildOfSize(n: Int) = {
    val tree = IntervalTree.buildRight(iterator.take(n), n)
    val iter = tree.iterator
    (tree.size must_== n) and (iter.size must_== n)
  }

  def buildEmpty = buildOfSize(0)
  def buildOne = buildOfSize(1)
  def buildTwo = buildOfSize(2)
  def buildThree = buildOfSize(3)
  def buildFour = buildOfSize(4)
  def buildFive = buildOfSize(5)
  def buildSix = buildOfSize(6)
  def buildSeven = buildOfSize(7)
  def buildEight = buildOfSize(8)
  def buildNine = buildOfSize(9)
  def buildTen = buildOfSize(10)

  def fold = {
    val tree = IntervalTree.buildRight(iterator.take(10), 10)
    val sum = iterator.take(10).map(_._2).sum
    // The folded function ignores the interval bounds and returns the stored
    // payload, so folding over [0, 11] is expected to sum the ten values.
    val calcSum = IntervalTree.subIntervalFold(tree, 0.0, 11.0,
      (l: Double, u: Double, v: Int) => v)
    calcSum must_== sum
  }
}
daniil-timofeev/gridsplines
piecewise/src/test/scala/piecewise/IntervalTreeSpecs.scala
Scala
apache-2.0
3,110
package org.scalajs.openui5.sap.m

import org.scalajs.openui5.sap.ui.core.{URI, ValueState}
import org.scalajs.openui5.util.{Settings, SettingsMap, noSettings}

import scala.scalajs.js
import scala.scalajs.js.annotation.{JSName, ScalaJSDefined}

/** Settings bag accepted by the [[StandardTile]] constructor. */
@ScalaJSDefined
trait StandardTileSettings extends TileSettings

object StandardTileSettings extends StandardTileSettingsBuilder(noSettings)

/** Immutable fluent builder producing [[StandardTileSettings]] instances. */
class StandardTileSettingsBuilder(val dict: SettingsMap)
  extends Settings[StandardTileSettings, StandardTileSettingsBuilder](new StandardTileSettingsBuilder(_))
    with StandardTileSetters[StandardTileSettings, StandardTileSettingsBuilder]

/** Typed setters for every sap.m.StandardTile property, reusable by
  * subclasses of the builder. */
trait StandardTileSetters[T <: js.Object, B <: Settings[T, _]] extends TileSetters[T, B] {
  def title(v: String) = setting("title", v)
  def info(v: String) = setting("info", v)
  def icon(v: URI) = setting("icon", v)
  def activeIcon(v: URI) = setting("activeIcon", v)
  def number(v: String) = setting("number", v)
  def numberUnit(v: String) = setting("numberUnit", v)
  def infoState(v: ValueState) = setting("infoState", v)
  def `type`(v: StandardTileType) = setting("type", v)
  def iconDensityAware(v: Boolean) = setting("iconDensityAware", v)
}

/** Scala.js facade over the native sap.m.StandardTile control; auxiliary
  * constructors allow passing just an id or just a settings object. */
@JSName("sap.m.StandardTile")
@js.native
class StandardTile(id: js.UndefOr[String] = js.native,
                   settings: js.UndefOr[StandardTileSettings] = js.native)
  extends Tile {
  def this(id: String) = this(id, js.undefined)
  def this(settings: StandardTileSettings) = this(js.undefined, settings)
}
lastsys/scalajs-openui5
src/main/scala/org/scalajs/openui5/sap/m/StandardTile.scala
Scala
mit
1,500
package io.eels.component.orc

import java.io.File

import com.sksamuel.exts.metrics.Timed
import io.eels.datastream.DataStream
import io.eels.schema.StructType
import io.eels.{FilePattern, Row}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

import scala.util.Random

/**
 * v1.3.0
 * 5m rows random contents; 20 parts; reading=1500
 */
object OrcMultipleFileSpeedTest extends App with Timed {

  val size = 5000000
  val count = 20
  val schema = StructType("a", "b", "c", "d", "e")

  // One row of random values matching the five-column schema.
  def createRow = Row(schema, Random.nextBoolean(), Random.nextFloat(), Random.nextGaussian(), Random.nextLong(), Random.nextString(4))

  implicit val conf = new Configuration()
  implicit val fs = FileSystem.getLocal(new Configuration())

  val dir = new Path("orc-speed-test")
  new File(dir.toString).mkdirs()

  // Write `size` random rows across `count` parts, clearing old output first.
  timed("Insertion") {
    val stream = DataStream.fromIterator(schema, Iterator.continually(createRow).take(size))
    new File(dir.toString).listFiles().foreach(_.delete)
    stream.to(OrcSink(new Path("orc-speed-test/orc_speed.pq")).withOverwrite(true), count)
  }

  // Re-read the whole dataset repeatedly, checking part count and row count.
  for (_ <- 1 to 25) {
    assert(count == FilePattern("orc-speed-test/*").toPaths().size)
    timed("Reading with OrcSource") {
      val rowsRead = OrcSource("orc-speed-test/*").toDataStream().map { row => row }.filter(_ => true).size
      assert(rowsRead == size, s"Expected $size but was $rowsRead")
    }
    println("")
    println("---------")
    println("")
  }
}
eel-lib/eel
eel-orc/src/test/scala/io/eels/component/orc/OrcMultipleFileSpeedTest.scala
Scala
mit
1,479
package mr.merc.map.view

import mr.merc.map.hex.view.{TerrainHexFieldView, TerrainHexView}
import mr.merc.map.hex.TerrainHexField
import mr.merc.unit.view.SoldierView
import mr.merc.view.move.Movement
import mr.merc.log.Logging
import mr.merc.map.hex.view.TerrainHexFieldView.{BattleFieldViewMode, FieldViewMode}
import mr.merc.map.terrain.TerrainKind._
import mr.merc.map.terrain.TerrainType.OceanWater
import mr.merc.politics.Province

// TODO add update method which handles case when soldiers changed
/** View-layer facade combining a terrain hex field view with the soldier
  * views drawn on top of it; soldier views exist only in battle mode. */
class MapView(field: TerrainHexField, factor: Double,
              val soldiersDrawer: SoldiersDrawer = new SoldiersDrawer(),
              mode: FieldViewMode = BattleFieldViewMode) extends Logging {

  val terrainView = new TerrainHexFieldView(field, soldiersDrawer, factor, mode)

  if (mode == BattleFieldViewMode) {
    createSoldiers foreach (soldiersDrawer.addSoldier)
  }

  /** Hex view under the given pixel, if any. */
  def hexByPixel(x: Int, y: Int): Option[TerrainHexView] = terrainView.hexByPixelCoords(x, y)

  /** Province owning the hex under the pixel; ocean hexes yield None. */
  def provinceByPixel(x: Int, y: Int): Option[Province] =
    hexByPixel(x, y).filterNot(_.hex.terrain == OceanWater).flatMap(_.hex.province)

  def soldiers = soldiersDrawer.soldiers

  // Builds one SoldierView per occupied hex, positioned at the hex's pixel
  // coordinates.
  private def createSoldiers: List[SoldierView] =
    terrainView.realHexes.flatMap { h =>
      h.hex.soldier.map { soldier =>
        debug(s"Creating soldier view for soldier type ${soldier.soldierType.name}")
        val view = new SoldierView(soldier, factor)
        view.coords = (h.x, h.y)
        view
      }
    }.toList

  /** Advances animations; hexes whose map object changed are marked dirty. */
  def update(time: Int): Unit = {
    soldiersDrawer.update(time)
    terrainView.hexesToDraw.foreach { hex =>
      if (hex.mapObjectView.exists(_.update(time))) {
        terrainView.setTerrainDirty(hex)
      }
    }
  }

  def canvasBattleLayers = terrainView.canvasLayers

  def addMovement(movement: Movement): Unit = soldiersDrawer.addMovement(movement)

  def pixelWidth = terrainView.pixelWidth

  def pixelHeight = terrainView.pixelHeight
}
RenualdMarch/merc
src/main/scala/mr/merc/map/view/MapView.scala
Scala
gpl-3.0
2,096
// https://leetcode.com/problems/palindrome-number
object Solution {
  /** True iff the decimal digits of `x` read the same forwards and backwards.
    *
    * Negatives and non-zero multiples of 10 can never be palindromes, so they
    * are rejected up front. Otherwise the lower half of the digits is reversed
    * and compared to the remaining upper half, which never overflows Int. */
  def isPalindrome(x: Int): Boolean = {
    if (x < 0 || (x != 0 && x % 10 == 0)) false
    else {
      var upper = x
      var reversedLower = 0
      while (upper > reversedLower) {
        reversedLower = reversedLower * 10 + upper % 10
        upper /= 10
      }
      // Even digit count: halves must match exactly.
      // Odd digit count: the middle digit sits in reversedLower's last place.
      upper == reversedLower || upper == reversedLower / 10
    }
  }
}
airt/codegames
leetcode/009-palindrome-number.scala
Scala
mit
324
// NOTE(review): countUpdate is a read-then-upsert expressed as a DBIO
// for-comprehension; the select must run before the update/insert decision,
// so the statement order inside the `for` is significant. Atomicity depends
// on how the caller runs the composed DBIO (presumably transactionally via
// SlickReadSide) — confirm before relying on it under concurrent updates.
/* * Copyright (C) 2016-2017 Lightbend Inc. <https://www.lightbend.com> */ package com.lightbend.lagom.scaladsl.persistence.slick import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor.ReadSideHandler import com.lightbend.lagom.scaladsl.persistence.TestEntity.Evt import com.lightbend.lagom.scaladsl.persistence.{ AggregateEventTag, EventStreamElement, ReadSideProcessor, TestEntity } import scala.concurrent.{ ExecutionContext, Future } import slick.jdbc.JdbcBackend.Database import slick.jdbc.JdbcProfile trait Tables { val profile: JdbcProfile import profile.api._ implicit val ec: ExecutionContext case class TestCount(id: String, count: Long) class TestCounts(tag: Tag) extends Table[TestCount](tag, "testcounts") { def id = column[String]("id", O.PrimaryKey) def count = column[Long]("count") def * = (id, count) <> (TestCount.tupled, TestCount.unapply) } lazy val testCounts: TableQuery[TestCounts] = TableQuery[TestCounts] def createTable: DBIO[_] = testCounts.schema.create def countUpdate(id: String, diff: Int = 1): DBIO[_] = { val q: Query[TestCounts, TestCount, Seq] = testCounts.filter(_.id === id) for { select <- q.result updated <- select.headOption match { case Some(testCount) => q.update(testCount.copy(count = testCount.count + diff)) case None => testCounts += TestCount(id, diff) } } yield updated } } object SlickTestEntityReadSide { class TestEntityReadSideProcessor(readSide: SlickReadSide, db: Database, val profile: JdbcProfile)(implicit val ec: ExecutionContext) extends ReadSideProcessor[TestEntity.Evt] with Tables { def buildHandler(): ReadSideHandler[TestEntity.Evt] = readSide .builder[TestEntity.Evt]("test-entity-read-side") .setGlobalPrepare(createTable) .setEventHandler(updateCount) .build() def aggregateTags: Set[AggregateEventTag[Evt]] = TestEntity.Evt.aggregateEventShards.allTags def updateCount(event: EventStreamElement[TestEntity.Appended]) = countUpdate(event.entityId, 1) } } class SlickTestEntityReadSide(db: Database, val profile: 
JdbcProfile)(implicit val ec: ExecutionContext) extends Tables { import profile.api._ def getAppendCount(id: String): Future[Long] = db.run { testCounts.filter(_.id === id) .map(_.count) .result .headOption .map(_.getOrElse(0l)) } }
edouardKaiser/lagom
persistence-jdbc/scaladsl/src/test/scala/com/lightbend/lagom/scaladsl/persistence/slick/SlickTestEntityReadSide.scala
Scala
apache-2.0
2,425
// NOTE(review): compiler regression test for runtime reification. The inner
// block deliberately re-declares `x = 2`, shadowing the outer `x = 42` that
// the reified expression captured; the test passes only if `foo()`'s tree
// keeps the binding to the OUTER x (expected output: 42). Do not "clean up"
// the shadowing — it is the behavior under test.
import scala.reflect.runtime.universe._ import scala.tools.reflect.Eval object Test extends App { { val x = 42 def foo() = reify{ val y = x; reify(y) }; { val x = 2 val code1 = foo() val code2 = code1.eval println(code2.eval) } } }
lampepfl/dotty
tests/disabled/macro/run/reify_newimpl_38.scala
Scala
apache-2.0
277
package mesosphere.marathon
package api.akkahttp

import akka.http.scaladsl.model.Uri.Path
import mesosphere.marathon.core.task.Task
import mesosphere.marathon.state.{ Group, PathId, RootGroup, Timestamp }
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.PathMatcher.{ Matched, Unmatched }
import akka.http.scaladsl.server.PathMatcher1
import mesosphere.marathon.state.PathId._

import scala.annotation.tailrec

/**
 * Akka HTTP [[akka.http.scaladsl.server.PathMatcher]]s for Marathon's v2 API.
 * These exist mainly to cope with the ambiguity of app/pod/group ids that may
 * themselves contain path segments colliding with API keywords.
 */
object PathMatchers {
  import akka.http.scaladsl.server.PathMatcher._

  /**
    * Matches the remaining path and transforms it into task id
    */
  final val RemainingTaskId = Remaining.map(s => Task.Id(s))

  /**
    * Tries to match the remaining path as Timestamp
    */
  final val Version = Segment.flatMap(string =>
    // Timestamp.apply throws on malformed input; map that to "no match".
    try Some(Timestamp(string)) catch { case _: IllegalArgumentException => None }
  )

  /**
    * All these keywords are not allowed in the application/pod/group name
    */
  private val marathonApiKeywords = Set("restart", "tasks", "versions", "delay", "apps")

  /**
    * Very similar to AppPathIdLike with only difference of how the result should look like when no group id is actually found.
    * In that case, it fallbacks to use root group as a default
    */
  case object GroupPathIdLike extends PathMatcher1[PathId] {
    def iter(reversePieces: List[String], remaining: Path, consumedSlash: Option[Path] = None): Matching[Tuple1[PathId]] =
      // Delegate to AppPathIdLike, but on "no segments collected" yield the root path id.
      AppPathIdLike.iter(reversePieces, remaining, consumedSlash,
        (consumedSlash, remaining) => Matched(consumedSlash.getOrElse(remaining), Tuple1("/".toRootPath)))

    def iter(remaining: Path): Matching[Tuple1[PathId]] = iter(Nil, remaining)

    override def apply(path: Path): Matching[Tuple1[PathId]] = iter(path)
  }

  /**
   * Matches and extracts the force parameter.
   */
  val forceParameter = parameter('force.as[Boolean].?(false))

  /**
   * Matches everything what's coming before api keywords as PathId
   */
  case object AppPathIdLike extends PathMatcher1[PathId] {
    // Accumulates path segments (in reverse) until an API keyword or the end of
    // the path is hit; `onEmpty` decides the result when nothing was collected.
    @tailrec def iter(reversePieces: List[String], remaining: Path, consumedSlash: Option[Path] = None, onEmpty: (Option[Path], Path) => Matching[Tuple1[PathId]] = (_, _) => Unmatched): Matching[Tuple1[PathId]] =
      remaining match {
        case slash @ Path.Slash(rest) =>
          // Remember the slash so it can be "given back" as unconsumed input.
          iter(reversePieces, rest, Some(slash), onEmpty)
        case Path.Segment(segment, rest) if !marathonApiKeywords(segment) =>
          iter(segment :: reversePieces, rest, onEmpty = onEmpty)
        case _ if reversePieces.isEmpty =>
          onEmpty(consumedSlash, remaining)
        case remaining =>
          // Hit a keyword (or path end) with collected segments: emit the PathId,
          // leaving the keyword and the preceding slash unconsumed.
          Matched(consumedSlash.getOrElse(remaining), Tuple1(PathId.sanitized(reversePieces.reverse)))
      }

    def iter(remaining: Path): Matching[Tuple1[PathId]] = iter(Nil, remaining)

    override def apply(path: Path): Matching[Tuple1[PathId]] = iter(path)
  }

  /**
   * Matches anything until ::. The remaining path will be :: plus everything that follows :: in the original path.
   * The matched path up until :: will be extracted.
   *
   * Note: This makes the use of :: illegal in a pods path.
   */
  case object PodsPathIdLike extends PathMatcher1[String] {
    // Simple reg ex that matches anything before and after ::
    val keywordMatcher = "^(.*)::(.*)$".r

    @tailrec def iter(accumulatedPathId: String, remaining: Path): Matching[Tuple1[String]] = remaining match {
      case Path.Slash(rest) =>
        // A trailing slash (nothing after it) is not a valid pod id.
        if (rest.isEmpty) Unmatched else iter(accumulatedPathId + "/", rest)
      case Path.Segment(segment, rest) =>
        segment match {
          case keywordMatcher(before, keyword) =>
            // Split the segment at "::": id part is consumed, "::keyword" stays unconsumed.
            Matched(s"::$keyword" :: rest, Tuple1(accumulatedPathId + before))
          case _ =>
            iter(accumulatedPathId + segment, rest)
        }
      case _ =>
        Matched(remaining, Tuple1(accumulatedPathId))
    }

    def iter(remaining: Path): Matching[Tuple1[String]] = iter("", remaining)

    override def apply(path: Path) = iter(path)
  }

  /**
   * Given the current root group, only match and consume an existing appId
   *
   * This is useful because our v2 API has an unfortunate design decision which leads to ambiguity in our URLs, such as:
   *
   * POST /v2/apps/my-group/restart/restart
   *
   * The intention here is to restart the app named "my-group/restart"
   *
   * Given the url above, this matcher will only consume "my-group/restart" from the path,
   * leaving the rest of the matcher to match the rest
   */
  case class ExistingRunSpecId(rootGroup: () => RootGroup) extends PathMatcher1[PathId] {
    import akka.http.scaladsl.server.PathMatcher._

    // Greedily extends the candidate id one segment at a time until it names an
    // app that actually exists in the (snapshot of the) root group.
    @tailrec final def iter(collected: Vector[String], remaining: Path, group: Group): Matching[Tuple1[PathId]] = remaining match {
      case Path.Slash(rest) => iter(collected, rest, group)
      case Path.Segment(segment, rest) =>
        val rawPathId = collected :+ segment
        val pathId = PathId.sanitized(rawPathId, true)
        group.app(pathId) match {
          case Some(_) => Matched(rest, Tuple1(pathId))
          case None => iter(rawPathId, rest, group)
        }
      case _ => Unmatched
    }

    override def apply(path: Path) = iter(Vector.empty, path, rootGroup())
  }

  /**
   * Path matcher, that matches a segment only, if it is defined in the given set.
   * @param set the allowed path segments.
   */
  class PathIsAvailableInSet(set: Set[String]) extends PathMatcher1[String] {
    def apply(path: Path) = path match {
      case Path.Segment(segment, tail) if set(segment) => Matched(tail, Tuple1(segment))
      case _ => Unmatched
    }
  }
}
janisz/marathon
src/main/scala/mesosphere/marathon/api/akkahttp/PathMatchers.scala
Scala
apache-2.0
5,641
package japgolly.microlibs.stdlib_ext // ********** // * * // * JS * // * * // ********** import java.lang.{StringBuilder => JStringBuilder} trait PlatformSpecificEscapeUtils { self: EscapeUtils.type => override def quote(s: String): String = scala.scalajs.js.JSON.stringify(s) override def escape(s: String): String = if (s == null) null else { val q = quote(s) q.substring(1, q.length - 1) } override def appendQuoted(sb: JStringBuilder, s: String): Unit = { sb.append(quote(s)) () } override def appendEscaped(sb: JStringBuilder, s: String): Unit = { sb.append(escape(s)) () } override def appendQuoted(sb: StringBuilder, s: String): Unit = { sb.append(quote(s)) () } override def appendEscaped(sb: StringBuilder, s: String): Unit = { sb.append(escape(s)) () } }
japgolly/microlibs-scala
stdlib-ext/js/src/main/scala/japgolly/microlibs/stdlib_ext/PlatformSpecificEscapeUtils.scala
Scala
apache-2.0
880
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package com.bwsw.sj.common

import com.bwsw.common.JsonSerializer
import com.bwsw.common.http.HttpClientBuilder
import com.bwsw.sj.common.config.SettingsUtils
import com.bwsw.sj.common.dal.repository.ConnectionRepository
import com.bwsw.sj.common.si._
import com.bwsw.sj.common.si.model.FileMetadataCreator
import com.bwsw.sj.common.si.model.config.ConfigurationSettingCreator
import com.bwsw.sj.common.si.model.instance.InstanceCreator
import com.bwsw.sj.common.si.model.module.ModuleMetadataCreator
import com.bwsw.sj.common.si.model.provider.ProviderCreator
import com.bwsw.sj.common.si.model.service.ServiceCreator
import com.bwsw.sj.common.si.model.stream.StreamCreator
import com.bwsw.sj.common.utils.{MessageResourceUtils, SpecificationUtils}
import scaldi.Module

/**
 * Scaldi dependency-injection module wiring the platform's shared services:
 * utilities, the Mongo-backed [[ConnectionRepository]], entity creators and
 * the service-interface (SI) layer. Bindings use `to` (singleton) except
 * [[FileBuffer]] and [[JsonSerializer]], which use `toProvider` (fresh
 * instance per injection — presumably because they carry mutable state;
 * confirm against their implementations).
 */
class SjModule extends Module {
  bind[MessageResourceUtils] to new MessageResourceUtils
  bind[SpecificationUtils] to new SpecificationUtils
  bind[SettingsUtils] to new SettingsUtils

  // Connection settings come from ConnectionConstants (env/config driven).
  val mongoAuthChecker = new MongoAuthChecker(ConnectionConstants.mongoHosts, ConnectionConstants.databaseName)
  bind[ConnectionRepository] to new ConnectionRepository(
    mongoAuthChecker,
    ConnectionConstants.mongoHosts,
    ConnectionConstants.databaseName,
    ConnectionConstants.mongoUser,
    ConnectionConstants.mongoPassword)

  // Factories for domain model objects.
  bind[ProviderCreator] to new ProviderCreator
  bind[ServiceCreator] to new ServiceCreator
  bind[StreamCreator] to new StreamCreator
  bind[FileMetadataCreator] to new FileMetadataCreator
  bind[ModuleMetadataCreator] to new ModuleMetadataCreator
  bind[InstanceCreator] to new InstanceCreator
  bind[ConfigurationSettingCreator] to new ConfigurationSettingCreator

  // Service-interface layer (one SI per REST resource family).
  bind[ConfigSettingsSI] to new ConfigSettingsSI
  bind[ProviderSI] to new ProviderSI
  bind[ServiceSI] to new ServiceSI
  bind[StreamSI] to new StreamSI
  bind[CustomFilesSI] to new CustomFilesSI
  bind[CustomJarsSI] to new CustomJarsSI
  bind[ModuleSI] to new ModuleSI
  bind[InstanceSI] to new InstanceSI

  bind[FileBuffer] toProvider new FileBuffer
  bind[JsonSerializer] toProvider new JsonSerializer(ignoreUnknown = true)
  bind[HttpClientBuilder] to new HttpClientBuilder
}

/** Process-wide singleton module and injector used by the rest of the platform. */
object SjModule {
  implicit lazy val module = new SjModule
  implicit lazy val injector = module.injector
}
bwsw/sj-platform
core/sj-common/src/main/scala/com/bwsw/sj/common/SjModule.scala
Scala
apache-2.0
3,068
package utils

import scala.slick.driver.PostgresDriver.simple._

import scala.util.Try

/**
 * Slick 2.x data access for the tag/article schema: `tag` and `article`
 * tables joined through the many-to-many `tag_article` table.
 */
object DB {
  class Tag_db(tag: Tag) extends Table[(Int, String)](tag, "tag") {
    def tag_id = column[Int]("tag_id")
    def tag_text = column[String]("tag_text")
    def * = (tag_id, tag_text)
  }

  class Article_db(tag: Tag) extends Table[(Int, String)](tag, "article") {
    def article_id = column[Int]("article_id")
    def article_url = column[String]("article_url")
    def * = (article_id, article_url)
  }

  class TagArticle_db(tag: Tag) extends Table[(Int, Int)](tag, "tag_article") {
    def tag_id = column[Int]("tag_id")
    def article_id = column[Int]("article_id")
    def * = (tag_id, article_id)
  }

  // DRY initialize once
  val connectionUrl = sys.env("JDBC_DATABASE_URL").toString()
  // local testing: "jdbc:postgresql://localhost:5432/tproger_bot?user=postgres&password=root"

  val tag = TableQuery[Tag_db]
  val article = TableQuery[Article_db]
  val tag_article = TableQuery[TagArticle_db]

  /**
   * Add record to many-to-many auxiliary table tag_article.
   * Query can be aborted if one of constraints violated
   * @param tag_text tag_text column references to tag table
   * @param article_url article_url column references to article table
   */
  def addNewTagArticle(tag_text: String, article_url: String): Unit = {
    Database.forURL(connectionUrl, driver = "org.postgresql.Driver") withSession {
      implicit session =>
        // Error is normal - if tag is exists
        // NOTE(review): Try swallows ALL failures here, not just duplicate-key
        // violations; a connection error would also be silenced and the
        // subsequent `.head` below would then throw NoSuchElementException.
        Try(tag.map(c => c.tag_text) += (tag_text))
        val last_tag_q = tag filter (_.tag_text === tag_text) map (_.tag_id)
        val last_tag_id = last_tag_q.list.head

        // Error is normal - if article is exists
        Try(article.map(c => c.article_url) += (article_url))
        val last_article_q = article filter (_.article_url === article_url) map (_.article_id)
        val last_article_id = last_article_q.list.head

        // Error is normal - if pair tag + article is exists
        Try(tag_article.map(c => (c.tag_id, c.article_id)) += (last_tag_id, last_article_id))
    }
  }

  /**
   * Returns all articles by tag
   * @param u_tag tag for search
   * @return list of articles url(article_url column of article table)
   */
  def getArticlesByTag(u_tag: String): List[String] = {
    Database.forURL(connectionUrl, driver = "org.postgresql.Driver") withSession {
      implicit session =>
        // Join tag -> tag_article -> article on the id columns.
        val req = for {
          t <- tag if t.tag_text === u_tag
          t_a <- tag_article if t_a.tag_id === t.tag_id
          a <- article if a.article_id === t_a.article_id
        } yield (a.article_url)
        req.list
    }
  }
}
heroys6/tproger_bot
app/utils/DB.scala
Scala
mit
2,674
package org.scalatest.test.testsub1 import org.scalatest._ class TestSpec2 extends FunSpec { describe("TestSpec2") { it("test 1") {} } }
dotty-staging/scalatest
scalatest-test/src/test/scala/org/scalatest/test/testsub1/TestSpec2.scala
Scala
apache-2.0
151
/*
 * Copyright 2016 The BigDL Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intel.analytics.bigdl.nn.keras

import com.intel.analytics.bigdl.Criterion
import com.intel.analytics.bigdl.nn._
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, DataFormat}
import com.intel.analytics.bigdl.optim._
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric

import scala.reflect.ClassTag

/**
 * Translation helpers between Keras-style string configuration
 * (border modes, initializers, activations, losses, optimizers, metrics)
 * and the corresponding BigDL constructs.
 */
object KerasUtils {

  /** Maps a Keras border mode to (padH, padW); -1 means "same" (auto) padding. */
  private[keras] def getPadsFromBorderMode(borderMode: String = "valid"): (Int, Int) = {
    if (borderMode == "same") {
      // padH, padW
      (-1, -1)
    } else {
      (0, 0)
    }
  }

  /** Maps a Keras initializer name to a BigDL [[InitializationMethod]]. */
  private[bigdl] def getInitMethod(init: String): InitializationMethod = {
    init.toLowerCase() match {
      case "glorot_uniform" => Xavier
      case "one" => Ones
      case "zero" => Zeros
      case "uniform" => RandomUniform(-0.05, 0.05)
      case "normal" => RandomNormal(0.0, 0.05)
      case _ => throw new IllegalArgumentException(s"Unsupported initialization method: " +
        s"${init.toLowerCase()}")
    }
  }

  /**
   * Maps an activation name to a Keras-style layer.
   * Returns null when `activation` is null (meaning: no activation).
   * "softmax" maps to the Keras SoftMax layer; everything else is wrapped
   * from the corresponding Torch-style module.
   */
  private[bigdl] def getKerasActivation[T : ClassTag] (activation: String)
    (implicit ev: TensorNumeric[T]): KerasLayer[Tensor[T], Tensor[T], T] = {
    if (activation == null) {
      null
    } else if (activation.toLowerCase() == "softmax") {
      SoftMax[T]()
    } else {
      val torchActivation = getTorchActivation(activation)
      new KerasIdentityWrapper[T](torchActivation)
        .asInstanceOf[KerasLayer[Tensor[T], Tensor[T], T]]
    }
  }

  /** Maps an activation name to the underlying Torch-style module; null stays null. */
  private[keras] def getTorchActivation[T : ClassTag] (activation: String)
    (implicit ev: TensorNumeric[T]): AbstractModule[Tensor[T], Tensor[T], T] = {
    if (activation == null) null
    else {
      activation.toLowerCase() match {
        case "tanh" => Tanh[T]()
        case "sigmoid" => Sigmoid[T]()
        case "relu" => ReLU[T]()
        case "softmax" => com.intel.analytics.bigdl.nn.SoftMax[T]()
        case "softplus" => SoftPlus[T]()
        case "softsign" => SoftSign[T]()
        case "hard_sigmoid" => HardSigmoid[T]()
        case _ => throw new IllegalArgumentException(s"Invalid activation: " +
          s"${activation.toLowerCase}. Only simple activations can be constructed using string")
      }
    }
  }

  /**
   * Computes the 1D output length of a convolution for the given border mode.
   * @throws IllegalArgumentException for border modes other than "valid"/"same"
   *                                  (previously a raw MatchError).
   */
  private[keras] def computeConvOutputLength(
    inputLength: Int,
    filterSize: Int,
    borderMode: String,
    stride: Int,
    dilation: Int = 1): Int = {
    val dilatedFilterSize = filterSize + (filterSize - 1) * (dilation - 1)
    val outputLength = borderMode match {
      case "valid" => inputLength - dilatedFilterSize + 1
      case "same" => inputLength
      case _ => throw new IllegalArgumentException(s"Unsupported border mode: $borderMode")
    }
    // Ceiling division: accounts for the stride.
    (outputLength + stride - 1) / stride
  }

  /** 3D variant of [[getPadsFromBorderMode]]: (padT, padH, padW). */
  private[keras] def getPadsFromBorderMode3D(
    borderMode: String = "valid"): (Int, Int, Int) = {
    if (borderMode == "same") {
      // padT, padH, padW
      (-1, -1, -1)
    } else {
      (0, 0, 0)
    }
  }

  /** Maps Keras dim ordering ("tf"/"th") to the BigDL 4D [[DataFormat]]. */
  private[bigdl] def toBigDLFormat(dimOrdering: String): DataFormat = {
    require(dimOrdering.toLowerCase() == "tf" || dimOrdering.toLowerCase() == "th",
      s"Dim ordering must be either tf or th, but got ${dimOrdering.toLowerCase()}")
    dimOrdering.toLowerCase() match {
      case "tf" => DataFormat.NHWC
      case "th" => DataFormat.NCHW
    }
  }

  /** Maps Keras dim ordering ("tf"/"th") to the BigDL 5D format string. */
  private[bigdl] def toBigDLFormat5D(dimOrdering: String): String = {
    require(dimOrdering.toLowerCase() == "tf" || dimOrdering.toLowerCase() == "th",
      s"Dim ordering must be either tf or th, but got ${dimOrdering.toLowerCase()}")
    dimOrdering.toLowerCase() match {
      case "tf" => "CHANNEL_LAST"
      case "th" => "CHANNEL_FIRST"
    }
  }

  /** Maps a Keras loss name (long or short form) to a BigDL [[Criterion]]. */
  private[keras] def toBigDLCriterion[T : ClassTag](loss: String)
    (implicit ev: TensorNumeric[T]): Criterion[T] = {
    loss.toLowerCase() match {
      case "binary_crossentropy" => BCECriterion[T]()
      case "categorical_crossentropy" => CategoricalCrossEntropy[T]()
      case "mse" => MSECriterion[T]()
      case "mean_squared_error" => MSECriterion[T]()
      case "mae" => AbsCriterion[T]()
      case "mean_absolute_error" => AbsCriterion[T]()
      case "hinge" => MarginCriterion[T]()
      case "mape" => MeanAbsolutePercentageCriterion[T]()
      case "mean_absolute_percentage_error" => MeanAbsolutePercentageCriterion[T]()
      case "msle" => MeanSquaredLogarithmicCriterion[T]()
      case "mean_squared_logarithmic_error" => MeanSquaredLogarithmicCriterion[T]()
      case "squared_hinge" => MarginCriterion[T](squared = true)
      case "sparse_categorical_crossentropy" => ClassNLLCriterion[T](logProbAsInput = false)
      case "kld" => KullbackLeiblerDivergenceCriterion[T]()
      case "kullback_leibler_divergence" => KullbackLeiblerDivergenceCriterion[T]()
      case "cosine_proximity" => CosineProximityCriterion[T]()
      case "poisson" => PoissonCriterion[T]()
      case _ => throw new IllegalArgumentException(s"Invalid loss: ${loss.toLowerCase()}")
    }
  }

  /**
   * Maps a Keras optimizer name to a BigDL [[OptimMethod]] with Keras defaults.
   * @throws IllegalArgumentException for unknown names — previously this match
   *                                  was non-exhaustive and threw a raw
   *                                  MatchError, unlike its sibling methods.
   */
  private[keras] def toBigDLOptimMethod[T: ClassTag](optimMethod: String)
    (implicit ev: TensorNumeric[T]): OptimMethod[T] = {
    optimMethod.toLowerCase() match {
      case "sgd" => new SGD[T](learningRate = 0.01)
      case "rmsprop" => new RMSprop[T](learningRate = 0.001, decayRate = 0.9)
      case "adamax" => new Adamax[T](Epsilon = 1e-8)
      case "adagrad" => new Adagrad[T](learningRate = 0.01)
      case "adadelta" => new Adadelta[T](decayRate = 0.95, Epsilon = 1e-8)
      case "adam" => new Adam[T]()
      case _ => throw new IllegalArgumentException(
        s"Invalid optim method: ${optimMethod.toLowerCase()}")
    }
  }

  /** Maps metric names to BigDL validation methods; only "accuracy" is supported. */
  private[keras] def toBigDLMetrics[T: ClassTag](metrics: Array[String])
    (implicit ev: TensorNumeric[T]): Array[ValidationMethod[T]] = {
    if (metrics == null) {
      null
    } else if (metrics.sameElements(Array("accuracy"))) {
      Array(new Top1Accuracy[T]())
    } else {
      throw new IllegalArgumentException(s"Unsupported metrics: ${metrics.mkString(", ")}")
    }
  }
}
yiheng/BigDL
spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/keras/KerasUtils.scala
Scala
apache-2.0
6,454
/*
 * Copyright 2015 data Artisans GmbH
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.dataartisans.flink_demo.datatypes

import java.util.Locale

import org.joda.time.DateTime
import org.joda.time.format.{DateTimeFormat, DateTimeFormatter}

/**
 * A TaxiRide describes a taxi ride event.
 * There are two types of events, a taxi ride start event and a taxi ride end event.
 * The isStart flag specifies the type of the event.
 *
 * @param rideId       The id of the ride. There are two events for each id. A start and an end event.
 * @param time         The time at which the event occured
 * @param isStart      Flag indicating the type of the event (start or end)
 * @param location     The location at which the event occurred. Either pick-up or drop-off location.
 * @param passengerCnt The number of passengers on the taxi ride
 * @param travelDist   The total traveled distance for end events, -1 for start events.
 */
class TaxiRide(
    var rideId: Long,
    var time: DateTime,
    var isStart: Boolean,
    var location: GeoPoint,
    var passengerCnt: Short,
    var travelDist: Float) {

  // No-arg constructor required for (de)serialization frameworks.
  def this() {
    this(0, new DateTime(0), false, new GeoPoint(0.0, 0.0), 0, 0.0f)
  }

  /** Serializes the event as a CSV line, the inverse of [[TaxiRide.fromString]]. */
  override def toString: String = {
    val sb: StringBuilder = new StringBuilder
    sb.append(rideId).append(",")
    sb.append(time.toString(TaxiRide.TimeFormatter)).append(",")
    sb.append(if (isStart) "START" else "END").append(",")
    sb.append(location.lon).append(",")
    sb.append(location.lat).append(",")
    sb.append(passengerCnt).append(",")
    sb.append(travelDist)
    sb.toString()
  }
}

object TaxiRide {

  // BUG FIX: the pattern previously used "DD", which in Joda-Time means
  // day-of-YEAR — any date outside January was parsed/formatted incorrectly.
  // "dd" is day-of-month, matching the "yyyy-MM-dd HH:mm:ss" input data.
  @transient
  private final val TimeFormatter: DateTimeFormatter =
    DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss").withLocale(Locale.US).withZoneUTC

  /**
   * Parses a 7-field CSV line into a [[TaxiRide]].
   * Empty coordinate/distance fields default to 0.
   * @throws RuntimeException when the line has the wrong field count or malformed numbers.
   */
  def fromString(line: String): TaxiRide = {
    val tokens: Array[String] = line.split(",")
    if (tokens.length != 7) {
      throw new RuntimeException("Invalid record: " + line)
    }

    try {
      val rideId = tokens(0).toLong
      val time = DateTime.parse(tokens(1), TimeFormatter)
      val isStart = tokens(2) == "START"
      val lon = if (tokens(3).length > 0) tokens(3).toDouble else 0.0
      val lat = if (tokens(4).length > 0) tokens(4).toDouble else 0.0
      val passengerCnt = tokens(5).toShort
      val travelDistance = if (tokens(6).length > 0) tokens(6).toFloat else 0.0f

      new TaxiRide(rideId, time, isStart, new GeoPoint(lon, lat), passengerCnt, travelDistance)
    } catch {
      case nfe: NumberFormatException =>
        throw new RuntimeException("Invalid record: " + line, nfe)
    }
  }
}

/**
 * A geo point defined by a longitude and a latitude value.
 *
 * @param lon The longitude of the point.
 * @param lat The latitude of the point.
 */
case class GeoPoint(lon: Double, lat: Double)
fhueske/flink-streaming-demo
src/main/scala/com/dataartisans/flink_demo/datatypes/TaxiRide.scala
Scala
apache-2.0
3,380
/*
 * Copyright 2011-2018 GatlingCorp (http://gatling.io)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.gatling.http.request

import java.nio.charset.Charset

import io.gatling.commons.validation.Validation
import io.gatling.core.body.{ ElFileBodies, RawFileBodies, ResourceAndCachedBytes }
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.session._
import io.gatling.core.util.FileResource

import com.softwaremill.quicklens._
import org.asynchttpclient.request.body.multipart.{ ByteArrayPart, FilePart, Part, PartBase, StringPart }

/**
 * Factories for multipart body parts. Each factory pairs a part-builder
 * function (resolved per-session later in [[BodyPart.toMultiPart]]) with
 * default [[BodyPartAttributes]].
 */
object BodyPart {

  /** Part whose content is read from a file as-is (no EL processing). */
  def rawFileBodyPart(name: Option[Expression[String]], filePath: Expression[String])(implicit rawFileBodies: RawFileBodies): BodyPart =
    BodyPart(name, fileBodyPartBuilder(rawFileBodies.asResourceAndCachedBytes(filePath)), BodyPartAttributes())

  /** Part whose content is an EL-templated file, resolved to a string per session. */
  def elFileBodyPart(name: Option[Expression[String]], filePath: Expression[String])(implicit configuration: GatlingConfiguration, elFileBodies: ElFileBodies): BodyPart =
    stringBodyPart(name, elFileBodies.asString(filePath))

  /** String part; charset defaults to the configured core charset. */
  def stringBodyPart(name: Option[Expression[String]], string: Expression[String])(implicit configuration: GatlingConfiguration): BodyPart =
    BodyPart(name, stringBodyPartBuilder(string), BodyPartAttributes(charset = Some(configuration.core.charset)))

  /** Raw byte-array part. */
  def byteArrayBodyPart(name: Option[Expression[String]], bytes: Expression[Array[Byte]]): BodyPart =
    BodyPart(name, byteArrayBodyPartBuilder(bytes), BodyPartAttributes())

  // When a fileName is set, a StringPart can't carry it, so the string is
  // converted to bytes and emitted as a ByteArrayPart instead.
  private def stringBodyPartBuilder(string: Expression[String])(name: String, contentType: Option[String], charset: Option[Charset], fileName: Option[String], contentId: Option[String], transferEncoding: Option[String]): Expression[PartBase] =
    fileName match {
      case None =>
        string.map { resolvedString =>
          new StringPart(name, resolvedString, contentType.orNull, charset.orNull, contentId.orNull, transferEncoding.orNull)
        }
      case _ =>
        byteArrayBodyPartBuilder(string.map(_.getBytes(charset.orNull)))(name, contentType, charset, fileName, contentId, transferEncoding)
    }

  private def byteArrayBodyPartBuilder(bytes: Expression[Array[Byte]])(name: String, contentType: Option[String], charset: Option[Charset], fileName: Option[String], contentId: Option[String], transferEncoding: Option[String]): Expression[PartBase] =
    bytes.map { resolvedBytes =>
      new ByteArrayPart(name, resolvedBytes, contentType.orNull, charset.orNull, fileName.orNull, contentId.orNull, transferEncoding.orNull)
    }

  // Prefers cached bytes when available; otherwise streams straight from the
  // file (FileResource) or falls back to the resource's bytes.
  private def fileBodyPartBuilder(resource: Expression[ResourceAndCachedBytes])(name: String, contentType: Option[String], charset: Option[Charset], fileName: Option[String], contentId: Option[String], transferEncoding: Option[String]): Expression[PartBase] =
    session => for {
      ResourceAndCachedBytes(resource, cachedBytes) <- resource(session)
    } yield cachedBytes match {
      case Some(bytes) => new ByteArrayPart(name, bytes, contentType.orNull, charset.orNull, fileName.getOrElse(resource.name), contentId.orNull, transferEncoding.orNull)
      case None =>
        resource match {
          case FileResource(file) => new FilePart(name, file, contentType.orNull, charset.orNull, fileName.getOrElse(file.getName), contentId.orNull, transferEncoding.orNull)
          case _ => new ByteArrayPart(name, resource.bytes, contentType.orNull, charset.orNull, fileName.getOrElse(resource.name), contentId.orNull, transferEncoding.orNull)
        }
    }
}

/**
 * Per-part configuration, accumulated by the fluent setters on [[BodyPart]].
 * All `Expression[_]` fields are resolved against the session at request time.
 */
case class BodyPartAttributes(
    contentType:      Option[Expression[String]]         = None,
    charset:          Option[Charset]                    = None,
    dispositionType:  Option[Expression[String]]         = None,
    fileName:         Option[Expression[String]]         = None,
    contentId:        Option[Expression[String]]         = None,
    transferEncoding: Option[String]                     = None,
    customHeaders:    List[(String, Expression[String])] = Nil
) {

  lazy val customHeadersExpression: Expression[Seq[(String, String)]] = resolveIterable(customHeaders)
}

/**
 * A multipart body part definition: a name, a builder that produces the
 * concrete AHC [[PartBase]] once all attributes are resolved, and the
 * attributes themselves. Setters return updated copies (quicklens).
 */
case class BodyPart(
    name:        Option[Expression[String]],
    partBuilder: (String, Option[String], Option[Charset], Option[String], Option[String], Option[String]) => Expression[PartBase], // name, contentType, charset, fileName, contentId, transferEncoding
    attributes:  BodyPartAttributes
) {

  def contentType(contentType: Expression[String]) = this.modify(_.attributes.contentType).setTo(Some(contentType))
  def charset(charset: String) = this.modify(_.attributes.charset).setTo(Some(Charset.forName(charset)))
  def dispositionType(dispositionType: Expression[String]) = this.modify(_.attributes.dispositionType).setTo(Some(dispositionType))
  def fileName(fileName: Expression[String]) = this.modify(_.attributes.fileName).setTo(Some(fileName))
  def contentId(contentId: Expression[String]) = this.modify(_.attributes.contentId).setTo(Some(contentId))
  def transferEncoding(transferEncoding: String) = this.modify(_.attributes.transferEncoding).setTo(Some(transferEncoding))
  def header(name: String, value: Expression[String]) = this.modify(_.attributes.customHeaders).using(_ ::: List(name -> value))

  /** Resolves every attribute against the session and builds the AHC part. */
  def toMultiPart(session: Session): Validation[Part] =
    for {
      name <- resolveOptionalExpression(name, session)
      contentType <- resolveOptionalExpression(attributes.contentType, session)
      dispositionType <- resolveOptionalExpression(attributes.dispositionType, session)
      fileName <- resolveOptionalExpression(attributes.fileName, session)
      contentId <- resolveOptionalExpression(attributes.contentId, session)
      part <- partBuilder(name.orNull, contentType, attributes.charset, fileName, contentId, attributes.transferEncoding)(session)
      customHeaders <- attributes.customHeadersExpression(session)
    } yield {
      dispositionType.foreach(part.setDispositionType)
      customHeaders.foreach {
        case (headerName, headerValue) => part.addCustomHeader(headerName, headerValue)
      }
      part
    }
}
wiacekm/gatling
gatling-http/src/main/scala/io/gatling/http/request/BodyPart.scala
Scala
apache-2.0
6,549
/*
 * Copyright 2001-2013 Artima, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.scalatest

import SharedHelpers.EventRecordingReporter
import scala.concurrent.{Future, ExecutionContext}

/**
 * Verifies that AsyncFeatureSpecLike reports the right lifecycle events for
 * both Future-returning and plain-value scenarios: one success, one failure,
 * one pending, one canceled and one ignored, with "Scenario: "-prefixed names.
 */
class AsyncFeatureSpecLikeSpec extends FunSpec {

  describe("AsyncFeatureSpecLike") {

    it("can be used for tests that return Future") {

      class ExampleSpec extends AsyncFeatureSpecLike {

        implicit val executionContext: ExecutionContext = ExecutionContext.Implicits.global

        val a = 1

        scenario("test 1") {
          Future {
            assert(a == 1)
          }
        }

        scenario("test 2") {
          Future {
            assert(a == 2) // intentionally failing
          }
        }

        scenario("test 3") {
          Future {
            pending
          }
        }

        scenario("test 4") {
          Future {
            cancel
          }
        }

        ignore("test 5") {
          Future {
            cancel
          }
        }

        override def newInstance = new ExampleSpec
      }

      val rep = new EventRecordingReporter
      val spec = new ExampleSpec
      val status = spec.run(None, Args(reporter = rep))
      // Asynchronous suite: wait for all scenarios before asserting on events.
      status.waitUntilCompleted()
      // 4 started: the ignored scenario never starts.
      assert(rep.testStartingEventsReceived.length == 4)
      assert(rep.testSucceededEventsReceived.length == 1)
      assert(rep.testSucceededEventsReceived(0).testName == "Scenario: test 1")
      assert(rep.testFailedEventsReceived.length == 1)
      assert(rep.testFailedEventsReceived(0).testName == "Scenario: test 2")
      assert(rep.testPendingEventsReceived.length == 1)
      assert(rep.testPendingEventsReceived(0).testName == "Scenario: test 3")
      assert(rep.testCanceledEventsReceived.length == 1)
      assert(rep.testCanceledEventsReceived(0).testName == "Scenario: test 4")
      assert(rep.testIgnoredEventsReceived.length == 1)
      assert(rep.testIgnoredEventsReceived(0).testName == "Scenario: test 5")
    }

    it("can be used for tests that did not return Future") {

      class ExampleSpec extends AsyncFeatureSpecLike {

        implicit val executionContext: ExecutionContext = ExecutionContext.Implicits.global

        val a = 1

        scenario("test 1") {
          assert(a == 1)
        }

        scenario("test 2") {
          assert(a == 2) // intentionally failing
        }

        scenario("test 3") {
          pending
        }

        scenario("test 4") {
          cancel
        }

        ignore("test 5") {
          cancel
        }

        override def newInstance = new ExampleSpec
      }

      val rep = new EventRecordingReporter
      val spec = new ExampleSpec
      val status = spec.run(None, Args(reporter = rep))
      status.waitUntilCompleted()
      // Same expected event mix as the Future-returning variant above.
      assert(rep.testStartingEventsReceived.length == 4)
      assert(rep.testSucceededEventsReceived.length == 1)
      assert(rep.testSucceededEventsReceived(0).testName == "Scenario: test 1")
      assert(rep.testFailedEventsReceived.length == 1)
      assert(rep.testFailedEventsReceived(0).testName == "Scenario: test 2")
      assert(rep.testPendingEventsReceived.length == 1)
      assert(rep.testPendingEventsReceived(0).testName == "Scenario: test 3")
      assert(rep.testCanceledEventsReceived.length == 1)
      assert(rep.testCanceledEventsReceived(0).testName == "Scenario: test 4")
      assert(rep.testIgnoredEventsReceived.length == 1)
      assert(rep.testIgnoredEventsReceived(0).testName == "Scenario: test 5")
    }
  }
}
cheeseng/scalatest
scalatest-test/src/test/scala/org/scalatest/AsyncFeatureSpecLikeSpec.scala
Scala
apache-2.0
3,959
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// scalastyle:off println
package org.apache.spark.examples.mllib

import org.apache.log4j.{Level, Logger}

import org.apache.spark.mllib.clustering.KMeans
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.{SparkConf, SparkContext}

import scopt.OptionParser

/**
 * An example k-means app. Run with
 * {{{
 * ./bin/run-example org.apache.spark.examples.mllib.DenseKMeans [options] <input>
 * }}}
 * If you use it as a template to create your own app, please use `spark-submit` to submit your app.
 */
object DenseKMeans {

  object InitializationMode extends Enumeration {
    type InitializationMode = Value
    val Random, Parallel = Value
  }

  import InitializationMode._

  // CLI parameters; `k = -1` is a placeholder — scopt marks -k as required.
  case class Params(
      input: String = null,
      k: Int = -1,
      numIterations: Int = 10,
      initializationMode: InitializationMode = Parallel) extends AbstractParams[Params]

  def main(args: Array[String]) {
    val defaultParams = Params()

    val parser = new OptionParser[Params]("DenseKMeans") {
      head("DenseKMeans: an example k-means app for dense data.")
      opt[Int]('k', "k")
        .required()
        .text(s"number of clusters, required")
        .action((x, c) => c.copy(k = x))
      opt[Int]("numIterations")
        .text(s"number of iterations, default: ${defaultParams.numIterations}")
        .action((x, c) => c.copy(numIterations = x))
      opt[String]("initMode")
        .text(s"initialization mode (${InitializationMode.values.mkString(",")}), " +
          s"default: ${defaultParams.initializationMode}")
        .action((x, c) => c.copy(initializationMode = InitializationMode.withName(x)))
      arg[String]("<input>")
        .text("input paths to examples")
        .required()
        .action((x, c) => c.copy(input = x))
    }

    // Exit non-zero on bad CLI args (scopt already printed usage).
    parser.parse(args, defaultParams) match {
      case Some(params) => run(params)
      case _ => sys.exit(1)
    }
  }

  /** Loads whitespace-separated dense vectors, trains k-means and prints the WSS cost. */
  def run(params: Params): Unit = {
    val conf = new SparkConf().setAppName(s"DenseKMeans with $params")
    val sc = new SparkContext(conf)

    Logger.getRootLogger.setLevel(Level.WARN)

    // Cache: k-means iterates over the same data many times.
    val examples = sc.textFile(params.input).map { line =>
      Vectors.dense(line.split(' ').map(_.toDouble))
    }.cache()

    val numExamples = examples.count()

    println(s"numExamples = $numExamples.")

    val initMode = params.initializationMode match {
      case Random => KMeans.RANDOM
      case Parallel => KMeans.K_MEANS_PARALLEL
    }

    val model = new KMeans()
      .setInitializationMode(initMode)
      .setK(params.k)
      .setMaxIterations(params.numIterations)
      .run(examples)

    val cost = model.computeCost(examples)

    println(s"Total cost = $cost.")

    sc.stop()
  }
}
// scalastyle:on println
chgm1006/spark-app
src/main/scala/org/apache/spark/examples/mllib/DenseKMeans.scala
Scala
apache-2.0
3,887
package im.actor.server.api.rpc.service import scala.concurrent.Future import scala.util.Random import com.amazonaws.auth.EnvironmentVariableCredentialsProvider import com.amazonaws.services.s3.transfer.TransferManager import com.google.protobuf.CodedInputStream import org.scalatest.Inside._ import slick.dbio.DBIO import im.actor.api.rpc._ import im.actor.api.rpc.groups._ import im.actor.api.rpc.messaging._ import im.actor.api.rpc.misc.ResponseSeqDate import im.actor.api.rpc.peers.{ OutPeer, PeerType, UserOutPeer } import im.actor.server.api.rpc.service.groups.{ GroupErrors, GroupInviteConfig, GroupsServiceImpl } import im.actor.server.api.rpc.service.sequence.{ SequenceServiceConfig, SequenceServiceImpl } import im.actor.server._ import im.actor.server.oauth.{ GoogleProvider, OAuth2GoogleConfig } import im.actor.server.peermanagers.{ PrivatePeerManager, GroupPeerManager } import im.actor.server.presences.{ GroupPresenceManager, PresenceManager } import im.actor.server.social.SocialManager import im.actor.server.util.{ GroupServiceMessages, ACLUtils } class GroupsServiceSpec extends BaseAppSuite with GroupsServiceHelpers with MessageParsing with ImplicitFileStorageAdapter { behavior of "GroupsService" it should "send invites on group creation" in e1 it should "send updates on group invite" in e2 it should "send updates ot title change" in e3 it should "persist service messages in history" in e4 it should "generate invite url for group member" in e5 it should "not generate invite url for group non members" in e6 it should "revoke invite token and generate new token for group member" in e7 it should "allow user to join group by correct invite link and send correct updates" in e8 it should "not allow group member to join group by invite link" in e9 it should "send updates on user join" in e10 it should "send UserInvited and UserJoined on user's first MessageRead" in e11 it should "receive userJoined once" in e12 it should "not allow to create group with empty name" in 
e13 implicit val sessionRegion = buildSessionRegionProxy() implicit val seqUpdManagerRegion = buildSeqUpdManagerRegion() implicit val socialManagerRegion = SocialManager.startRegion() implicit val presenceManagerRegion = PresenceManager.startRegion() implicit val groupPresenceManagerRegion = GroupPresenceManager.startRegion() implicit val groupPeerManagerRegion = GroupPeerManager.startRegion() val awsCredentials = new EnvironmentVariableCredentialsProvider() val groupInviteConfig = GroupInviteConfig("http://actor.im") implicit val privatePeerManagerRegion = PrivatePeerManager.startRegion() val sequenceConfig = SequenceServiceConfig.load().toOption.get val sequenceService = new SequenceServiceImpl(sequenceConfig) val messagingService = messaging.MessagingServiceImpl(mediator) implicit val service = new GroupsServiceImpl(groupInviteConfig) val oauthGoogleConfig = OAuth2GoogleConfig.load(system.settings.config.getConfig("services.google.oauth")) implicit val oauth2Service = new GoogleProvider(oauthGoogleConfig) implicit val authService = buildAuthService() def e1() = { val (user1, authId1, _) = createUser() val (user2, authId2, _) = createUser() val sessionId = createSessionId() implicit val clientData = ClientData(authId1, sessionId, Some(user1.id)) val groupOutPeer = createGroup("Fun group", Set(user2.id)).groupPeer whenReady(db.run(persist.sequence.SeqUpdate.findLast(authId2))) { uOpt ⇒ val u = uOpt.get u.header should ===(UpdateGroupInvite.header) } whenReady(db.run(persist.GroupUser.findUserIds(groupOutPeer.groupId))) { userIds ⇒ userIds.toSet should ===(Set(user1.id, user2.id)) } } def e2() = { val (user1, authId1, _) = createUser() val (user2, authId2, _) = createUser() val sessionId = createSessionId() implicit val clientData = ClientData(authId1, sessionId, Some(user1.id)) val user2Model = getUserModel(user2.id) val user2AccessHash = ACLUtils.userAccessHash(clientData.authId, user2.id, user2Model.accessSalt) val user2OutPeer = UserOutPeer(user2.id, 
user2AccessHash) val groupOutPeer = createGroup("Fun group", Set.empty).groupPeer whenReady(service.handleInviteUser(groupOutPeer, Random.nextLong(), user2OutPeer)) { resp ⇒ resp should matchPattern { case Ok(ResponseSeqDate(1001, _, _)) ⇒ } } whenReady(db.run(persist.sequence.SeqUpdate.find(authId2))) { updates ⇒ updates.map(_.header) should ===( Seq( UpdateGroupMembersUpdate.header, UpdateGroupAvatarChanged.header, UpdateGroupTitleChanged.header, UpdateGroupInvite.header ) ) } whenReady(db.run(persist.sequence.SeqUpdate.find(authId1).head)) { update ⇒ update.header should ===(UpdateGroupUserInvited.header) } } def e3() = { val (user1, authId1, _) = createUser() val (user2, authId2, _) = createUser() val sessionId = createSessionId() implicit val clientData = ClientData(authId1, sessionId, Some(user1.id)) val groupOutPeer = createGroup("Fun group", Set(user2.id)).groupPeer whenReady(service.handleEditGroupTitle(groupOutPeer, Random.nextLong(), "Very fun group")) { resp ⇒ resp should matchPattern { case Ok(ResponseSeqDate(1001, _, _)) ⇒ } } whenReady(db.run(persist.sequence.SeqUpdate.find(authId1))) { updates ⇒ updates.head.header should ===(UpdateGroupTitleChanged.header) } whenReady(db.run(persist.sequence.SeqUpdate.find(authId2))) { updates ⇒ updates.head.header should ===(UpdateGroupTitleChanged.header) } } def e4() = { val (user1, authId1, _) = createUser() val (user2, authId2, _) = createUser() val sessionId = createSessionId() implicit val clientData = ClientData(authId1, sessionId, Some(user1.id)) val user2Model = getUserModel(user2.id) val user2AccessHash = ACLUtils.userAccessHash(clientData.authId, user2.id, user2Model.accessSalt) val user2OutPeer = UserOutPeer(user2.id, user2AccessHash) val groupOutPeer = createGroup("Fun group", Set.empty).groupPeer whenReady(db.run(persist.HistoryMessage.find(user1.id, models.Peer.group(groupOutPeer.groupId)))) { serviceMessages ⇒ serviceMessages should have length 1 serviceMessages .map { e ⇒ 
parseMessage(e.messageContentData) } shouldEqual Vector(Right(GroupServiceMessages.groupCreated)) } whenReady(service.handleInviteUser(groupOutPeer, Random.nextLong(), user2OutPeer)) { resp ⇒ resp should matchPattern { case Ok(_) ⇒ } whenReady(db.run(persist.HistoryMessage.find(user1.id, models.Peer.group(groupOutPeer.groupId)))) { serviceMessages ⇒ serviceMessages should have length 2 serviceMessages.map { e ⇒ parseMessage(e.messageContentData) } shouldEqual Vector( Right(GroupServiceMessages.userInvited(user2.id)), Right(GroupServiceMessages.groupCreated) ) } whenReady(db.run(persist.HistoryMessage.find(user2.id, models.Peer.group(groupOutPeer.groupId)))) { serviceMessages ⇒ serviceMessages should have length 1 serviceMessages.map { e ⇒ parseMessage(e.messageContentData) } shouldEqual Vector(Right(GroupServiceMessages.userInvited(user2.id))) } } //TODO: is it ok to remove avatar of group without avatar whenReady(service.handleRemoveGroupAvatar(groupOutPeer, Random.nextLong())) { resp ⇒ resp should matchPattern { case Ok(_) ⇒ } whenReady(db.run(persist.HistoryMessage.find(user1.id, models.Peer.group(groupOutPeer.groupId)))) { serviceMessages ⇒ serviceMessages should have length 3 serviceMessages.map { e ⇒ parseMessage(e.messageContentData) } shouldEqual Vector( Right(GroupServiceMessages.changedAvatar(None)), Right(GroupServiceMessages.userInvited(user2.id)), Right(GroupServiceMessages.groupCreated) ) } whenReady(db.run(persist.HistoryMessage.find(user2.id, models.Peer.group(groupOutPeer.groupId)))) { serviceMessages ⇒ serviceMessages should have length 2 serviceMessages.map { e ⇒ parseMessage(e.messageContentData) } shouldEqual Vector( Right(GroupServiceMessages.changedAvatar(None)), Right(GroupServiceMessages.userInvited(user2.id)) ) } } whenReady(service.handleEditGroupTitle(groupOutPeer, Random.nextLong(), "Not fun group")) { resp ⇒ resp should matchPattern { case Ok(_) ⇒ } whenReady(db.run(persist.HistoryMessage.find(user1.id, 
models.Peer.group(groupOutPeer.groupId)))) { serviceMessages ⇒ serviceMessages should have length 4 serviceMessages.map { e ⇒ parseMessage(e.messageContentData) }.head shouldEqual Right(GroupServiceMessages.changedTitle("Not fun group")) } } whenReady(service.handleLeaveGroup(groupOutPeer, Random.nextLong())(ClientData(authId2, sessionId, Some(user2.id)))) { resp ⇒ resp should matchPattern { case Ok(_) ⇒ } whenReady(db.run(persist.HistoryMessage.find(user1.id, models.Peer.group(groupOutPeer.groupId)))) { serviceMessages ⇒ serviceMessages should have length 5 serviceMessages.map { e ⇒ parseMessage(e.messageContentData) }.head shouldEqual Right(GroupServiceMessages.userLeft(user2.id)) } } whenReady(service.handleInviteUser(groupOutPeer, Random.nextLong(), user2OutPeer)) { resp ⇒ resp should matchPattern { case Ok(_) ⇒ } whenReady(db.run(persist.HistoryMessage.find(user1.id, models.Peer.group(groupOutPeer.groupId)))) { serviceMessages ⇒ serviceMessages should have length 6 serviceMessages.map { e ⇒ parseMessage(e.messageContentData) }.head shouldEqual Right(GroupServiceMessages.userInvited(user2.id)) } } whenReady(service.handleKickUser(groupOutPeer, Random.nextLong(), user2OutPeer)) { resp ⇒ resp should matchPattern { case Ok(_) ⇒ } whenReady(db.run(persist.HistoryMessage.find(user1.id, models.Peer.group(groupOutPeer.groupId)))) { serviceMessages ⇒ serviceMessages should have length 7 serviceMessages.map { e ⇒ parseMessage(e.messageContentData) }.head shouldEqual Right(GroupServiceMessages.userKicked(user2.id)) } } } def e5() = { val (user1, authId1, _) = createUser() val (user2, authId2, _) = createUser() val sessionId = createSessionId() implicit val clientData = ClientData(authId1, sessionId, Some(user1.id)) val user2Model = getUserModel(user2.id) val user2AccessHash = ACLUtils.userAccessHash(clientData.authId, user2.id, user2Model.accessSalt) val user2OutPeer = UserOutPeer(user2.id, user2AccessHash) val groupOutPeer = createGroup("Fun group", 
Set(user2.id)).groupPeer { implicit val clientData = ClientData(authId1, sessionId, Some(user1.id)) var expUrl: String = "" whenReady(service.handleGetGroupInviteUrl(groupOutPeer)) { resp ⇒ inside(resp) { case Ok(ResponseInviteUrl(url)) ⇒ url should startWith(groupInviteConfig.baseUrl) expUrl = url } } whenReady(service.handleGetGroupInviteUrl(groupOutPeer)) { resp ⇒ inside(resp) { case Ok(ResponseInviteUrl(url)) ⇒ url should startWith(groupInviteConfig.baseUrl) url shouldEqual expUrl } } } { implicit val clientData = ClientData(authId2, sessionId, Some(user2.id)) var expUrl: String = "" whenReady(service.handleGetGroupInviteUrl(groupOutPeer)) { resp ⇒ inside(resp) { case Ok(ResponseInviteUrl(url)) ⇒ url should startWith(groupInviteConfig.baseUrl) expUrl = url } } whenReady(service.handleGetGroupInviteUrl(groupOutPeer)) { resp ⇒ inside(resp) { case Ok(ResponseInviteUrl(url)) ⇒ url should startWith(groupInviteConfig.baseUrl) url shouldEqual expUrl } } } val findTokens = for { tokens ← DBIO.sequence(List( persist.GroupInviteToken.find(groupOutPeer.groupId, user1.id), persist.GroupInviteToken.find(groupOutPeer.groupId, user2.id) )) } yield tokens.flatten whenReady(db.run(findTokens)) { tokens ⇒ tokens should have length 2 tokens.foreach(_.groupId shouldEqual groupOutPeer.groupId) tokens.map(_.creatorId) should contain allOf (user1.id, user2.id) } } def e6() = { val (user1, authId1, _) = createUser() val (user2, authId2, _) = createUser() val sessionId = createSessionId() implicit val clientData = ClientData(authId1, sessionId, Some(user1.id)) val user2Model = getUserModel(user2.id) val user2AccessHash = ACLUtils.userAccessHash(clientData.authId, user2.id, user2Model.accessSalt) val user2OutPeer = UserOutPeer(user2.id, user2AccessHash) val groupOutPeer = createGroup("Fun group", Set.empty).groupPeer { implicit val clientData = ClientData(authId2, sessionId, Some(user2.id)) whenReady(service.handleGetGroupInviteUrl(groupOutPeer)) { resp ⇒ resp should matchNotAuthorized 
} } } def e7() = { val (user1, authId1, _) = createUser() val (user2, authId2, _) = createUser() val sessionId = createSessionId() implicit val clientData = ClientData(authId1, sessionId, Some(user1.id)) val user2Model = getUserModel(user2.id) val user2AccessHash = ACLUtils.userAccessHash(clientData.authId, user2.id, user2Model.accessSalt) val user2OutPeer = UserOutPeer(user2.id, user2AccessHash) val groupOutPeer = createGroup("Fun group", Set.empty).groupPeer var expUrl: String = "" whenReady(service.handleGetGroupInviteUrl(groupOutPeer)) { resp ⇒ inside(resp) { case Ok(ResponseInviteUrl(url)) ⇒ url should startWith(groupInviteConfig.baseUrl) expUrl = url } } whenReady(service.handleRevokeInviteUrl(groupOutPeer)) { resp ⇒ inside(resp) { case Ok(ResponseInviteUrl(url)) ⇒ url should startWith(groupInviteConfig.baseUrl) url should not equal expUrl } } whenReady(db.run(persist.GroupInviteToken.find(groupOutPeer.groupId, user1.id))) { tokens ⇒ tokens should have length 1 } } def e8() = { val (user1, authId1, _) = createUser() val (user2, authId2, _) = createUser() val sessionId = createSessionId() implicit val clientData = ClientData(authId1, sessionId, Some(user1.id)) val user2Model = getUserModel(user2.id) val user2AccessHash = ACLUtils.userAccessHash(clientData.authId, user2.id, user2Model.accessSalt) val user2OutPeer = UserOutPeer(user2.id, user2AccessHash) val groupOutPeer = createGroup("Invite Fun group", Set.empty).groupPeer whenReady(service.handleGetGroupInviteUrl(groupOutPeer)) { resp ⇒ inside(resp) { case Ok(ResponseInviteUrl(url)) ⇒ url should startWith(groupInviteConfig.baseUrl) { implicit val clientData = ClientData(authId2, sessionId, Some(user2.id)) whenReady(service.handleJoinGroup(url)) { resp ⇒ resp should matchPattern { case Ok(ResponseJoinGroup(_, _, _, _, _, _)) ⇒ } } } } } whenReady(db.run(persist.GroupUser.findUserIds(groupOutPeer.groupId))) { userIds ⇒ userIds should have length 2 userIds should contain allOf (user1.id, user2.id) } } def e9() = 
{ val (user1, authId1, _) = createUser() val (user2, authId2, _) = createUser() implicit val clientData = ClientData(authId1, createSessionId(), Some(user1.id)) val user2Model = getUserModel(user2.id) val user2AccessHash = ACLUtils.userAccessHash(clientData.authId, user2.id, user2Model.accessSalt) val user2OutPeer = UserOutPeer(user2.id, user2AccessHash) val groupOutPeer = createGroup("Fun group", Set(user2.id)).groupPeer whenReady(service.handleGetGroupInviteUrl(groupOutPeer)) { resp ⇒ inside(resp) { case Ok(ResponseInviteUrl(url)) ⇒ url should startWith(groupInviteConfig.baseUrl) { implicit val clientData = ClientData(authId2, createSessionId(), Some(user2.id)) whenReady(service.handleJoinGroup(url)) { resp ⇒ inside(resp) { case Error(err) ⇒ err shouldEqual GroupErrors.UserAlreadyInvited } } } } } } def e10() = { val (user1, authId1, _) = createUser() val (user2, authId2, _) = createUser() val sessionId = createSessionId() implicit val clientData1 = ClientData(authId1, sessionId, Some(user1.id)) val clientData2 = ClientData(authId2, sessionId, Some(user2.id)) val user2Model = getUserModel(user2.id) val user2AccessHash = ACLUtils.userAccessHash(clientData1.authId, user2.id, user2Model.accessSalt) val user2OutPeer = UserOutPeer(user2.id, user2AccessHash) val createGroupResponse = createGroup("Invite Fun group", Set.empty) val groupOutPeer = createGroupResponse.groupPeer whenReady(service.jhandleGetGroupInviteUrl(groupOutPeer, clientData1)) { resp ⇒ inside(resp) { case Ok(ResponseInviteUrl(url)) ⇒ url should startWith(groupInviteConfig.baseUrl) whenReady(service.jhandleJoinGroup(url, clientData2))(_ ⇒ ()) whenReady(sequenceService.jhandleGetDifference(createGroupResponse.seq, createGroupResponse.state, clientData1)) { diff ⇒ val resp = diff.toOption.get val updates = resp.updates updates should have length 1 val update = UpdateMessage.parseFrom(CodedInputStream.newInstance(updates.head.update)).right.toOption.get update.message shouldEqual 
GroupServiceMessages.userJoined } } } whenReady(db.run(persist.GroupUser.findUserIds(groupOutPeer.groupId))) { userIds ⇒ userIds should have length 2 userIds should contain allOf (user1.id, user2.id) } } def e11() = { val (user1, authId1, _) = createUser() val (user2, authId2, _) = createUser() val sessionId = createSessionId() val clientData1 = ClientData(authId1, sessionId, Some(user1.id)) val clientData2 = ClientData(authId2, sessionId, Some(user2.id)) val user2Model = getUserModel(user2.id) val user2AccessHash = ACLUtils.userAccessHash(clientData1.authId, user2.id, user2Model.accessSalt) val user2OutPeer = UserOutPeer(user2.id, user2AccessHash) val groupOutPeer = { implicit val clientData = clientData1 val groupOutPeer = createGroup("Invite Fun group", Set.empty).groupPeer whenReady(service.handleInviteUser(groupOutPeer, Random.nextLong, user2OutPeer)) { _ ⇒ } groupOutPeer } { implicit val clientData = clientData2 // send it twice to ensure that ServiceMessage isn't sent twice whenReady(messagingService.handleMessageRead(OutPeer(PeerType.Group, groupOutPeer.groupId, groupOutPeer.accessHash), System.currentTimeMillis))(identity) whenReady(messagingService.handleMessageRead(OutPeer(PeerType.Group, groupOutPeer.groupId, groupOutPeer.accessHash), System.currentTimeMillis + 1))(identity) } Thread.sleep(1000) { implicit val clientData = clientData1 whenReady(sequenceService.handleGetDifference(0, Array.empty)) { diff ⇒ val resp = diff.toOption.get val updates = resp.updates updates should have length 5 val update = UpdateMessage.parseFrom(CodedInputStream.newInstance(updates.last.update)).right.toOption.get update.message shouldEqual GroupServiceMessages.userJoined } } } def e12() = { val (user1, authId1, _) = createUser() val (user2, authId2, _) = createUser() val clientData1 = ClientData(authId1, createSessionId(), Some(user1.id)) val clientData2 = ClientData(authId2, createSessionId(), Some(user2.id)) val user2Model = getUserModel(user2.id) val user2AccessHash = 
ACLUtils.userAccessHash(clientData1.authId, user2.id, user2Model.accessSalt) val user2OutPeer = UserOutPeer(user2.id, user2AccessHash) val groupOutPeer = { implicit val clientData = clientData1 createGroup("Fun group", Set.empty).groupPeer } val peer = OutPeer(PeerType.Group, groupOutPeer.groupId, groupOutPeer.accessHash) val url = whenReady(service.jhandleGetGroupInviteUrl(groupOutPeer, clientData1)) { _.toOption.get.url } messagingService.jhandleSendMessage(peer, 22324L, TextMessage("hello", Vector.empty, None), clientData1) whenReady(service.jhandleJoinGroup(url, clientData2)) { resp ⇒ resp should matchPattern { case Ok(ResponseJoinGroup(_, _, _, _, _, _)) ⇒ } } whenReady(messagingService.jhandleMessageRead(peer, System.currentTimeMillis, clientData2)) { _ ⇒ } Thread.sleep(1000) whenReady(sequenceService.jhandleGetDifference(0, Array.empty, clientData1)) { diff ⇒ val resp = diff.toOption.get val updates = resp.updates /** * updates should be: * * UpdateGroupInvite * * ServiceExGroupCreated * * UpdateMessage * * UpdateMessageRead */ updates should have length 4 val update = UpdateMessage.parseFrom(CodedInputStream.newInstance(updates(2).update)).right.toOption.get update.message shouldEqual GroupServiceMessages.userJoined } } def e13() = { val (user1, authId1, _) = createUser() val (user2, authId2, _) = createUser() implicit val clientData = ClientData(authId1, createSessionId(), Some(user1.id)) whenReady(service.handleCreateGroup(1L, "", Vector.empty)) { resp ⇒ inside(resp) { case Error(GroupErrors.WrongGroupTitle) ⇒ } } } }
shaunstanislaus/actor-platform
actor-server/actor-tests/src/test/scala/im/actor/server/api/rpc/service/GroupsServiceSpec.scala
Scala
mit
22,184
package com.sksamuel.elastic4s.searches.aggs.pipeline import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy case class AvgBucketDefinition(name: String, bucketsPath: String, gapPolicy: Option[GapPolicy] = None, format: Option[String] = None, metadata: Map[String, AnyRef] = Map.empty) extends PipelineAggregationDefinition { def format(format: String): AvgBucketDefinition = copy(format = Some(format)) def gapPolicy(gapPolicy: GapPolicy): AvgBucketDefinition = copy(gapPolicy = Some(gapPolicy)) def metadata(metadata: Map[String, AnyRef]): AvgBucketDefinition = copy(metadata = metadata) }
tyth/elastic4s
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/searches/aggs/pipeline/AvgBucketDefinition.scala
Scala
apache-2.0
749
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark import java.io._ import java.util.concurrent.{ConcurrentHashMap, LinkedBlockingQueue, ThreadPoolExecutor} import java.util.zip.{GZIPInputStream, GZIPOutputStream} import scala.collection.JavaConverters._ import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Map} import scala.reflect.ClassTag import scala.util.control.NonFatal import org.apache.spark.broadcast.{Broadcast, BroadcastManager} import org.apache.spark.internal.Logging import org.apache.spark.rpc.{RpcCallContext, RpcEndpoint, RpcEndpointRef, RpcEnv} import org.apache.spark.scheduler.MapStatus import org.apache.spark.shuffle.MetadataFetchFailedException import org.apache.spark.storage.{BlockId, BlockManagerId, ShuffleBlockId} import org.apache.spark.util._ private[spark] sealed trait MapOutputTrackerMessage private[spark] case class GetMapOutputStatuses(shuffleId: Int) extends MapOutputTrackerMessage private[spark] case object StopMapOutputTracker extends MapOutputTrackerMessage private[spark] case class GetMapOutputMessage(shuffleId: Int, context: RpcCallContext) /** RpcEndpoint class for MapOutputTrackerMaster */ private[spark] class MapOutputTrackerMasterEndpoint( override val rpcEnv: RpcEnv, tracker: 
MapOutputTrackerMaster, conf: SparkConf) extends RpcEndpoint with Logging { logDebug("init") // force eager creation of logger override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = { case GetMapOutputStatuses(shuffleId: Int) => val hostPort = context.senderAddress.hostPort logInfo("Asked to send map output locations for shuffle " + shuffleId + " to " + hostPort) val mapOutputStatuses = tracker.post(new GetMapOutputMessage(shuffleId, context)) case StopMapOutputTracker => logInfo("MapOutputTrackerMasterEndpoint stopped!") context.reply(true) stop() } } /** * Class that keeps track of the location of the map output of * a stage. This is abstract because different versions of MapOutputTracker * (driver and executor) use different HashMap to store its metadata. */ private[spark] abstract class MapOutputTracker(conf: SparkConf) extends Logging { /** Set to the MapOutputTrackerMasterEndpoint living on the driver. */ var trackerEndpoint: RpcEndpointRef = _ /** * This HashMap has different behavior for the driver and the executors. * * On the driver, it serves as the source of map outputs recorded from ShuffleMapTasks. * On the executors, it simply serves as a cache, in which a miss triggers a fetch from the * driver's corresponding HashMap. * * Note: because mapStatuses is accessed concurrently, subclasses should make sure it's a * thread-safe map. */ protected val mapStatuses: Map[Int, Array[MapStatus]] /** * Incremented every time a fetch fails so that client nodes know to clear * their cache of map output locations if this happens. */ protected var epoch: Long = 0 protected val epochLock = new AnyRef /** Remembers which map output locations are currently being fetched on an executor. */ private val fetching = new HashSet[Int] /** * Send a message to the trackerEndpoint and get its result within a default timeout, or * throw a SparkException if this fails. 
*/ protected def askTracker[T: ClassTag](message: Any): T = { try { trackerEndpoint.askSync[T](message) } catch { case e: Exception => logError("Error communicating with MapOutputTracker", e) throw new SparkException("Error communicating with MapOutputTracker", e) } } /** Send a one-way message to the trackerEndpoint, to which we expect it to reply with true. */ protected def sendTracker(message: Any) { val response = askTracker[Boolean](message) if (response != true) { throw new SparkException( "Error reply received from MapOutputTracker. Expecting true, got " + response.toString) } } /** * Called from executors to get the server URIs and output sizes for each shuffle block that * needs to be read from a given reduce task. * * @return A sequence of 2-item tuples, where the first item in the tuple is a BlockManagerId, * and the second item is a sequence of (shuffle block id, shuffle block size) tuples * describing the shuffle blocks that are stored at that block manager. */ def getMapSizesByExecutorId(shuffleId: Int, reduceId: Int) : Seq[(BlockManagerId, Seq[(BlockId, Long)])] = { getMapSizesByExecutorId(shuffleId, reduceId, reduceId + 1) } /** * Called from executors to get the server URIs and output sizes for each shuffle block that * needs to be read from a given range of map output partitions (startPartition is included but * endPartition is excluded from the range). * * @return A sequence of 2-item tuples, where the first item in the tuple is a BlockManagerId, * and the second item is a sequence of (shuffle block id, shuffle block size) tuples * describing the shuffle blocks that are stored at that block manager. 
*/ def getMapSizesByExecutorId(shuffleId: Int, startPartition: Int, endPartition: Int) : Seq[(BlockManagerId, Seq[(BlockId, Long)])] = { logDebug(s"Fetching outputs for shuffle $shuffleId, partitions $startPartition-$endPartition") val statuses = getStatuses(shuffleId) // Synchronize on the returned array because, on the driver, it gets mutated in place statuses.synchronized { return MapOutputTracker.convertMapStatuses(shuffleId, startPartition, endPartition, statuses) } } /** * Return statistics about all of the outputs for a given shuffle. */ def getStatistics(dep: ShuffleDependency[_, _, _]): MapOutputStatistics = { val statuses = getStatuses(dep.shuffleId) // Synchronize on the returned array because, on the driver, it gets mutated in place statuses.synchronized { val totalSizes = new Array[Long](dep.partitioner.numPartitions) for (s <- statuses) { for (i <- 0 until totalSizes.length) { totalSizes(i) += s.getSizeForBlock(i) } } new MapOutputStatistics(dep.shuffleId, totalSizes) } } /** * Get or fetch the array of MapStatuses for a given shuffle ID. NOTE: clients MUST synchronize * on this array when reading it, because on the driver, we may be changing it in place. * * (It would be nice to remove this restriction in the future.) */ private def getStatuses(shuffleId: Int): Array[MapStatus] = { val statuses = mapStatuses.get(shuffleId).orNull if (statuses == null) { logInfo("Don't have map outputs for shuffle " + shuffleId + ", fetching them") val startTime = System.currentTimeMillis var fetchedStatuses: Array[MapStatus] = null fetching.synchronized { // Someone else is fetching it; wait for them to be done while (fetching.contains(shuffleId)) { try { fetching.wait() } catch { case e: InterruptedException => } } // Either while we waited the fetch happened successfully, or // someone fetched it in between the get and the fetching.synchronized. 
fetchedStatuses = mapStatuses.get(shuffleId).orNull if (fetchedStatuses == null) { // We have to do the fetch, get others to wait for us. fetching += shuffleId } } if (fetchedStatuses == null) { // We won the race to fetch the statuses; do so logInfo("Doing the fetch; tracker endpoint = " + trackerEndpoint) // This try-finally prevents hangs due to timeouts: try { val fetchedBytes = askTracker[Array[Byte]](GetMapOutputStatuses(shuffleId)) fetchedStatuses = MapOutputTracker.deserializeMapStatuses(fetchedBytes) logInfo("Got the output locations") mapStatuses.put(shuffleId, fetchedStatuses) } finally { fetching.synchronized { fetching -= shuffleId fetching.notifyAll() } } } logDebug(s"Fetching map output statuses for shuffle $shuffleId took " + s"${System.currentTimeMillis - startTime} ms") if (fetchedStatuses != null) { return fetchedStatuses } else { logError("Missing all output locations for shuffle " + shuffleId) throw new MetadataFetchFailedException( shuffleId, -1, "Missing all output locations for shuffle " + shuffleId) } } else { return statuses } } /** Called to get current epoch number. */ def getEpoch: Long = { epochLock.synchronized { return epoch } } /** * Called from executors to update the epoch number, potentially clearing old outputs * because of a fetch failure. Each executor task calls this with the latest epoch * number on the driver at the time it was created. */ def updateEpoch(newEpoch: Long) { epochLock.synchronized { if (newEpoch > epoch) { logInfo("Updating epoch to " + newEpoch + " and clearing cache") epoch = newEpoch mapStatuses.clear() } } } /** Unregister shuffle data. */ def unregisterShuffle(shuffleId: Int) { mapStatuses.remove(shuffleId) } /** Stop the tracker. */ def stop() { } } /** * MapOutputTracker for the driver. 
* 跟踪所有的Mapper的输出的 */ private[spark] class MapOutputTrackerMaster(conf: SparkConf, broadcastManager: BroadcastManager, isLocal: Boolean) extends MapOutputTracker(conf) { /** Cache a serialized version of the output statuses for each shuffle to send them out faster */ private var cacheEpoch = epoch // The size at which we use Broadcast to send the map output statuses to the executors private val minSizeForBroadcast = conf.getSizeAsBytes("spark.shuffle.mapOutput.minSizeForBroadcast", "512k").toInt /** Whether to compute locality preferences for reduce tasks */ private val shuffleLocalityEnabled = conf.getBoolean("spark.shuffle.reduceLocality.enabled", true) // Number of map and reduce tasks above which we do not assign preferred locations based on map // output sizes. We limit the size of jobs for which assign preferred locations as computing the // top locations by size becomes expensive. private val SHUFFLE_PREF_MAP_THRESHOLD = 1000 // NOTE: This should be less than 2000 as we use HighlyCompressedMapStatus beyond that private val SHUFFLE_PREF_REDUCE_THRESHOLD = 1000 // Fraction of total map output that must be at a location for it to considered as a preferred // location for a reduce task. Making this larger will focus on fewer locations where most data // can be read locally, but may lead to more delay in scheduling if those locations are busy. private val REDUCER_PREF_LOCS_FRACTION = 0.2 // HashMaps for storing mapStatuses and cached serialized statuses in the driver. // Statuses are dropped only by explicit de-registering. protected val mapStatuses = new ConcurrentHashMap[Int, Array[MapStatus]]().asScala private val cachedSerializedStatuses = new ConcurrentHashMap[Int, Array[Byte]]().asScala private val maxRpcMessageSize = RpcUtils.maxMessageSizeBytes(conf) // Kept in sync with cachedSerializedStatuses explicitly // This is required so that the Broadcast variable remains in scope until we remove // the shuffleId explicitly or implicitly. 
private val cachedSerializedBroadcast = new HashMap[Int, Broadcast[Array[Byte]]]() // This is to prevent multiple serializations of the same shuffle - which happens when // there is a request storm when shuffle start. private val shuffleIdLocks = new ConcurrentHashMap[Int, AnyRef]() // requests for map output statuses private val mapOutputRequests = new LinkedBlockingQueue[GetMapOutputMessage] // Thread pool used for handling map output status requests. This is a separate thread pool // to ensure we don't block the normal dispatcher threads. private val threadpool: ThreadPoolExecutor = { val numThreads = conf.getInt("spark.shuffle.mapOutput.dispatcher.numThreads", 8) val pool = ThreadUtils.newDaemonFixedThreadPool(numThreads, "map-output-dispatcher") for (i <- 0 until numThreads) { pool.execute(new MessageLoop) } pool } // Make sure that we aren't going to exceed the max RPC message size by making sure // we use broadcast to send large map output statuses. if (minSizeForBroadcast > maxRpcMessageSize) { val msg = s"spark.shuffle.mapOutput.minSizeForBroadcast ($minSizeForBroadcast bytes) must " + s"be <= spark.rpc.message.maxSize ($maxRpcMessageSize bytes) to prevent sending an rpc " + "message that is too large." logError(msg) throw new IllegalArgumentException(msg) } def post(message: GetMapOutputMessage): Unit = { mapOutputRequests.offer(message) } /** Message loop used for dispatching messages. */ private class MessageLoop extends Runnable { override def run(): Unit = { try { while (true) { try { val data = mapOutputRequests.take() if (data == PoisonPill) { // Put PoisonPill back so that other MessageLoops can see it. 
mapOutputRequests.offer(PoisonPill) return } val context = data.context val shuffleId = data.shuffleId val hostPort = context.senderAddress.hostPort logDebug("Handling request to send map output locations for shuffle " + shuffleId + " to " + hostPort) val mapOutputStatuses = getSerializedMapOutputStatuses(shuffleId) context.reply(mapOutputStatuses) } catch { case NonFatal(e) => logError(e.getMessage, e) } } } catch { case ie: InterruptedException => // exit } } } /** A poison endpoint that indicates MessageLoop should exit its message loop. */ private val PoisonPill = new GetMapOutputMessage(-99, null) // Exposed for testing private[spark] def getNumCachedSerializedBroadcast = cachedSerializedBroadcast.size def registerShuffle(shuffleId: Int, numMaps: Int) { if (mapStatuses.put(shuffleId, new Array[MapStatus](numMaps)).isDefined) { throw new IllegalArgumentException("Shuffle ID " + shuffleId + " registered twice") } // add in advance shuffleIdLocks.putIfAbsent(shuffleId, new Object()) } def registerMapOutput(shuffleId: Int, mapId: Int, status: MapStatus) { val array = mapStatuses(shuffleId) array.synchronized { array(mapId) = status } } /** Register multiple map output information for the given shuffle */ def registerMapOutputs(shuffleId: Int, statuses: Array[MapStatus], changeEpoch: Boolean = false) { mapStatuses.put(shuffleId, statuses.clone()) if (changeEpoch) { incrementEpoch() } } /** Unregister map output information of the given shuffle, mapper and block manager */ def unregisterMapOutput(shuffleId: Int, mapId: Int, bmAddress: BlockManagerId) { val arrayOpt = mapStatuses.get(shuffleId) if (arrayOpt.isDefined && arrayOpt.get != null) { val array = arrayOpt.get array.synchronized { if (array(mapId) != null && array(mapId).location == bmAddress) { array(mapId) = null } } incrementEpoch() } else { throw new SparkException("unregisterMapOutput called for nonexistent shuffle ID") } } /** Unregister shuffle data */ override def unregisterShuffle(shuffleId: Int) { 
mapStatuses.remove(shuffleId) cachedSerializedStatuses.remove(shuffleId) cachedSerializedBroadcast.remove(shuffleId).foreach(v => removeBroadcast(v)) shuffleIdLocks.remove(shuffleId) } /** Check if the given shuffle is being tracked */ def containsShuffle(shuffleId: Int): Boolean = { cachedSerializedStatuses.contains(shuffleId) || mapStatuses.contains(shuffleId) } /** * Return the preferred hosts on which to run the given map output partition in a given shuffle, * i.e. the nodes that the most outputs for that partition are on. * * @param dep shuffle dependency object * @param partitionId map output partition that we want to read * @return a sequence of host names */ def getPreferredLocationsForShuffle(dep: ShuffleDependency[_, _, _], partitionId: Int) : Seq[String] = { if (shuffleLocalityEnabled && dep.rdd.partitions.length < SHUFFLE_PREF_MAP_THRESHOLD && dep.partitioner.numPartitions < SHUFFLE_PREF_REDUCE_THRESHOLD) { val blockManagerIds = getLocationsWithLargestOutputs(dep.shuffleId, partitionId, dep.partitioner.numPartitions, REDUCER_PREF_LOCS_FRACTION) if (blockManagerIds.nonEmpty) { blockManagerIds.get.map(_.host) } else { Nil } } else { Nil } } /** * Return a list of locations that each have fraction of map output greater than the specified * threshold. * * @param shuffleId id of the shuffle * @param reducerId id of the reduce task * @param numReducers total number of reducers in the shuffle * @param fractionThreshold fraction of total map output size that a location must have * for it to be considered large. 
*/ def getLocationsWithLargestOutputs( shuffleId: Int, reducerId: Int, numReducers: Int, fractionThreshold: Double) : Option[Array[BlockManagerId]] = { val statuses = mapStatuses.get(shuffleId).orNull if (statuses != null) { statuses.synchronized { if (statuses.nonEmpty) { // HashMap to add up sizes of all blocks at the same location val locs = new HashMap[BlockManagerId, Long] var totalOutputSize = 0L var mapIdx = 0 while (mapIdx < statuses.length) { val status = statuses(mapIdx) // status may be null here if we are called between registerShuffle, which creates an // array with null entries for each output, and registerMapOutputs, which populates it // with valid status entries. This is possible if one thread schedules a job which // depends on an RDD which is currently being computed by another thread. if (status != null) { val blockSize = status.getSizeForBlock(reducerId) if (blockSize > 0) { locs(status.location) = locs.getOrElse(status.location, 0L) + blockSize totalOutputSize += blockSize } } mapIdx = mapIdx + 1 } val topLocs = locs.filter { case (loc, size) => size.toDouble / totalOutputSize >= fractionThreshold } // Return if we have any locations which satisfy the required threshold if (topLocs.nonEmpty) { return Some(topLocs.keys.toArray) } } } } None } def incrementEpoch() { epochLock.synchronized { epoch += 1 logDebug("Increasing epoch to " + epoch) } } private def removeBroadcast(bcast: Broadcast[_]): Unit = { if (null != bcast) { broadcastManager.unbroadcast(bcast.id, removeFromDriver = true, blocking = false) } } private def clearCachedBroadcast(): Unit = { for (cached <- cachedSerializedBroadcast) removeBroadcast(cached._2) cachedSerializedBroadcast.clear() } def getSerializedMapOutputStatuses(shuffleId: Int): Array[Byte] = { var statuses: Array[MapStatus] = null var retBytes: Array[Byte] = null var epochGotten: Long = -1 // Check to see if we have a cached version, returns true if it does // and has side effect of setting retBytes. 
If not returns false // with side effect of setting statuses def checkCachedStatuses(): Boolean = { epochLock.synchronized { if (epoch > cacheEpoch) { cachedSerializedStatuses.clear() clearCachedBroadcast() cacheEpoch = epoch } cachedSerializedStatuses.get(shuffleId) match { case Some(bytes) => retBytes = bytes true case None => logDebug("cached status not found for : " + shuffleId) statuses = mapStatuses.getOrElse(shuffleId, Array.empty[MapStatus]) epochGotten = epoch false } } } if (checkCachedStatuses()) return retBytes var shuffleIdLock = shuffleIdLocks.get(shuffleId) if (null == shuffleIdLock) { val newLock = new Object() // in general, this condition should be false - but good to be paranoid val prevLock = shuffleIdLocks.putIfAbsent(shuffleId, newLock) shuffleIdLock = if (null != prevLock) prevLock else newLock } // synchronize so we only serialize/broadcast it once since multiple threads call // in parallel shuffleIdLock.synchronized { // double check to make sure someone else didn't serialize and cache the same // mapstatus while we were waiting on the synchronize if (checkCachedStatuses()) return retBytes // If we got here, we failed to find the serialized locations in the cache, so we pulled // out a snapshot of the locations as "statuses"; let's serialize and return that val (bytes, bcast) = MapOutputTracker.serializeMapStatuses(statuses, broadcastManager, isLocal, minSizeForBroadcast) logInfo("Size of output statuses for shuffle %d is %d bytes".format(shuffleId, bytes.length)) // Add them into the table only if the epoch hasn't changed while we were working epochLock.synchronized { if (epoch == epochGotten) { cachedSerializedStatuses(shuffleId) = bytes if (null != bcast) cachedSerializedBroadcast(shuffleId) = bcast } else { logInfo("Epoch changed, not caching!") removeBroadcast(bcast) } } bytes } } override def stop() { mapOutputRequests.offer(PoisonPill) threadpool.shutdown() sendTracker(StopMapOutputTracker) mapStatuses.clear() trackerEndpoint = null 
cachedSerializedStatuses.clear() clearCachedBroadcast() shuffleIdLocks.clear() } } /** * MapOutputTracker for the executors, which fetches map output information from the driver's * MapOutputTrackerMaster. */ private[spark] class MapOutputTrackerWorker(conf: SparkConf) extends MapOutputTracker(conf) { protected val mapStatuses: Map[Int, Array[MapStatus]] = new ConcurrentHashMap[Int, Array[MapStatus]]().asScala } // 保存 Shuffle Map Task 输出的位置信息 private[spark] object MapOutputTracker extends Logging { val ENDPOINT_NAME = "MapOutputTracker" private val DIRECT = 0 private val BROADCAST = 1 // Serialize an array of map output locations into an efficient byte format so that we can send // it to reduce tasks. We do this by compressing the serialized bytes using GZIP. They will // generally be pretty compressible because many map outputs will be on the same hostname. def serializeMapStatuses(statuses: Array[MapStatus], broadcastManager: BroadcastManager, isLocal: Boolean, minBroadcastSize: Int): (Array[Byte], Broadcast[Array[Byte]]) = { val out = new ByteArrayOutputStream out.write(DIRECT) val objOut = new ObjectOutputStream(new GZIPOutputStream(out)) Utils.tryWithSafeFinally { // Since statuses can be modified in parallel, sync on it statuses.synchronized { objOut.writeObject(statuses) } } { objOut.close() } val arr = out.toByteArray if (arr.length >= minBroadcastSize) { // Use broadcast instead. // Important arr(0) is the tag == DIRECT, ignore that while deserializing ! val bcast = broadcastManager.newBroadcast(arr, isLocal) // toByteArray creates copy, so we can reuse out out.reset() out.write(BROADCAST) val oos = new ObjectOutputStream(new GZIPOutputStream(out)) oos.writeObject(bcast) oos.close() val outArr = out.toByteArray logInfo("Broadcast mapstatuses size = " + outArr.length + ", actual size = " + arr.length) (outArr, bcast) } else { (arr, null) } } // Opposite of serializeMapStatuses. 
def deserializeMapStatuses(bytes: Array[Byte]): Array[MapStatus] = { assert (bytes.length > 0) def deserializeObject(arr: Array[Byte], off: Int, len: Int): AnyRef = { val objIn = new ObjectInputStream(new GZIPInputStream( new ByteArrayInputStream(arr, off, len))) Utils.tryWithSafeFinally { objIn.readObject() } { objIn.close() } } bytes(0) match { case DIRECT => deserializeObject(bytes, 1, bytes.length - 1).asInstanceOf[Array[MapStatus]] case BROADCAST => // deserialize the Broadcast, pull .value array out of it, and then deserialize that val bcast = deserializeObject(bytes, 1, bytes.length - 1). asInstanceOf[Broadcast[Array[Byte]]] logInfo("Broadcast mapstatuses size = " + bytes.length + ", actual size = " + bcast.value.length) // Important - ignore the DIRECT tag ! Start from offset 1 deserializeObject(bcast.value, 1, bcast.value.length - 1).asInstanceOf[Array[MapStatus]] case _ => throw new IllegalArgumentException("Unexpected byte tag = " + bytes(0)) } } /** * Given an array of map statuses and a range of map output partitions, returns a sequence that, * for each block manager ID, lists the shuffle block IDs and corresponding shuffle block sizes * stored at that block manager. * * If any of the statuses is null (indicating a missing location due to a failed mapper), * throws a FetchFailedException. * * @param shuffleId Identifier for the shuffle * @param startPartition Start of map output partition ID range (included in range) * @param endPartition End of map output partition ID range (excluded from range) * @param statuses List of map statuses, indexed by map ID. * @return A sequence of 2-item tuples, where the first item in the tuple is a BlockManagerId, * and the second item is a sequence of (shuffle block ID, shuffle block size) tuples * describing the shuffle blocks that are stored at that block manager. 
*/ private def convertMapStatuses( shuffleId: Int, startPartition: Int, endPartition: Int, statuses: Array[MapStatus]): Seq[(BlockManagerId, Seq[(BlockId, Long)])] = { assert (statuses != null) val splitsByAddress = new HashMap[BlockManagerId, ArrayBuffer[(BlockId, Long)]] for ((status, mapId) <- statuses.zipWithIndex) { if (status == null) { val errorMessage = s"Missing an output location for shuffle $shuffleId" logError(errorMessage) throw new MetadataFetchFailedException(shuffleId, startPartition, errorMessage) } else { for (part <- startPartition until endPartition) { splitsByAddress.getOrElseUpdate(status.location, ArrayBuffer()) += ((ShuffleBlockId(shuffleId, mapId, part), status.getSizeForBlock(part))) } } } splitsByAddress.toSeq } }
wangyixiaohuihui/spark2-annotation
core/src/main/scala/org/apache/spark/MapOutputTracker.scala
Scala
apache-2.0
28,807
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.catalyst.expressions

import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.FunctionRegistry.FunctionBuilder
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, GenericArrayData, TypeUtils}
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.Platform
import org.apache.spark.unsafe.array.ByteArrayMethods
import org.apache.spark.unsafe.types.UTF8String

/**
 * Returns an Array containing the evaluation of all children expressions.
 */
@ExpressionDescription(
  usage = "_FUNC_(expr, ...) - Returns an array with the given elements.",
  examples = """
    Examples:
      > SELECT _FUNC_(1, 2, 3);
       [1,2,3]
  """)
case class CreateArray(children: Seq[Expression]) extends Expression {

  override def foldable: Boolean = children.forall(_.foldable)

  override def checkInputDataTypes(): TypeCheckResult = {
    // All elements must share a single type; mixed-type arrays are a type error.
    TypeUtils.checkForSameTypeInputExpr(children.map(_.dataType), s"function $prettyName")
  }

  override def dataType: ArrayType = {
    // Empty array defaults to ArrayType(StringType); element nullability is the
    // union of the children's nullability.
    ArrayType(
      children.headOption.map(_.dataType).getOrElse(StringType),
      containsNull = children.exists(_.nullable))
  }

  // The array value itself is never null (only elements may be).
  override def nullable: Boolean = false

  override def eval(input: InternalRow): Any = {
    new GenericArrayData(children.map(_.eval(input)).toArray)
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val et = dataType.elementType
    val evals = children.map(e => e.genCode(ctx))
    // isMapKey = false: null elements are stored as nulls rather than rejected.
    val (preprocess, assigns, postprocess, arrayData) =
      GenArrayData.genCodeToCreateArrayData(ctx, et, evals, false)
    ev.copy(
      code = preprocess + assigns + postprocess,
      value = arrayData,
      isNull = "false")
  }

  override def prettyName: String = "array"
}

private [sql] object GenArrayData {
  /**
   * Return Java code pieces based on DataType and isPrimitive to allocate ArrayData class
   *
   * @param ctx a [[CodegenContext]]
   * @param elementType data type of underlying array elements
   * @param elementsCode concatenated set of [[ExprCode]] for each element of an underlying array
   * @param isMapKey if true, throw an exception when the element is null
   * @return (code pre-assignments, concatenated assignments to each array elements,
   *         code post-assignments, arrayData name)
   */
  def genCodeToCreateArrayData(
      ctx: CodegenContext,
      elementType: DataType,
      elementsCode: Seq[ExprCode],
      isMapKey: Boolean): (String, String, String, String) = {
    val arrayDataName = ctx.freshName("arrayData")
    val numElements = elementsCode.length

    if (!ctx.isPrimitiveType(elementType)) {
      // Non-primitive elements: build an Object[] and wrap it in GenericArrayData.
      val arrayName = ctx.freshName("arrayObject")
      val genericArrayClass = classOf[GenericArrayData].getName
      val assignments = elementsCode.zipWithIndex.map { case (eval, i) =>
        val isNullAssignment = if (!isMapKey) {
          s"$arrayName[$i] = null;"
        } else {
          "throw new RuntimeException(\\"Cannot use null as map key!\\");"
        }
        eval.code + s"""
         if (${eval.isNull}) {
           $isNullAssignment
         } else {
           $arrayName[$i] = ${eval.value};
         }
       """
      }
      // Assignments may exceed JVM method size limits, so split into helper methods.
      val assignmentString = ctx.splitExpressionsWithCurrentInputs(
        expressions = assignments,
        funcName = "apply",
        extraArguments = ("Object[]", arrayName) :: Nil)

      (s"Object[] $arrayName = new Object[$numElements];",
       assignmentString,
       s"final ArrayData $arrayDataName = new $genericArrayClass($arrayName);",
       arrayDataName)
    } else {
      // Primitive elements: write directly into an UnsafeArrayData backed by a
      // byte[] sized as header + word-rounded element storage.
      val arrayName = ctx.freshName("array")
      val unsafeArraySizeInBytes =
        UnsafeArrayData.calculateHeaderPortionInBytes(numElements) +
        ByteArrayMethods.roundNumberOfBytesToNearestWord(elementType.defaultSize * numElements)
      val baseOffset = Platform.BYTE_ARRAY_OFFSET

      val primitiveValueTypeName = ctx.primitiveTypeName(elementType)
      val assignments = elementsCode.zipWithIndex.map { case (eval, i) =>
        val isNullAssignment = if (!isMapKey) {
          s"$arrayDataName.setNullAt($i);"
        } else {
          "throw new RuntimeException(\\"Cannot use null as map key!\\");"
        }
        eval.code + s"""
         if (${eval.isNull}) {
           $isNullAssignment
         } else {
           $arrayDataName.set$primitiveValueTypeName($i, ${eval.value});
         }
       """
      }
      val assignmentString = ctx.splitExpressionsWithCurrentInputs(
        expressions = assignments,
        funcName = "apply",
        extraArguments = ("UnsafeArrayData", arrayDataName) :: Nil)

      // The putLong stores the element count in the UnsafeArrayData header.
      (s"""
        byte[] $arrayName = new byte[$unsafeArraySizeInBytes];
        UnsafeArrayData $arrayDataName = new UnsafeArrayData();
        Platform.putLong($arrayName, $baseOffset, $numElements);
        $arrayDataName.pointTo($arrayName, $baseOffset, $unsafeArraySizeInBytes);
      """,
       assignmentString,
       "",
       arrayDataName)
    }
  }
}

/**
 * Returns a catalyst Map containing the evaluation of all children expressions as keys and values.
 * The children are a flattened sequence of kv pairs, e.g. (key1, value1, key2, value2, ...)
 */
@ExpressionDescription(
  usage = "_FUNC_(key0, value0, key1, value1, ...) - Creates a map with the given key/value pairs.",
  examples = """
    Examples:
      > SELECT _FUNC_(1.0, '2', 3.0, '4');
       {1.0:"2",3.0:"4"}
  """)
case class CreateMap(children: Seq[Expression]) extends Expression {
  // Even-indexed children are keys, odd-indexed children are values.
  lazy val keys = children.indices.filter(_ % 2 == 0).map(children)
  lazy val values = children.indices.filter(_ % 2 != 0).map(children)

  override def foldable: Boolean = children.forall(_.foldable)

  override def checkInputDataTypes(): TypeCheckResult = {
    if (children.size % 2 != 0) {
      TypeCheckResult.TypeCheckFailure(
        s"$prettyName expects a positive even number of arguments.")
    } else if (keys.map(_.dataType).distinct.length > 1) {
      TypeCheckResult.TypeCheckFailure(
        "The given keys of function map should all be the same type, but they are " +
          keys.map(_.dataType.simpleString).mkString("[", ", ", "]"))
    } else if (values.map(_.dataType).distinct.length > 1) {
      TypeCheckResult.TypeCheckFailure(
        "The given values of function map should all be the same type, but they are " +
          values.map(_.dataType.simpleString).mkString("[", ", ", "]"))
    } else {
      TypeCheckResult.TypeCheckSuccess
    }
  }

  override def dataType: DataType = {
    // Empty map defaults to MapType(StringType, StringType); keys are never null,
    // so only valueContainsNull is derived from the children.
    MapType(
      keyType = keys.headOption.map(_.dataType).getOrElse(StringType),
      valueType = values.headOption.map(_.dataType).getOrElse(StringType),
      valueContainsNull = values.exists(_.nullable))
  }

  override def nullable: Boolean = false

  override def eval(input: InternalRow): Any = {
    val keyArray = keys.map(_.eval(input)).toArray
    // Null map keys are rejected at runtime (matching the codegen path above).
    if (keyArray.contains(null)) {
      throw new RuntimeException("Cannot use null as map key!")
    }
    val valueArray = values.map(_.eval(input)).toArray
    new ArrayBasedMapData(new GenericArrayData(keyArray), new GenericArrayData(valueArray))
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val mapClass = classOf[ArrayBasedMapData].getName
    val MapType(keyDt, valueDt, _) = dataType
    val evalKeys = keys.map(e => e.genCode(ctx))
    val evalValues = values.map(e => e.genCode(ctx))
    // isMapKey = true for keys (null keys throw), false for values (nulls allowed).
    val (preprocessKeyData, assignKeys, postprocessKeyData, keyArrayData) =
      GenArrayData.genCodeToCreateArrayData(ctx, keyDt, evalKeys, true)
    val (preprocessValueData, assignValues, postprocessValueData, valueArrayData) =
      GenArrayData.genCodeToCreateArrayData(ctx, valueDt, evalValues, false)
    val code =
      s"""
       final boolean ${ev.isNull} = false;
       $preprocessKeyData
       $assignKeys
       $postprocessKeyData
       $preprocessValueData
       $assignValues
       $postprocessValueData
       final MapData ${ev.value} = new $mapClass($keyArrayData, $valueArrayData);
      """
    ev.copy(code = code)
  }

  override def prettyName: String = "map"
}

/**
 * An expression representing a not yet available attribute name. This expression is unevaluable
 * and as its name suggests it is a temporary place holder until we're able to determine the
 * actual attribute name.
 */
case object NamePlaceholder extends LeafExpression with Unevaluable {
  // Never resolved: the analyzer must replace it before evaluation.
  override lazy val resolved: Boolean = false
  override def foldable: Boolean = false
  override def nullable: Boolean = false
  override def dataType: DataType = StringType
  override def prettyName: String = "NamePlaceholder"
  override def toString: String = prettyName
}

/**
 * Returns a Row containing the evaluation of all children expressions.
 */
object CreateStruct extends FunctionBuilder {
  def apply(children: Seq[Expression]): CreateNamedStruct = {
    // Derive field names: resolved NamedExpressions keep their name, unresolved
    // ones get a NamePlaceholder, everything else becomes "colN" (1-based).
    CreateNamedStruct(children.zipWithIndex.flatMap {
      case (e: NamedExpression, _) if e.resolved => Seq(Literal(e.name), e)
      case (e: NamedExpression, _) => Seq(NamePlaceholder, e)
      case (e, index) => Seq(Literal(s"col${index + 1}"), e)
    })
  }

  /**
   * Entry to use in the function registry.
   */
  val registryEntry: (String, (ExpressionInfo, FunctionBuilder)) = {
    val info: ExpressionInfo = new ExpressionInfo(
      "org.apache.spark.sql.catalyst.expressions.NamedStruct",
      null,
      "struct",
      "_FUNC_(col1, col2, col3, ...) - Creates a struct with the given field values.",
      "",
      "",
      "",
      "")
    ("struct", (info, this))
  }
}

/**
 * Common base class for both [[CreateNamedStruct]] and [[CreateNamedStructUnsafe]].
 */
trait CreateNamedStructLike extends Expression {
  // Children alternate (name, value); split them into the two parallel lists.
  lazy val (nameExprs, valExprs) = children.grouped(2).map {
    case Seq(name, value) => (name, value)
  }.toList.unzip

  // Names are foldable, so they can be evaluated against EmptyRow.
  lazy val names = nameExprs.map(_.eval(EmptyRow))

  override def nullable: Boolean = false

  override def foldable: Boolean = valExprs.forall(_.foldable)

  override lazy val dataType: StructType = {
    val fields = names.zip(valExprs).map { case (name, expr) =>
      // Preserve metadata when the value expression is itself named.
      val metadata = expr match {
        case ne: NamedExpression => ne.metadata
        case _ => Metadata.empty
      }
      StructField(name.toString, expr.dataType, expr.nullable, metadata)
    }
    StructType(fields)
  }

  override def checkInputDataTypes(): TypeCheckResult = {
    if (children.length < 1) {
      TypeCheckResult.TypeCheckFailure(
        s"input to function $prettyName requires at least one argument")
    } else if (children.size % 2 != 0) {
      TypeCheckResult.TypeCheckFailure(s"$prettyName expects an even number of arguments.")
    } else {
      // Odd positions (names) must be foldable strings and must not evaluate to null.
      val invalidNames = nameExprs.filterNot(e => e.foldable && e.dataType == StringType)
      if (invalidNames.nonEmpty) {
        TypeCheckResult.TypeCheckFailure(
          "Only foldable StringType expressions are allowed to appear at odd position, got:" +
            s" ${invalidNames.mkString(",")}")
      } else if (!names.contains(null)) {
        TypeCheckResult.TypeCheckSuccess
      } else {
        TypeCheckResult.TypeCheckFailure("Field name should not be null")
      }
    }
  }

  /**
   * Returns Aliased [[Expression]]s that could be used to construct a flattened version of this
   * StructType.
   */
  def flatten: Seq[NamedExpression] = valExprs.zip(names).map {
    case (v, n) => Alias(v, n.toString)()
  }

  override def eval(input: InternalRow): Any = {
    InternalRow(valExprs.map(_.eval(input)): _*)
  }
}

/**
 * Creates a struct with the given field names and values
 *
 * @param children Seq(name1, val1, name2, val2, ...)
 */
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "_FUNC_(name1, val1, name2, val2, ...) - Creates a struct with the given field names and values.",
  examples = """
    Examples:
      > SELECT _FUNC_("a", 1, "b", 2, "c", 3);
       {"a":1,"b":2,"c":3}
  """)
// scalastyle:on line.size.limit
case class CreateNamedStruct(children: Seq[Expression]) extends CreateNamedStructLike {

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val rowClass = classOf[GenericInternalRow].getName
    val values = ctx.freshName("values")
    val valCodes = valExprs.zipWithIndex.map { case (e, i) =>
      val eval = e.genCode(ctx)
      s"""
         |${eval.code}
         |if (${eval.isNull}) {
         |  $values[$i] = null;
         |} else {
         |  $values[$i] = ${eval.value};
         |}
       """.stripMargin
    }
    // Split so generated methods stay under JVM bytecode size limits.
    val valuesCode = ctx.splitExpressionsWithCurrentInputs(
      expressions = valCodes,
      funcName = "createNamedStruct",
      extraArguments = "Object[]" -> values :: Nil)

    // $values is nulled afterwards so the temporary buffer can be GC'd.
    ev.copy(code =
      s"""
         |Object[] $values = new Object[${valExprs.size}];
         |$valuesCode
         |final InternalRow ${ev.value} = new $rowClass($values);
         |$values = null;
       """.stripMargin, isNull = "false")
  }

  override def prettyName: String = "named_struct"
}

/**
 * Creates a struct with the given field names and values. This is a variant that returns
 * UnsafeRow directly. The unsafe projection operator replaces [[CreateStruct]] with
 * this expression automatically at runtime.
 *
 * @param children Seq(name1, val1, name2, val2, ...)
 */
case class CreateNamedStructUnsafe(children: Seq[Expression]) extends CreateNamedStructLike {

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    // Delegate to GenerateUnsafeProjection, which emits an UnsafeRow directly.
    val eval = GenerateUnsafeProjection.createCode(ctx, valExprs)
    ExprCode(code = eval.code, isNull = "false", value = eval.value)
  }

  override def prettyName: String = "named_struct_unsafe"
}

/**
 * Creates a map after splitting the input text into key/value pairs using delimiters
 */
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "_FUNC_(text[, pairDelim[, keyValueDelim]]) - Creates a map after splitting the text into key/value pairs using delimiters. Default delimiters are ',' for `pairDelim` and ':' for `keyValueDelim`.",
  examples = """
    Examples:
      > SELECT _FUNC_('a:1,b:2,c:3', ',', ':');
       map("a":"1","b":"2","c":"3")
      > SELECT _FUNC_('a');
       map("a":null)
  """)
// scalastyle:on line.size.limit
case class StringToMap(text: Expression, pairDelim: Expression, keyValueDelim: Expression)
  extends TernaryExpression with CodegenFallback with ExpectsInputTypes {

  // Two-argument form: default key/value delimiter is ':'.
  def this(child: Expression, pairDelim: Expression) = {
    this(child, pairDelim, Literal(":"))
  }

  // One-argument form: default pair delimiter ',' and key/value delimiter ':'.
  def this(child: Expression) = {
    this(child, Literal(","), Literal(":"))
  }

  override def children: Seq[Expression] = Seq(text, pairDelim, keyValueDelim)

  override def inputTypes: Seq[AbstractDataType] = Seq(StringType, StringType, StringType)

  override def dataType: DataType = MapType(StringType, StringType)

  override def checkInputDataTypes(): TypeCheckResult = {
    if (Seq(pairDelim, keyValueDelim).exists(! _.foldable)) {
      TypeCheckResult.TypeCheckFailure(s"$prettyName's delimiters must be foldable.")
    } else {
      super.checkInputDataTypes()
    }
  }

  override def nullSafeEval(
      inputString: Any,
      stringDelimiter: Any,
      keyValueDelimiter: Any): Any = {
    // split(..., -1) keeps trailing empty strings so "a,b," yields an empty last pair.
    val keyValues =
      inputString.asInstanceOf[UTF8String].split(stringDelimiter.asInstanceOf[UTF8String], -1)

    // Lazily produce (key, value) tuples; a pair with no delimiter maps to (key, null).
    val iterator = new Iterator[(UTF8String, UTF8String)] {
      var index = 0
      val keyValueDelimiterUTF8String = keyValueDelimiter.asInstanceOf[UTF8String]

      override def hasNext: Boolean = {
        keyValues.length > index
      }

      override def next(): (UTF8String, UTF8String) = {
        // Limit 2: the value may itself contain the key/value delimiter.
        val keyValueArray = keyValues(index).split(keyValueDelimiterUTF8String, 2)
        index += 1
        (keyValueArray(0), if (keyValueArray.length < 2) null else keyValueArray(1))
      }
    }
    ArrayBasedMapData(iterator, keyValues.size, identity, identity)
  }

  override def prettyName: String = "str_to_map"
}
esi-mineset/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeCreator.scala
Scala
apache-2.0
16,866
package com.surgeforward.examples

import scala.collection.mutable.ListBuffer

/**
 * Demonstrates higher-order collection functions (filter/map/groupBy) over a
 * small sealed Animal hierarchy.
 *
 * Created by mnelson on 8/21/14.
 */
object HigherOrderFunctions {

  /** Base type for the examples; animals are non-fuzzy by default. */
  sealed trait Animal {
    def name: String
    override def toString = name
    def fuzzy = false
  }

  case class Lion(name: String) extends Animal
  case class Tiger(name: String) extends Animal

  /** The only fuzzy animal in the hierarchy. */
  case class Bear(name: String) extends Animal {
    override def fuzzy = true
  }

  def main(args: Array[String]): Unit = {
    val animals = List(
      Lion("Leo"),
      Tiger("Tommy"),
      Bear("Phillip")
    )

    // Names of the fuzzy animals only.
    val names = animals.filter(_.fuzzy).map(_.name)
    println(names)

    // Same predicate as above; `a.fuzzy == true` was redundant — the Boolean
    // itself is the predicate.
    val fuzzyAnimals = animals.filter(_.fuzzy)
    println(fuzzyAnimals)

    // Partition by fuzziness and report each group. (The original bound the
    // Unit result of foreach to a val named `byFuzzyness`, which misleadingly
    // suggested a data value was produced.)
    animals
      .groupBy(_.fuzzy)
      .foreach { case (isFuzzy, group) =>
        if (isFuzzy) println("Fuzzies: " + group)
        else println("Not Fuzzies: " + group)
      }
  }
}
realrunner/scala-examples
src/main/scala/com/surgeforward/examples/HigherOrderFunctions.scala
Scala
apache-2.0
959
package com.twitter.gizzard.proxy

import com.twitter.logging.Logger
import com.twitter.util.TimeConversions._
import com.twitter.util.{Future, Promise}
import org.specs.Specification
import org.specs.mock.{ClassMocker, JMocker}
import com.twitter.gizzard.util.{Future => GizzardFuture}
import com.twitter.gizzard.{Stats, TransactionalStatsProvider, TransactionalStatsConsumer, SampledTransactionalStatsConsumer}
import com.twitter.gizzard.ConfiguredSpecification
import java.util.concurrent._

// Specs for LoggingProxy: verifies that calls made through the proxy are
// recorded as transactions (messages, total duration, cross-thread children,
// and exceptions) into a TransactionalStatsConsumer.
object LoggingProxySpec extends ConfiguredSpecification with JMocker with ClassMocker {
  // Interface whose invocations are traced by the proxy under test.
  trait Named {
    def name: String
    def nameParts: Array[String]
    def namePartsSeq: Seq[String]
    def asyncName: Future[String]
  }
  trait Namer {
    def setName(name: String)
  }

  // Captures the provider handed to the consumer so specs can inspect
  // the recorded messages; reset between examples via doAfter.
  class FakeTransactionalStatsConsumer extends TransactionalStatsConsumer {
    var stats: TransactionalStatsProvider = null
    def apply(s: TransactionalStatsProvider) { stats = s }
    def reset { stats = null }
  }

  "Logging Proxy" should {
    val gFuture = new GizzardFuture("test", 1, 1, 1.second, 1.second)
    val executor = Executors.newSingleThreadExecutor()

    // Test subject exercising the four traced behaviors: plain recording,
    // throwing, recording across a worker thread, and async completion.
    val bob = new Named {
      def name = {
        Stats.transaction.record("ack")
        "bob"
      }

      def nameParts = throw new Exception("yarrg!")

      def namePartsSeq = {
        Stats.transaction.record("before thread")
        val f = gFuture {
          // Runs on the GizzardFuture worker; should land in a child trace.
          Stats.transaction.record("in thread")
          Seq("bob", "marley")
        }
        f.get()
      }

      def asyncName = {
        val promise = new Promise[String]
        // Sleeps just over 100ms so the duration assertion below
        // (`beGreaterThan(100L)`) holds.
        executor.submit(new Runnable { def run = {
          Thread.sleep(101)
          promise.setValue("bob")
        }})
        promise
      }
    }

    val rob = new Namer {
      def setName(name: String) {}
      def setNameSlow(name: String) { Thread.sleep(100) }
    }

    val sampledStats = new FakeTransactionalStatsConsumer
    // Sample rate 1 => every transaction reaches the fake consumer.
    val sampledLoggingConsumer = new SampledTransactionalStatsConsumer(sampledStats, 1)

    val bobProxyFactory = new LoggingProxy[Named](Seq(sampledLoggingConsumer), "request", None)
    val bobProxy = bobProxyFactory(bob)
    val robProxyFactory = new LoggingProxy[Namer](Seq(sampledLoggingConsumer), "request", None)
    val robProxy = robProxyFactory(rob)

    doAfter {
      sampledStats.reset
    }

    "log a trace" in {
      bobProxy.name
      val messages = sampledStats.stats.toSeq.map { _.message }
      messages(0) mustEqual "ack"
      messages(1) must startWith("Total duration:")
    }

    "log a trace async" in {
      bobProxy.asyncName.apply()
      // After the async call completes there must be no lingering transaction
      // bound to the current thread.
      Stats.transactionOpt mustEqual None
      val messages = sampledStats.stats.toSeq.map { _.message }
      messages(0) must startWith("Total duration:")
      sampledStats.stats.get("duration").get.asInstanceOf[Long] must beGreaterThan(100L)
    }

    "log a trace across threads" in {
      bobProxy.namePartsSeq
      val messages = sampledStats.stats.toSeq.map { _.message }
      messages(0) mustEqual "before thread"
      messages(1) must startWith("Total duration:")
      // Work done inside the GizzardFuture appears as a child trace.
      val children = sampledStats.stats.children.map { _.toSeq.map { _.message } }
      children(0)(0) must startWith("Time spent in future queue")
      children(0)(1) mustEqual "in thread"
      children(0)(2) must startWith("Total duration:")
    }

    "log exceptions" in {
      bobProxy.nameParts must throwA[Exception]
      val messages = sampledStats.stats.toSeq.map { _.message }
      messages(0) mustEqual "Transaction failed with exception: java.lang.Exception: yarrg!"
      messages(1) must startWith("Total duration:")
    }
  }
}
kmiku7/gizzard
src/test/scala/com/twitter/gizzard/proxy/LoggingProxySpec.scala
Scala
apache-2.0
3,676
package actors

/**
 * Helper for synchronously asking an actor and returning its String reply.
 */
object ActorHelper {
  import play.api.libs.concurrent.Execution.Implicits.defaultContext
  import scala.concurrent.duration._
  import akka.pattern.ask
  import akka.actor.ActorRef
  import akka.util.Timeout
  import scala.concurrent.Future
  import scala.concurrent.Await

  /**
   * Sends `msg` to `actor` and blocks for up to 5 seconds for a String reply.
   *
   * Throws a TimeoutException if no reply arrives in time, or a
   * ClassCastException-style failure if the reply is not a String (via mapTo).
   */
  def get(msg: Any, actor: ActorRef): String = {
    // Single source of truth for both the ask timeout and the await deadline
    // (the original duplicated the `5 seconds` literal in two places).
    val waitFor = 5.seconds
    implicit val timeout = Timeout(waitFor)
    // Fixed: the original piped the Future[String] through
    // `.map { result => result.toString }`, a no-op on String that also
    // shadowed the outer `result` binding.
    val reply: Future[String] = (actor ? msg).mapTo[String]
    // NOTE(review): blocking here is intentional — this helper is a
    // synchronous boundary. Avoid calling it from within an actor or on the
    // default dispatcher in hot paths.
    Await.result(reply, waitFor)
  }
}
tnddn/iv-web
portal/rest-portal/app/actors/ActorHelper.scala
Scala
apache-2.0
505
/*
 * Copyright © 2016 Flipkart.com
 */
package com.flipkart.connekt.commons.dao

import com.flipkart.connekt.commons.entities.AppUser
import com.flipkart.connekt.commons.factories.{ConnektLogger, LogFile, TMySQLFactory}
import com.flipkart.connekt.commons.utils.StringUtils._
import org.springframework.dao.{DataAccessException, IncorrectResultSizeDataAccessException}

/**
 * MySQL-backed DAO for [[AppUser]] rows stored in `table`.
 * Every failure is logged through ConnektLogger and rethrown to the caller.
 */
class UserInfoDao(table: String, mysqlFactory: TMySQLFactory) extends TUserInfo with MySQLDao {

  val mysqlHelper = mysqlFactory

  /** Looks up a user by its primary id. */
  override def getUserInfo(userId: String): Option[AppUser] = {
    implicit val jdbc = mysqlHelper.getJDBCInterface
    val sql =
      s"""
         |SELECT * FROM $table WHERE userId = ?
      """.stripMargin
    try query[AppUser](sql, userId)
    catch {
      case e@(_: IncorrectResultSizeDataAccessException | _: DataAccessException) =>
        ConnektLogger(LogFile.DAO).error(s"Error fetching user [$userId] info: ${e.getMessage}", e)
        throw e
    }
  }

  /** Inserts a user, or updates groups/updatedBy/contact when the id already exists. */
  override def addUserInfo(user: AppUser) = {
    implicit val jdbc = mysqlHelper.getJDBCInterface
    val sql =
      s"""
         |INSERT INTO $table(userId, apikey, groups, updatedBy, contact) VALUES(?, ?, ?, ?, ?)
         |ON DUPLICATE KEY UPDATE groups = ?, updatedBy = ?, contact = ?
      """.stripMargin
    try
      update(sql,
        user.userId, user.apiKey, user.groups, user.updatedBy, user.contact,
        user.groups, user.updatedBy, user.contact)
    catch {
      case e: DataAccessException =>
        ConnektLogger(LogFile.DAO).error(s"Error adding user [${user.getJson}] info: ${e.getMessage}", e)
        throw e
    }
  }

  /** Looks up a user by its API key. */
  override def getUserByKey(key: String): Option[AppUser] = {
    implicit val jdbc = mysqlHelper.getJDBCInterface
    val sql =
      s"""
         |SELECT * FROM $table WHERE apiKey = ?
      """.stripMargin
    try query[AppUser](sql, key)
    catch {
      case e: DataAccessException =>
        ConnektLogger(LogFile.DAO).error(s"Error in getting $key", e)
        throw e
    }
  }

  /** Deletes the user row with the given id. */
  override def removeUserById(userId: String): Unit = {
    implicit val jdbc = mysqlHelper.getJDBCInterface
    val sql =
      s"""
         |DELETE FROM $table WHERE userId = ?
      """.stripMargin
    try update(sql, userId)
    catch {
      case e: DataAccessException =>
        ConnektLogger(LogFile.DAO).error(s"Error in deleting client: $userId", e)
        throw e
    }
  }
}

/** Factory providing the default table name. */
object UserInfoDao {
  def apply(tableName: String = "USER_INFO", mysqlFactory: TMySQLFactory) = new UserInfoDao(tableName, mysqlFactory)
}
Flipkart/connekt
commons/src/main/scala/com/flipkart/connekt/commons/dao/UserInfoDao.scala
Scala
mit
3,113
// Compiler regression test (pos/i9844): self-types on inner traits that
// reference outer type parameters or outer traits must typecheck.
object test1:
  trait Foo[A]
  trait Baz[A] {
    trait Bar { this: Foo[A] =>
      def bar(a: A): Unit
    }
  }

object test2:
  trait Foo:
    private var f = "abc"
  trait Baz {
    trait Bam:
      val f = 0
    trait Bar extends Bam { this: Foo =>
      // `f` resolves to Bam's Int member; Foo's `f` is private and invisible
      // here, which `g1: Int` confirms.
      val g = f
      val g1: Int = g
    }
  }

object test3:
  object DetSkipOctree {
    sealed trait Leaf  [PL]
    sealed trait Branch[PL]
  }
  trait DetSkipOctree[PL]
  class Impl[PL] extends DetSkipOctree[PL] {
    final type Leaf = DetSkipOctree.Leaf[PL]
    // Self-type mentions the companion's Branch applied to the class's PL.
    protected trait LeftBranchImpl { this: DetSkipOctree.Branch[PL] =>
      def demoteLeaf(point: PL, leaf: Leaf): Unit = ???
    }
  }
dotty-staging/dotty
tests/pos/i9844.scala
Scala
apache-2.0
684
/*                     __                                               *\
**     ________ ___   / /  ___      __ ____  Scala.js API               **
**    / __/ __// _ | / /  / _ | __ / // __/  (c) 2013-2015, LAMP/EPFL   **
**  __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \    http://scala-lang.org/     **
** /____/\___/_/ |_/____/_/ | |__/ /____/                               **
**                          |/____/                                     **
\*                                                                      */

package scala.scalajs.macroimpls

/** Represents the way a member of a JS object is selected.
 *
 *  Sealed so that macro code matching on selections is checked for
 *  exhaustiveness by the compiler.
 */
private[macroimpls] sealed abstract class JSMemberSelection

/** A member with statically known name */
private[macroimpls] final case class JSNamedMember(name: String) extends JSMemberSelection

/** Calling the object */
private[macroimpls] case object JSMemberCall extends JSMemberSelection

/** Accessing via brackets (array-like access) */
private[macroimpls] case object JSMemberBracketAccess extends JSMemberSelection

/** Accessing and calling a member via brackets (with dynamic name) */
private[macroimpls] case object JSMemberBracketCall extends JSMemberSelection
renyaoxiang/scala-js
library/src/main/scala/scala/scalajs/macroimpls/JSMemberSelection.scala
Scala
bsd-3-clause
1,201
/*
 * Copyright (c) 2014-2016
 * nonblocking.at gmbh [http://www.nonblocking.at]
 *
 * This file is part of Cliwix.
 *
 * Cliwix is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
package at.nonblocking.cliwix.core.validation

import at.nonblocking.cliwix.model.LiferayConfig

/**
 * Validates a [[LiferayConfig]] before it is processed further.
 */
trait LiferayConfigValidator {

  /**
   * Checks the given config and returns all validation errors found;
   * an empty list means no problems were detected.
   */
  def validate(liferayConfig: LiferayConfig): List[ValidationError]
}
nonblocking/cliwix
cliwix-core/src/main/scala/at/nonblocking/cliwix/core/validation/LiferayConfigValidator.scala
Scala
agpl-3.0
988
/*-
 * #%L
 * Core runtime for OOXOO
 * %%
 * Copyright (C) 2006 - 2017 Open Design Flow
 * %%
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 * #L%
 */
package com.idyria.osi.ooxoo.core.buffers.datatypes.compress

import com.idyria.osi.ooxoo.core.buffers.structural.DataUnit
import com.idyria.osi.ooxoo.core.buffers.structural.AbstractDataBuffer
// NOTE(review): the LZMA imports below are unused — the codec actually used
// throughout this class is BZip2.
import org.apache.commons.compress.compressors.lzma.LZMACompressorInputStream
import java.io.ByteArrayInputStream
import org.odfi.tea.io.TeaIOUtils
import org.apache.commons.compress.compressors.lzma.LZMACompressorOutputStream
import java.io.ByteArrayOutputStream
import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream
import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream
import java.util.Base64

// String buffer whose serialized form is a Base64-encoded BZip2 stream of the
// data. NOTE(review): despite the name "ZipString", the compression is BZip2,
// not ZIP — confirm before renaming or documenting externally.
class ZipString extends AbstractDataBuffer[String] {

  // Accumulates incoming Base64 text chunks until the enclosing element closes.
  var base64Buffer = ""

  override def streamIn(du: DataUnit) = {
    super.streamIn(du)
    // Only decode once the XML hierarchy for this element is fully read,
    // because the Base64 payload may arrive in multiple text chunks.
    if (du.isHierarchyClose) {
      decompressData
    }
  }

  // Base64-decodes and BZip2-decompresses the accumulated buffer into `data`,
  // then clears the chunk accumulator.
  def decompressData = {
    val decompressor = new BZip2CompressorInputStream(new ByteArrayInputStream(Base64.getDecoder.decode(base64Buffer)))
    this.data = new String(TeaIOUtils.swallowStream(decompressor))
    base64Buffer = ""
  }

  // Called once per text chunk: appends the (trimmed) chunk and returns the
  // CURRENT data, which is stale until decompressData runs on hierarchy close.
  def dataFromString(str: String): String = {
    base64Buffer += str.trim
    this.data
  }

  // Serializes `data` as Base64(BZip2(bytes)); null data stays null.
  def dataToString: String = data match {
    case null => null
    case d =>
      val outputBytes = new ByteArrayOutputStream
      // Block size chosen from the payload length (commons-compress helper).
      val compression = new BZip2CompressorOutputStream(outputBytes, BZip2CompressorOutputStream.chooseBlockSize(d.size))
      compression.write(d.getBytes)
      compression.flush
      compression.finish()
      compression.close()
      Base64.getEncoder.encodeToString(outputBytes.toByteArray())
  }

  override def toString: String = this.dataToString
}
richnou/ooxoo-core
ooxoo-core/src/main/scala/com/idyria/osi/ooxoo/core/buffers/datatypes/compress/ZipString.scala
Scala
agpl-3.0
2,442
package blended.launcher import java.io.{ File, FileOutputStream, PrintWriter, StringWriter } import java.net.URLClassLoader import java.nio.file.{ Files, Paths } import java.util.{ Hashtable, Properties, ServiceLoader, UUID } import scala.collection.JavaConverters._ import scala.collection.immutable.Map import scala.util.{ Failure, Success, Try } import scala.util.control.NonFatal import blended.launcher.config.LauncherConfig import blended.launcher.internal.ARM import blended.updater.config._ import blended.util.logging.Logger import com.typesafe.config.{ ConfigFactory, ConfigParseOptions } import de.tototec.cmdoption.{ CmdlineParser, CmdlineParserException } import org.osgi.framework.{ Bundle, Constants, FrameworkEvent, FrameworkListener } import org.osgi.framework.launch.{ Framework, FrameworkFactory } import org.osgi.framework.startlevel.{ BundleStartLevel, FrameworkStartLevel } import org.osgi.framework.wiring.FrameworkWiring object Launcher { private lazy val log = Logger[Launcher.type] private lazy val blendedHomeDir = Option(System.getProperty("blended.home")).getOrElse(".") private lazy val containerConfigDirectory = blendedHomeDir + "/etc" private lazy val containerIdFile = "blended.container.context.id" case class InstalledBundle(jarBundle: LauncherConfig.BundleConfig, bundle: Bundle) /** * Entry point of the launcher application. * * This methods will explicitly exit the VM! */ def main(args: Array[String]): Unit = { try { run(args) } catch { case t: LauncherException => log.debug(t)(s"Caught a LauncherException. Exiting with error code: ${t.errorCode} and message: ${t.getMessage()}") if (!t.getMessage().isEmpty()) Console.err.println(s"${t.getMessage()}") sys.exit(t.errorCode) case t: Throwable => log.error(t)("Caught an exception. 
Exiting with error code: 1") Console.err.println(s"Error: ${t.getMessage()}") sys.exit(1) } sys.exit(0) } private[this] def reportError(msg: String): Unit = { log.error(msg) Console.err.println(msg) sys.error(msg) } private[this] def parseArgs(args: Array[String]): Try[Cmdline] = Try { val cmdline = new Cmdline() val cp = new CmdlineParser(cmdline) try { cp.parse(args: _*) } catch { case e: CmdlineParserException => reportError(s"${e.getMessage()}\\nRun launcher --help for help.") } if (cmdline.help) { val sb = new java.lang.StringBuilder() cp.usage(sb) throw new LauncherException(sb.toString(), null, 0) } cmdline } private[this] def containerId(f: File, createContainerID: Boolean, onlyIfMissing: Boolean): Try[String] = { val idFile = new File(containerConfigDirectory, containerIdFile) if (idFile.exists() && idFile.isDirectory) { val msg = s"The file [${idFile.getAbsoluteFile}] exists and is a directory" log.error(msg) Console.err.println(msg) sys.error(msg) } val generateId = createContainerID && (!onlyIfMissing || !idFile.exists()) if (generateId && idFile.exists() && !idFile.canWrite()) { reportError(s"Container Id File [${idFile.getAbsolutePath}] is not writable") } if (generateId && idFile.exists()) idFile.delete() if (generateId) { log.info("Creating new container id") val uuid: CharSequence = UUID.randomUUID().toString.toCharArray Files.write(idFile.toPath, Seq(uuid).asJava) } Try { val lines = Files.readAllLines(Paths.get(idFile.getAbsolutePath)) if (!lines.isEmpty) lines.get(0) else sys.error("Empty container ID file") } } private[this] def createAndPrepareLaunch(configs: Configs, createContainerId: Boolean, onlyIfMissing: Boolean): Launcher = { val launcher = new Launcher(configs.launcherConfig) val errors = configs.profileConfig match { case Some(localConfig) => // Expose the List of mandatory container properties as a System Property // This will be evaluated by the Container Identifier Service val propNames = 
localConfig.resolvedRuntimeConfig.runtimeConfig.properties.getOrElse(RuntimeConfig.Properties.PROFILE_PROPERTY_KEYS, "") System.setProperty(RuntimeConfig.Properties.PROFILE_PROPERTY_KEYS, propNames) localConfig.validate( includeResourceArchives = false, explodedResourceArchives = true ) case None => // if no RuntimeConfig, just check existence of bundles launcher.validate() } if (!errors.isEmpty) sys.error("Could not start the OSGi Framework. Details:\\n" + errors.mkString("\\n")) containerId(new File(containerConfigDirectory, containerIdFile), createContainerId, onlyIfMissing) match { case Failure(e) => val msg = "Launcher is unable to determine the container id." configs.profileConfig match { case Some(c) => // Profile mode, this is an error log.error(e)(msg) Console.err.println(msg) sys.error(msg) case None => // simple config mode, this is not an error log.warn(e)(msg) } case Success(id) => log.info(s"ContainerId is [$id] ") } launcher } /** * Use this method instead of `main` if you do not want to exit the VM * and instead get an [LauncherException] in case of a error. * * @throws LauncherException */ def run(args: Array[String]): Unit = { val cmdline = parseArgs(args).get val handleFrameworkRestart = cmdline.handleFrameworkRestart var firstStart = true var retVal: Int = 0 do { val configs = try { readConfigs(cmdline) } catch { case e: Throwable => log.error(e)("Could not read configs") throw e } log.debug(s"Configs: ${configs}") cmdline.writeSystemProperties match { case Some(propFile) => log.info("Running with --write-system-properties. 
About to generate properties file and exit") val fileProps = new Properties() configs.launcherConfig.systemProperties.foreach { case (k, v) => fileProps.setProperty(k, v) } try { ARM.using(new FileOutputStream(propFile)) { stream => fileProps.store(stream, "Generated by Launcher") log.info(s"Wrote system properties file: ${propFile}") } retVal = 0 } catch { case e: Throwable => log.error(e)(s"Could not write system properties file: ${propFile}") retVal = 1 } case None => val createContainerId = firstStart && (cmdline.resetContainerId || cmdline.initContainerId) val launcher = createAndPrepareLaunch(configs, createContainerId, cmdline.initContainerId) retVal = launcher.run(cmdline) firstStart = false } } while (handleFrameworkRestart && retVal == 2) if (retVal != 0) throw new LauncherException("", errorCode = retVal) } case class Configs(launcherConfig: LauncherConfig, profileConfig: Option[LocalRuntimeConfig] = None) /** * Parse the command line and wrap the result into a [[Configs]] object. 
*/ def readConfigs(cmdline: Cmdline): Configs = { cmdline.configFile match { case Some(configFile) => log.info(s"About to read configFile: [${configFile}]") val config = ConfigFactory.parseFile(new File(configFile), ConfigParseOptions.defaults().setAllowMissing(false)).resolve() Configs(LauncherConfig.read(config)) case None => val profileLookup: Option[ProfileLookup] = cmdline.profileLookup.map { pl => log.info(s"About to read profile lookup file: [$pl]") val c = ConfigFactory.parseFile(new File(pl), ConfigParseOptions.defaults().setAllowMissing(false)).resolve() ProfileLookup.read(c).map { pl => pl.copy(profileBaseDir = pl.profileBaseDir.getAbsoluteFile()) }.get } val profile: String = profileLookup match { case Some(pl) => pl.materializedDir.getPath() case None => cmdline.profileDir match { case Some(profile) => profile case None => sys.error("Either a config file or a profile dir or file or a profile lookup path must be given") } } val (profileDir, profileFile) = if (new File(profile).isDirectory()) { profile -> new File(profile, "profile.conf") } else { Option(new File(profile).getParent()).getOrElse(".") -> new File(profile) } log.info(s"Using profile directory : [$profileDir]") log.info(s"Using profile file : [${profileFile.getAbsolutePath}]") val config = ConfigFactory.parseFile(profileFile, ConfigParseOptions.defaults().setAllowMissing(false)).resolve() val runtimeConfig = ResolvedRuntimeConfig(RuntimeConfigCompanion.read(config).get) val launchConfig = ConfigConverter.runtimeConfigToLauncherConfig(runtimeConfig, profileDir) var brandingProps = Map( RuntimeConfig.Properties.PROFILE_DIR -> profileDir ) var overlayProps = Map[String, String]() profileLookup.foreach { pl => brandingProps ++= Map( RuntimeConfig.Properties.PROFILE_LOOKUP_FILE -> new File(cmdline.profileLookup.get).getAbsolutePath(), RuntimeConfig.Properties.PROFILES_BASE_DIR -> pl.profileBaseDir.getAbsolutePath(), RuntimeConfig.Properties.OVERLAYS -> pl.overlays.map(or => 
s"${or.name}:${or.version}").mkString(",") ) val knownOverlays = LocalOverlays.findLocalOverlays(new File(profileDir).getAbsoluteFile()) knownOverlays.find(ko => ko.overlayRefs.toSet == pl.overlays.toSet) match { case None => if (!pl.overlays.isEmpty) { sys.error("Cannot find specified overlay set: " + pl.overlays.toSeq.sorted.mkString(", ")) } else { log.error("Cannot find the empty overlay set (aka 'base.conf'). To be compatible with older version, we continue here as no real information is missing") } case Some(localOverlays) => val newOverlayProps = localOverlays.properties log.debug("Found overlay provided properties: " + newOverlayProps) overlayProps ++= newOverlayProps } } Configs( launcherConfig = launchConfig.copy( branding = launchConfig.branding ++ brandingProps, systemProperties = SystemPropertyResolver.resolve((launchConfig.systemProperties ++ overlayProps) + ("blended.container.home" -> profileDir)) ), profileConfig = Some(LocalRuntimeConfig(runtimeConfig, new File(profileDir))) ) } } def apply(configFile: File): Launcher = new Launcher(LauncherConfig.read(configFile)) class RunningFramework(val framework: Framework) { def awaitFrameworkStop(framwork: Framework): Int = { val event = framework.waitForStop(0) event.getType match { case FrameworkEvent.ERROR => log.info(event.getThrowable())("Framework has encountered an error: ") 1 case FrameworkEvent.STOPPED => log.info("Framework has been stopped by bundle " + event.getBundle) 0 case FrameworkEvent.STOPPED_UPDATE => log.info("Framework has been updated by " + event.getBundle + " and need a restart") 2 case _ => log.info("Framework stopped. 
Reason: " + event.getType + " from bundle " + event.getBundle) 0 } } val shutdownHook = new Thread("framework-shutdown-hook") { override def run(): Unit = { log.info("Catched kill signal: stopping framework") framework.stop() awaitFrameworkStop(framework) BrandingProperties.setLastBrandingProperties(new Properties()) } } Runtime.getRuntime.addShutdownHook(shutdownHook) def waitForStop(): Int = { try { awaitFrameworkStop(framework) } catch { case NonFatal(x) => log.error(x)("Framework was interrupted. Cause: ") 1 } finally { BrandingProperties.setLastBrandingProperties(new Properties()) Try { Runtime.getRuntime.removeShutdownHook(shutdownHook) } } } } } class Launcher private (config: LauncherConfig) { import Launcher._ private[this] val log = Logger[Launcher] /** * Validate this Launcher's configuration and return the issues if any found. */ def validate(): Seq[String] = { val files = ("Framework JAR", config.frameworkJar) :: config.bundles.toList.map(b => "Bundle JAR" -> b.location) files.flatMap { case (kind, file) => val f = new File(file).getAbsoluteFile() if (!f.exists()) Some(s"${kind} ${f} does not exists") else if (!f.isFile()) Some(s"${kind} ${f} is not a file") else if (!f.canRead()) Some(s"${kind} ${f} is not readable") else None } } /** * Run an (embedded) OSGiFramework based of this Launcher's configuration. 
*/ def start(cmdLine: Cmdline): Try[Framework] = Try { log.info(s"Starting OSGi framework based on config: ${config}"); // Try to locate and load the OSGi framework factory val frameworkURL = new File(config.frameworkJar).getAbsoluteFile.toURI().normalize().toURL() log.info("Framework Bundle from: " + frameworkURL) if (!new File(frameworkURL.getFile()).exists) throw new RuntimeException("Framework Bundle does not exist") val cl = new URLClassLoader(Array(frameworkURL), getClass.getClassLoader) log.debug("About to load FrameworkFactory") val frameworkFactory = ServiceLoader.load(classOf[FrameworkFactory], cl).iterator().next() log.debug("Loaded framework factory: " + frameworkFactory) // Statically export branding properties val brandingProps = { val brandingProps = new Properties() config.branding.foreach { case (k, v) => brandingProps.setProperty(k, v) } BrandingProperties.setLastBrandingProperties(brandingProps) log.debug("Exposing branding via class " + classOf[BrandingProperties].getName() + ": " + brandingProps) brandingProps } // Set system properties found in config config.systemProperties foreach { p => log.info(s"Setting System property [${p._1}] to [${p._2}]") System.setProperty(p._1, p._2) } log.info("About to create framework instance...") val framework = frameworkFactory.newFramework(config.frameworkProperties.asJava) log.debug("Framework created: " + framework) log.debug("About to adapt framework to FrameworkStartLevel") val frameworkStartLevel = framework.adapt(classOf[FrameworkStartLevel]) frameworkStartLevel.setInitialBundleStartLevel(config.defaultStartLevel) log.debug("About to start framework") framework.start() log.info(s"Framework started. 
State: ${framework.getState}") { val props = new Hashtable[String, AnyRef]() props.put("blended.launcher", "true") framework.getBundleContext.registerService(classOf[Properties], brandingProps, props) } log.info("Installing bundles"); val context = framework.getBundleContext() val osgiBundles = config.bundles.map { b => log.info(s"Installing Bundle: ${b}") // TODO: What happens here, if the JAR is not a bundle? val osgiBundle = context.installBundle(new File(b.location).getAbsoluteFile.toURI().normalize().toString()) log.info("Bundle installed: " + b) val bundleStartLevel = osgiBundle.adapt(classOf[BundleStartLevel]) log.debug(s"Setting start level for bundle ${osgiBundle.getSymbolicName()} to ${b.startLevel}") bundleStartLevel.setStartLevel(b.startLevel) InstalledBundle(b, osgiBundle) } log.info(s"${osgiBundles.size} bundles installed") def isFragment(b: InstalledBundle) = b.bundle.getHeaders.get(Constants.FRAGMENT_HOST) != null // Iterate over start levels and activate bundles in the correct order 1.to(config.startLevel).map { startLevel => log.info(s"------ Entering start level [$startLevel] ------") frameworkStartLevel.setStartLevel(startLevel, new FrameworkListener() { override def frameworkEvent(event: FrameworkEvent): Unit = { log.debug(s"Active start level ${startLevel} reached") } }) val bundlesToStart = osgiBundles.filter(b => b.jarBundle.startLevel == startLevel && b.jarBundle.start && !isFragment(b)) log.info(s"Starting ${bundlesToStart.size} bundles"); val startedBundles = bundlesToStart.map { bundle => val result = Try { bundle.bundle.start() } log.info(s"State of ${bundle.bundle.getSymbolicName}: ${bundle.bundle.getState}") result match { case Success(_) => case Failure(t) => val sw = new StringWriter() t.printStackTrace(new PrintWriter(sw)) log.error("\\n" + sw.getBuffer().toString() + "\\n") } bundle -> result } log.info(s"${startedBundles.filter(_._2.isSuccess).size} bundles started"); val failedBundles = startedBundles.filter(_._2.isFailure) if 
(!failedBundles.isEmpty) { log.warn(s"Could not start some bundles:\\n${ failedBundles.map(failed => s"\\n - ${failed._1}\\n ---> ${failed._2}") }") if (cmdLine.strict) { // in strict mode, bundles that failed to start fail the whole container log.warn("Shutting down container due to bundle start failures.") framework.stop() } } } val bundlesInInstalledState = osgiBundles.filter(b => b.bundle.getState() == Bundle.INSTALLED && !isFragment(b)) // now we try to also resolve the remaining bundles if (bundlesInInstalledState.nonEmpty) { log.debug(s"The following bundles are in installed state: ${bundlesInInstalledState.map(b => s"${b.bundle.getSymbolicName}-${b.bundle.getVersion}")}") log.info("Resolving installed bundles") val frameworkWiring = framework.adapt(classOf[FrameworkWiring]) frameworkWiring.resolveBundles(null /* all bundles */ ) val secondAttemptInstalled = osgiBundles.filter(b => b.bundle.getState() == Bundle.INSTALLED && !isFragment(b)) log.debug(s"The following bundles could not be resolved : ${ secondAttemptInstalled.map( b => s"${b.bundle.getSymbolicName}-${b.bundle.getVersion}" ).mkString("\\n", "\\n", "") }") if (secondAttemptInstalled.nonEmpty && cmdLine.strict) { // in strict mode, nor resolved bundles fail the whole container log.error("Shutting down container due to unresolved bundles.") framework.stop() } } log.info("Laucher finished starting of framework and bundles. Awaiting framework termination now.") // Framework and bundles started framework } /** * Run an (embedded) OSGiFramework based of this Launcher's configuration. */ def run(cmdLine: Cmdline): Int = { start(cmdLine) match { case Success(framework) => val handle = new RunningFramework(framework) if (cmdLine.test) { // Special test mode, we started successfully, and can now stop framework.stop() } handle.waitForStop() case Failure(e) => log.error(e)("Could not start framework") 1 } } }
lefou/blended
blended.launcher/src/main/scala/blended/launcher/Launcher.scala
Scala
apache-2.0
19,682
package com.jonatantierno.rhymes

// Tagless-final console programs: IO is abstracted as a type class over an
// arbitrary program type P[_], and interpreted here with scalaz's Id monad.
object Declarative {

  // IO algebra — the only effects programs below may perform.
  trait IO[P[_]] {
    def read(): P[String]
    def write(msg: String): P[Unit]
  }

  object IO {
    def apply[P[_]](implicit IO: IO[P]) = IO

    // Free-standing syntax so programs can call read()/write() directly.
    object Syntax {
      def read[P[_]]()(implicit IO: IO[P]) = IO.read()
      def write[P[_]](msg: String)(implicit IO: IO[P]) = IO.write(msg)
    }
  }

  // Programs written against the IO algebra; the mixed-in traits supply the
  // rhyme/metre helpers (describeWord, getRhymesAsString, ...).
  object Programs extends Verse with Stress with Syllable with Rhymes {
    import scalaz.Monad
    import scalaz.syntax.monad._, IO.Syntax._

    def writeMsg[P[_]: IO: Monad](msg: String): P[Unit] = for {
      _ <- write(msg)
    } yield ()

    // Describes `target`, reads the corpus, and writes the rhymes found in it.
    def findRhymes[P[_]: IO: Monad](target: String): P[Unit] = {
      for {
        _ <- write(describeWord(target))
        elQuijote <- read
        _ <- write(getRhymesAsString(target, elQuijote))
      } yield ()
    }

    // Same, but matches whole verses against the last word of the input verse.
    def findVerses[P[_]: IO: Monad](verse: Array[String]): P[Unit] = {
      val target: String = verse.reduce(_ + " " + _)
      for {
        _ <- write(describeWord(lastWord(target).replace(" ", "")))
        elQuijote <- read
        _ <- write(getVerses(target, elQuijote))
      } yield ()
    }
  }

  // Id-based interpreters: effects run immediately when the program is built.
  // NOTE(review): two implicit IO[Id] instances in the same scope are
  // ambiguous if both are imported — callers presumably import only one;
  // confirm at the use sites.
  object APIInstantiation {
    import scalaz.{Monad, Id}, Id.Id

    implicit object QuijoteIO extends IO[Id] {
      import scala.io.StdIn.readLine
      def read() = Quijote.get()
      def write(msg: String) = print(msg)
    }

    implicit object CachedQuijoteIO extends IO[Id] {
      def read() = Quijote.getCached()
      def write(msg: String) = print(msg)
    }

    // Id is trivially a Monad: point is identity, bind is plain application.
    implicit object IdMonad extends Monad[Id] {
      def point[A](a: => A): Id[A] = a
      def bind[A, B](p: Id[A])(f: A => Id[B]): Id[B] = f(p)
    }
  }
}
jonatantierno/rhymes
app/models/com/jonatantierno/rhymes/Programs.scala
Scala
apache-2.0
1,660
/*
 * Copyright 2016 The BigDL Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.intel.analytics.bigdl.nn

import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.T
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest
import org.scalatest.{FlatSpec, Matchers}

import scala.util.Random

// Unit tests for the Sum layer: reduction along a dimension, with and
// without squeezing the reduced dimension, plus squeezing a vector to scalar.
class SumSpec extends FlatSpec with Matchers {
  "sum" should "work correctly" in {
    val input = Tensor[Float](T(
      T(1.0f, 2.0f),
      T(3.0f, 4.0f)
    ))

    // Summing along dimension 2 collapses each row: (1+2, 3+4).
    val layer = Sum[Float](dimension = 2)

    val expect = Tensor[Float](T(3.0f, 7.0f))

    layer.forward(input) should be(expect)
  }

  "sum" should "work correctly without squeeze" in {
    val input = Tensor[Float](T(
      T(1.0f, 2.0f),
      T(3.0f, 4.0f)
    ))

    // squeeze = false keeps the reduced dimension with size 1.
    val layer = Sum[Float](dimension = 2, squeeze = false)

    val expect = Tensor[Float](T(T(3.0f), T(7.0f)))

    layer.forward(input) should be(expect)
  }

  // A 1-D tensor squeezed over its only dimension reduces to a scalar.
  "sum" should "be correct when squeeze on vector" in {
    val vector = Tensor[Int](T(1, 2, 3))
    val sum = Sum[Int](dimension = 1, squeeze = true)

    sum.forward(vector) should be(Tensor.scalar(6))
  }
}

// Round-trip serialization test for a configured Sum module.
class SumSerialTest extends ModuleSerializationTest {
  override def test(): Unit = {
    val sum = Sum[Float](2).setName("sum")
    val input = Tensor[Float](5, 5).apply1(_ => Random.nextFloat())
    runSerializationTest(sum, input)
  }
}
yiheng/BigDL
spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/SumSpec.scala
Scala
apache-2.0
1,922
/*
 * Copyright (C) 2005, The Beangle Software.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
package org.beangle.data.jdbc

import java.sql.Types

import org.beangle.commons.lang.annotation.value
import org.beangle.data.jdbc.engine.Engines
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpec

// Verifies that DefaultSqlTypeMapping picks the expected java.sql.Types
// codes for value classes, enums, and byte arrays on the H2 engine.
class SqlTypeMappingTest extends AnyFunSpec with Matchers {
  describe("SqlTypeMapping") {
    it("test value type") {
      val mapping = new DefaultSqlTypeMapping(Engines.forName("h2"))
      // A @value class wrapping a Short maps to SMALLINT.
      assert(mapping.sqlCode(classOf[Terms]) == Types.SMALLINT)
      // Enum values map to INTEGER (presumably stored by ordinal — confirm
      // against DefaultSqlTypeMapping).
      assert(mapping.sqlCode(Meta.A.getClass) == Types.INTEGER)
      assert(mapping.sqlCode(classOf[Array[Byte]]) == Types.VARBINARY)
    }
  }
}

// Value class fixture: persisted as its underlying Short.
@value
class Terms(value: Short)

// Scala 3 enum fixture used to exercise the enum-to-INTEGER mapping.
enum Meta {
  case A, B, C
}
beangle/data
jdbc/src/test/scala/org/beangle/data/jdbc/SqlTypeMappingTest.scala
Scala
lgpl-3.0
1,416
package com.michalrus.nofatty.ui

import java.awt.{ GridLayout, BorderLayout }
import javax.swing.JPanel
import javax.swing.border.EmptyBorder
import com.michalrus.nofatty.data.Prefs
import com.michalrus.nofatty.ui.utils.Slider

/** Preferences panel holding three sliders (weight smoothing, energy smoothing,
  * energy value marker). Each slider persists its value through [[Prefs]] and
  * then invokes the by-name `onChange` callback so the caller can react.
  */
class PrefsPane(onChange: ⇒ Unit) extends JPanel {

  // Builds a slider callback: applies `mod` to the raw slider value, persists it
  // to the given pref, then fires the change notification.
  def updateMod(pref: Prefs.Pref[Double], mod: Double ⇒ Double): Double ⇒ Unit = v ⇒ { pref.set(mod(v)); onChange }

  // Persists the slider value unchanged.
  def update(pref: Prefs.Pref[Double]) = updateMod(pref, identity)

  // Smoothing sliders show "smoothing" = 1 - alpha, hence the `1.0 - _` in both
  // directions (display value from stored alpha, and stored alpha from display value).
  val weightSmoothing = new Slider(0.20 to 0.95 by 0.01, 1.0 - Prefs.weightAlpha.get,
    "Weight trend smoothing", d ⇒ f"${d * 100.0}%.0f%%",
    updateMod(Prefs.weightAlpha, 1.0 - _), updateMod(Prefs.weightAlpha, 1.0 - _))

  val energySmoothing = new Slider(0.20 to 0.95 by 0.01, 1.0 - Prefs.energyAlpha.get,
    "Energy trend smoothing", d ⇒ f"${d * 100.0}%.0f%%",
    updateMod(Prefs.energyAlpha, 1.0 - _), updateMod(Prefs.energyAlpha, 1.0 - _))

  // Marker slider stores its value directly (no transformation), range 0..6000 kcal.
  val energyValueMarker = new Slider(0.0 to 6000.0 by 1.0, Prefs.energyMarker.get,
    "Energy value marker", d ⇒ f"$d%.0f kcal",
    update(Prefs.energyMarker), update(Prefs.energyMarker))

  setOpaque(false)
  setLayout(new BorderLayout)

  // Stack the three sliders vertically in a 3x1 grid, each with a 10px margin,
  // pinned to the top of this panel.
  add({
    val p = new JPanel
    p.setOpaque(false)
    p.setLayout(new GridLayout(3, 1))
    Seq(weightSmoothing, energySmoothing, energyValueMarker) foreach { sl ⇒
      sl.setBorder(new EmptyBorder(10, 10, 10, 10))
      p.add(sl)
    }
    p
  }, BorderLayout.PAGE_START)
}
michalrus/nofatty
src/main/scala/com/michalrus/nofatty/ui/PrefsPane.scala
Scala
apache-2.0
1,443
package controllers

import play.api.mvc.Action
import lila.app._
import lila.common.HTTPRequest
import lila.game.{ Game => GameModel, GameRepo }
import play.api.http.ContentTypes
import views._

/** Controller for game-level actions: deletion of imported games and
  * bulk PGN export of a user's games.
  */
object Game extends LilaController {

  private def paginator = Env.game.paginator
  private def analysePaginator = Env.analyse.paginator
  private def cached = Env.game.cached
  private def searchEnv = Env.gameSearch

  def searchForm = searchEnv.forms.search

  /** Deletes an imported game, but only when the requester is the user who
    * imported it; otherwise redirects to the game's watcher page.
    */
  def delete(gameId: String) = Auth { implicit ctx => me =>
    OptionFuResult(GameRepo game gameId) { game =>
      // `??` is lila's Option helper — presumably false when pgnImport/user is empty; verify.
      if (game.pgnImport.flatMap(_.user) ?? (me.id==)) {
        // Remove the bookmark, the game, its analysis, and invalidate the import counter cache.
        Env.hub.actor.bookmark ! lila.hub.actorApi.bookmark.Remove(game.id)
        (GameRepo remove game.id) >>
          (lila.analyse.AnalysisRepo remove game.id) >>
          Env.game.cached.clearNbImportedByCache(me.id) inject Redirect(routes.User.show(me.username))
      }
      else fuccess {
        Redirect(routes.Round.watcher(game.id, game.firstColor.name))
      }
    }
  }

  /** Shows the export confirmation form (with captcha). */
  def export(user: String) = Auth { implicit ctx => _ =>
    Env.security.forms.emptyWithCaptcha map {
      case (form, captcha) => Ok(html.game.export(user, form, captcha))
    }
  }

  /** Streams all of a user's games as one PGN download. Only the user
    * themselves may export (me.id must equal the lowercased user segment).
    */
  def exportConfirm(user: String) = AuthBody { implicit ctx => me =>
    implicit val req = ctx.body
    val userId = user.toLowerCase
    if (me.id == userId) Env.security.forms.empty.bindFromRequest.fold(
      // Captcha failed: re-render the form with a fresh captcha.
      err => Env.security.forms.anyCaptcha map { captcha =>
        BadRequest(html.game.export(userId, err, captcha))
      },
      _ => fuccess {
        play.api.Logger("export").info(s"$user from ${ctx.req.remoteAddress}")
        import org.joda.time.DateTime
        import org.joda.time.format.DateTimeFormat
        // Today's date, used in the suggested download filename.
        val date = (DateTimeFormat forPattern "yyyy-MM-dd") print new DateTime
        // Chunked response: games are streamed rather than buffered in memory.
        Ok.chunked(Env.api.pgnDump exportUserGames userId).withHeaders(
          CONTENT_TYPE -> ContentTypes.TEXT,
          CONTENT_DISPOSITION -> ("attachment; filename=" + s"lichess_${me.username}_$date.pgn"))
      })
    else notFound
  }
}
pavelo65/lila
app/controllers/Game.scala
Scala
mit
2,169
package uk.ac.ncl.openlab.intake24.sql.tools.food

import java.io.{File, FileWriter}

import com.opencsv.CSVWriter
import org.rogach.scallop.ScallopConf
import uk.ac.ncl.openlab.intake24.api.data.admin.{CategoryHeader, FoodHeader}
import uk.ac.ncl.openlab.intake24.foodsql.admin.FoodsAdminImpl
import uk.ac.ncl.openlab.intake24.foodsql.foodindex.FoodIndexDataImpl
import uk.ac.ncl.openlab.intake24.foodsql.user.FoodDataServiceImpl
import uk.ac.ncl.openlab.intake24.sql.tools._

import scala.language.reflectiveCalls

/** Command-line tool: exports every associated food / category link for a
  * locale to a CSV file, one row per association.
  *
  * Required arguments: --db-config-dir, --output-file, --locale.
  */
object AssociatedFoodsExport extends App with DatabaseConnection with WarningMessage {

  val options = new ScallopConf(args) {
    val dbConfigDir = opt[String](required = true)
    val outputFile = opt[String](required = true)
    val locale = opt[String](required = true)
  }

  options.verify()

  val databaseConfig = DatabaseConfigChooser.chooseDatabaseConfiguration(options.dbConfigDir())
  val dataSource = getDataSource(databaseConfig)

  val indexService = new FoodIndexDataImpl(dataSource)
  val foodAdminService = new FoodsAdminImpl(dataSource)
  val foodDataService = new FoodDataServiceImpl(dataSource)

  val csvWriter = new CSVWriter(new FileWriter(new File(options.outputFile())))

  try {
    csvWriter.writeNext(Array("Intake24 code", "English description", "Local description",
      "Associated food code", "Associated food name", "Associated category code",
      "Associated category name", "Prompt text", "Link as main food", "Generic name"))

    indexService.indexableFoods(options.locale()) match {
      case Right(foods) =>
        foods.sortBy(_.code).foreach { header =>
          println(header.code)
          // NOTE(review): .right.get throws if the record lookup fails — kept as-is
          // for this one-shot tool; the finally below still closes the CSV file.
          val foodRecord = foodAdminService.getFoodRecord(header.code, options.locale()).right.get
          val foodEnglishDescription = foodRecord.main.englishDescription
          val associatedFoods = foodRecord.local.associatedFoods

          if (associatedFoods.nonEmpty) {
            associatedFoods.foreach { af =>
              // The association points at either a food (Left) or a category (Right);
              // food columns and category columns are mutually exclusive in the CSV.
              af.foodOrCategoryHeader match {
                case Left(FoodHeader(code, englishDescription, localDescription)) =>
                  csvWriter.writeNext(Array(header.code, foodEnglishDescription, header.localDescription,
                    code, englishDescription, "", "", af.promptText, af.linkAsMain.toString, af.genericName))
                case Right(CategoryHeader(code, englishDescription, localDescription, isHidden)) =>
                  csvWriter.writeNext(Array(header.code, foodEnglishDescription, header.localDescription,
                    "", "", code, englishDescription, af.promptText, af.linkAsMain.toString, af.genericName))
              }
            }
          }
        }

      // Previously unhandled: a lookup failure caused a MatchError. Report it instead.
      case Left(error) =>
        println(s"Failed to retrieve indexable foods: $error")
    }
  } finally {
    // Always flush and close the CSV file, even if the export fails midway.
    csvWriter.close()
  }

  println("Done!")
}
digitalinteraction/intake24
DatabaseTools/src/main/scala/uk/ac/ncl/openlab/intake24/sql/tools/food/AssociatedFoodsExport.scala
Scala
apache-2.0
2,712
package lib

import java.security.SecureRandom

import scala.util.Random

/** Generates random alphanumeric tokens (e.g. for API keys or sessions). */
object TokenGenerator {

  // Random source seeded from a cryptographically strong SecureRandom.
  private[this] val random = new Random(new SecureRandom())

  // Candidate characters: a-z, A-Z, 0-9.
  private[this] val Alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

  /** Returns a random token of `n` characters drawn uniformly from [[Alphabet]]. */
  def generate(n: Int = 80): String =
    List.fill(n)(Alphabet(random.nextInt(Alphabet.length))).mkString
}
Seanstoppable/apidoc
api/app/lib/TokenGenerator.scala
Scala
mit
387
/*
 * Copyright 2015 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.atlas.aws

import java.io.File

import com.amazonaws.auth.AWSCredentials
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.auth.PropertiesCredentials

/** Factory for creating a provider from a file system path. */
object FileCredentialsProvider {
  def apply(path: String): FileCredentialsProvider = new FileCredentialsProvider(new File(path))
}

/** Adapts Amazon's PropertiesCredentials to the AWSCredentialsProvider interface. */
class FileCredentialsProvider(file: File) extends AWSCredentialsProvider {

  // Volatile so a refreshed value is immediately visible to all reader threads.
  @volatile private var ref: AWSCredentials = newCredentials

  // Re-reads the properties file on each call.
  private def newCredentials: AWSCredentials = new PropertiesCredentials(file)

  def getCredentials: AWSCredentials = ref

  def refresh(): Unit = {
    ref = newCredentials
  }
}
rspieldenner/atlas
atlas-aws/src/main/scala/com/netflix/atlas/aws/FileCredentialsProvider.scala
Scala
apache-2.0
1,321
package code
package export

import java.io.{ByteArrayInputStream, ByteArrayOutputStream, _}

import code.model._
import code.service.ReportService
import code.service.TaskItemService.IntervalQuery
import code.util.I18n
import net.liftweb.http.S
import net.liftweb.util.Props
import org.apache.poi.hssf.usermodel._
import org.apache.poi.poifs.filesystem.POIFSFileSystem
import org.apache.poi.ss.usermodel.Cell
import org.joda.time.{LocalDate, YearMonth}
import net.liftweb.common.Box

// NOTE(review): JavaConversions is the deprecated implicit-conversion API;
// consider JavaConverters (.asScala) in a future change.
import scala.collection.JavaConversions._

/**
 * Excel export features.
 */
object ExcelExport {

  // Path to the .xls template; fails fast at class-load time if not configured.
  val templatePath = Props.get("export.excel.timesheet_template").openOrThrowException("No Excel template defined for Timesheets!")

  /**
   * Timesheet Excel export.
   *
   * Fills the configured template with the given user's arrival/leave data for
   * the month selected by `offset` (days relative to today), and returns the
   * resulting workbook as an in-memory stream together with a file name.
   */
  def exportTimesheet(userBox: Box[User], offset: Int) = {
    val user = userBox.openOrThrowException("No user found!")
    // Mutable locals so the finally block can flush/close and capture the bytes.
    var fos: ByteArrayOutputStream = null;
    var array: Array[Byte] = null
    val yearMonth = new YearMonth(LocalDate.now().plusDays(offset))
    try {
      // template
      val fs = new POIFSFileSystem(new FileInputStream(templatePath))
      val workbook = new HSSFWorkbook(fs, true)

      // parameters
      val userName = user.lastName + " " + user.firstName
      val monthText = I18n.Dates.printLongForm(yearMonth, S.locale)
      val dates = ReportService.getTimesheetData(IntervalQuery(yearMonth.toInterval), userBox)

      // spreadsheet to export
      val sheet = workbook.getSheet("Timesheet")

      /**
       * Finds and returns the first cell in the sheet that contains the given text.
       * (Cell type 1 is HSSF's string cell type — presumably CELL_TYPE_STRING; confirm
       * against the POI version in use.)
       */
      def findCell(text: String): Option[Cell] = {
        for (row <- sheet.rowIterator(); cell <- row) {
          if (cell.getCellType() == 1 && text.equals(cell.getStringCellValue())) {
            return Some(cell)
          }
        }
        None
      }

      // localize texts: replace {placeholder} cells with translated labels
      findCell("{ta_text_date}") foreach { cell => cell.setCellValue(S.?("timesheet.date")) }
      findCell("{ta_text_arrival}") foreach { cell => cell.setCellValue(S.?("timesheet.arrival")) }
      findCell("{ta_text_leave}") foreach { cell => cell.setCellValue(S.?("timesheet.leave")) }
      findCell("{ta_text_time_sum_hour}") foreach { cell => cell.setCellValue(S.?("timesheet.time_sum_hour")) }

      // insert data
      findCell("{ta_name}") foreach { cell => cell.setCellValue(userName) }
      findCell("{ta_month}") foreach { cell => cell.setCellValue(monthText) }
      // One template row per possible day of month (1..31); missing days get nulls.
      for (i <- 1 to 31) {
        val data = if (dates.size >= i) dates(i - 1) else (null, null, null, null)
        findCell("{ta_date_" + i + "}") foreach { cell => cell.setCellValue(if (data._1 != null) data._1.toString else null) }
        findCell("{ta_arrive_" + i + "}") foreach { cell => cell.setCellValue(data._2) }
        findCell("{ta_leave_" + i + "}") foreach { cell => cell.setCellValue(data._3) }
      }

      // write sheet
      fos = new ByteArrayOutputStream()
      workbook.write(fos)
    } finally {
      // Capture the written bytes and close the stream; IO failures here are
      // only logged, so the method still returns whatever was flushed.
      if (fos != null) {
        try {
          fos.flush();
          array = fos.toByteArray
          fos.close();
        } catch {
          case e: IOException => e.printStackTrace
        }
      }
    }
    val contentStream = new ByteArrayInputStream(array)
    val name = s"timesheet_${yearMonth.toString}.xls";
    (contentStream, name)
  }
}
dodie/time-admin
src/main/scala/code/export/ExcelExport.scala
Scala
apache-2.0
3,349
/**********************************************************************************************************************
 * This file is part of Scrupal, a Scalable Reactive Web Application Framework for Content Management                *
 *                                                                                                                    *
 * Copyright (c) 2015, Reactific Software LLC. All Rights Reserved.                                                   *
 *                                                                                                                    *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance    *
 * with the License. You may obtain a copy of the License at                                                          *
 *                                                                                                                    *
 *     http://www.apache.org/licenses/LICENSE-2.0                                                                     *
 *                                                                                                                    *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed  *
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for *
 * the specific language governing permissions and limitations under the License.                                    *
 **********************************************************************************************************************/

package scrupal.core

import com.reactific.helpers.{Identifiable, Patterns, Pluralizer}

import play.api.mvc._

import scala.reflect.{ClassTag,classTag}

/** Provider Of Reactors
  *
  * Scrupal objects that mix in this trait participate in the routing of RequestHeaders to a
  * [[scrupal.core.Reactor]]. Providers work very much like Play's Router and can even use the SIRD DSL for matching
  * RequestHeader. The only difference is that a Provider is a partial function returning a Reactor instead of a
  * Play Action. The Reactor's Response is converted into a Play Result by the core module. To implement a
  * Provider, just implement the provide method as a PartialFunction[RequestHeader,Reaction]. For Example:
  * {{{
  *   def provide : ReactionRoutes = {
  *     case GET(p"/foo") => NodeIdReactor(23)
  *   }
  * }}}
  */
trait Provider { self ⇒
  type ReactionRoutes = PartialFunction[RequestHeader,Reactor]

  def provide : ReactionRoutes

  /** Looks up the Reactor for a request, None when no route matches. */
  def reactorFor(request: RequestHeader) : Option[Reactor] = {
    val possibleRoutes = provide
    possibleRoutes.lift(request)
  }

  /** Returns a Provider whose routes only match paths starting with `prefix`;
    * the prefix is stripped before delegating to this provider's routes.
    * A "/" prefix is a no-op and returns this provider unchanged.
    */
  def prefixRoutes(prefix: String): Provider = {
    if (prefix == "/") {
      self
    } else {
      new Provider {
        def provide = {
          val p = if (prefix.endsWith("/")) prefix else prefix + "/"
          // Strip the prefix from matching requests, then try the wrapped routes.
          val prefixed: PartialFunction[RequestHeader, RequestHeader] = {
            case rh: RequestHeader if rh.path.startsWith(p) => rh.copy(path = rh.path.drop(p.length))
          }
          Function.unlift(prefixed.lift.andThen(_.flatMap(self.provide.lift)))
        }
        // Re-prefixing delegates back to the original provider instead of stacking wrappers.
        override def prefixRoutes(prefix: String) : Provider = self.prefixRoutes(prefix)
      }
    }
  }
}

object Provider {
  // A route set that matches nothing; the identity for orElse-folding below.
  val emptyReactionRoutes : Provider#ReactionRoutes = PartialFunction.empty[RequestHeader,Reactor]

  val empty = new Provider {
    def provide : ReactionRoutes = emptyReactionRoutes
  }
}

trait IdentifiableProvider extends Provider with Identifiable {
  override def toString : String = s"Provider(${id.name})"
}

/** Delegating Provider of Reactors
  *
  * This Reactor Provider just contains a set of delegates to which it delegates the job of providing the Reactors
  */
trait DelegatingProvider extends Provider {

  def delegates : Iterable[Provider]

  def isTerminal : Boolean = delegates.isEmpty

  // First delegate whose routes match wins (earlier delegates take precedence via orElse).
  override def provide : ReactionRoutes = {
    delegates.foldLeft(Provider.emptyReactionRoutes) { case (accum,next) ⇒ accum.orElse(next.provide) }
  }
}

trait EnablementProvider[T <: EnablementProvider[T]] extends DelegatingProvider with Enablement[T] with Enablee {

  // An Enablee counts as a delegate only if it is a Provider and enabled in this scope.
  def isEnabledProvider(e : Enablee) : Boolean = { e.isInstanceOf[Provider] && isEnabled(e, this) }

  def delegates : Iterable[Provider] = {
    mapIf[Provider]{ isEnabledProvider } { e : Enablee ⇒ e.asInstanceOf[Provider] }
  }

  // Enabled delegates narrowed to a specific provider subtype P.
  def delegatesOfType[P <: Provider with Enablee : ClassTag] = {
    mapIf[P] { e : Enablee ⇒
      classTag[P].runtimeClass.isInstance(e) && isEnabled(e,this)
    } { e : Enablee ⇒ e.asInstanceOf[P] }
  }

  def provideFor[P <: Provider with Enablee : ClassTag] : ReactionRoutes = {
    delegatesOfType[P].foldLeft(Provider.emptyReactionRoutes) { case (accum, next) ⇒ accum.orElse(next.provide) }
  }
}

trait SingularProvider extends IdentifiableProvider {

  /** The routes for the singular prefix case */
  def singularRoutes : ReactionRoutes

  /** Singular form of the entity's label */
  final val singularPrefix = {
    require(!label.startsWith("/"),"SingularProvider names must not start with a /")
    makeKey(label)
  }

  lazy val provide: ReactionRoutes = {
    prefixRoutes(singularRoutes, singularPrefix)
  }

  def isSingular(request: RequestHeader): Boolean = {
    request.path.startsWith(singularPrefix) || request.path.startsWith("/" + singularPrefix)
  }

  /** Change Routes To Require A Prefix
    * This function is used to prefix the routes provided by singularRoutes with the singularPrefix, as invoked
    * in the provide method. It is also used in PluralProvider for the same purpose. The ideas that the routes
    * for this provider might be GET(p"/foo") but in a SingularProvider named "bar", the route matched will actually
    * be GET(p"/bar/foo"). This allows the routes to vary depending on the name of the provider.
    * @param routes The routes to be modified
    * @param prefix The prefix to prepend to the routes
    * @return ReactionRoutes with the prefix prepended to the routes
    */
  protected def prefixRoutes(routes: ReactionRoutes, prefix: String): ReactionRoutes = {
    assert(!prefix.startsWith("/"))
    assert(!prefix.startsWith("-"))
    val p = s"/$prefix"
    val prefixed: PartialFunction[RequestHeader, RequestHeader] = {
      case header: RequestHeader if header.path.startsWith(p) => header.copy(path = header.path.drop(p.length))
    }
    Function.unlift(prefixed.lift.andThen(_.flatMap(routes.lift)))
  }

  /** Key For Identifying This Provider
    *
    * When matching a path, it is helpful to quickly identify which ActionProvider to apply to a given path. To that
    * end, the key provides a constant path segment value that identifies this Provider. For example, if
    * your path was /foo/bar/doit then foo and bar are potential keys as they might separately identify
    * an Provider "foo" that contains an Provider "bar". The "doit" suffix is not a candidate for an
    * ActionProvider's key because it is not / terminated. Keys are path segments and must occur only between slashes.
    *
    * Strings returned by key will be URL sanitized. They should therefore match the regular expression for URL
    * path characters ( [-A-Za-z0-9_~]+ ). Any characters not matching the regular expression will be converted
    * to a dash.
    *
    * @return The constant string used to identify this ActionProvider
    */
  protected def makeKey(name: String) = {
    name.toLowerCase.replaceAll(Patterns.NotAllowedInUrl.pattern.pattern, "-")
  }
}

trait PluralProvider extends SingularProvider {

  /** The routes for the plural prefix case */
  def pluralRoutes : ReactionRoutes

  /** Plural form of the entity's label */
  final val pluralPrefix = makeKey(Pluralizer.pluralize(label))

  // Plural routes are tried first; singular routes are the fallback.
  override lazy val provide: ReactionRoutes = {
    val prefixedSingular = prefixRoutes(singularRoutes, singularPrefix)
    val prefixedPlural = prefixRoutes(pluralRoutes, pluralPrefix)
    prefixedPlural.orElse(prefixedSingular)
  }

  def isPlural(request: RequestHeader) : Boolean = {
    request.path.startsWith(pluralPrefix) || request.path.startsWith("/" + pluralPrefix)
  }
}

/** A provide of NodeReactor
  *
  * This adapts a node to being a provide of a NodeReactor that just uses the node.
class NodeProvider(nodeF: PartialFunction[RequestHeader, Node]) extends Provider {
  def provide : ReactionRoutes = nodeF.andThen { node : Node ⇒ NodeReactor(node) }
}

class SingleNodeProvider(node: Node) extends Provider {
  def provide : ReactionRoutes = { case request: RequestHeader ⇒ NodeReactor(node) }
}
*/
scrupal/scrupal-core
scrupal-server/src/main/scala/scrupal/core/Provider.scala
Scala
apache-2.0
8,794
package ru.wordmetrix.dreamcrammer.db

import io.Source

/** Loads the plain-text contents of a `Source` into the database as
  * phrases and words, tagged with the supplied tag words.
  */
class LoadText(db : DB) extends Load(db) {

  // Retained for interface compatibility (was incremented by a now-removed progress trace).
  var count = 0

  /** Reads the whole source, parses it into phrases/words, and loads them.
    *
    * NOTE(review): lines are concatenated with no separator, so words split
    * across line breaks will be merged — confirm this is intentional.
    */
  def apply(source : Source, tags : List[Word]): Unit = {
    loadPhraseNWords(phraseNWords(source.getLines.mkString("")), tags)
  }
}
electricmind/dreamcrammer
src/main/ru/wordmetrix/dreamcrammer/db/loadtext.scala
Scala
apache-2.0
350
/*
 * Copyright 2017 Zhang Di
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.dizhang.seqspark.annot

import org.apache.spark.broadcast.Broadcast
import org.dizhang.seqspark.annot.VariantAnnotOp._
import org.dizhang.seqspark.ds._
import org.dizhang.seqspark.util.Constant

import scala.language.implicitConversions

/**
  * Created by zhangdi on 8/16/16.
  *
  * Enrichment operations for [[Variant]]: gene/feature annotation against a
  * broadcast RefGene database, plus grouping helpers for downstream analysis.
  */
@SerialVersionUID(1L)
class VariantAnnotOp[A](val v: Variant[A]) extends Serializable {

  /** Annotates the variant's INFO field with gene/transcript features from `dict`.
    * Non-SNV/indel ALT alleles (anything not matching [ATCG]+) are marked Unknown;
    * variants hitting no known locus are marked InterGenic.
    */
  def annotateByVariant(dict: Broadcast[RefGene]): Variant[A] = {
    if (! v.alt.matches("""[ATCG]+""")) {
      v.addInfo(IK.anno, F.Unknown.toString)
      v
    } else {
      val variation = v.toVariation()
      // All loci overlapping this variation whose mRNA sequence is known,
      // annotated per transcript: (gene name, mRNA name, feature).
      val annot = IntervalTree.lookup(dict.value.loci, variation)
        .filter(l => dict.value.seq.contains(l.mRNAName))
        .map{l => (l.geneName, l.mRNAName, l.annotate(variation, dict.value.seq(l.mRNAName)))}
      annot match {
        case Nil =>
          //logger.warn(s"no annotation for variant ${variation.toString}")
          v.addInfo(IK.anno, F.InterGenic.toString)
          v
        case _ =>
          //val consensus = annot.map(p => (p._1, p._3))
          //  .reduce((a, b) => if (FM(a._2) < FM(b._2)) a else b)
          // Encode as gene::mRNA:feature with "," between transcripts and ",,"
          // between genes — the inverse of parseAnnotation below.
          val merge = annot.map(p => (p._1, s"${p._2}:${p._3.toString}")).groupBy(_._1)
            .map{case (k, value) => "%s::%s".format(k, value.map(x => x._2).mkString(","))}
            .mkString(",,")
          v.addInfo(IK.anno, merge)
          v
      }
    }
  }

  /** Expands the (already annotated) variant into one copy per gene it hits.
    * With onlyFunctional, keeps only genes whose best feature rank is <= 4
    * (presumably the "functional" portion of the Feature ordering — confirm).
    */
  def groupByGene(onlyFunctional: Boolean = true): Array[(String, Variant[A])] = {
    /** the argument RefGene is the whole set of all genes involved
      * the output RefGene each represents a single gene
      * */
    val variation = v.toVariation()
    val anno = v.parseInfo(IK.anno)
    // InterGenic/CNV/Unknown annotations carry no genes to group by.
    val genes = if (FM(worstAnnotation(anno)) >= FM(F.InterGenic)) Array[(String, F.Value)]() else parseAnnotation(v.parseInfo(IK.anno))
    val res = if (onlyFunctional) {
      genes.filter(p => FM(p._2) <= 4)
    } else {
      genes
    }
    res.map{ case (g, _) => g -> v.copy }
  }

  /** Assigns the variant to one or two fixed-size sliding windows along the
    * chromosome. Windows advance by (windowSize - overlapSize); a variant in
    * the overlap region is emitted once per overlapping window (as a copy).
    */
  def groupByRegion(windowSize: Int, overlapSize: Int): Array[(String, Variant[A])] = {
    require(overlapSize < 0.5 * windowSize, "overlap must be less than half of the window size")
    val variantRegion = v.toRegion
    val unitSize = windowSize - overlapSize
    val unit = variantRegion.start / unitSize
    val unitPos = variantRegion.start % unitSize
    val startRegion = Region(v.chr, unit * unitSize, unit * unitSize + windowSize)
    // The variant may also overlap the next window when it crosses the stride boundary.
    val endRegion = if (variantRegion overlap Region(v.chr, (unit + 1) * unitSize)) {
      Region(v.chr, (unit + 1) * unitSize, (unit + 1) * unitSize + windowSize)
    } else {
      startRegion
    }
    if ((unit == 0 || unitPos > overlapSize) && startRegion == endRegion) {
      //v.addInfo("Region", startRegion.toString)
      Array(startRegion.toString -> v)
    } else {
      val v2 = v.copy
      //v.addInfo("Region", startRegion.toString)
      //v2.addInfo("Region", endRegion.toString)
      Array(startRegion.toString -> v, endRegion.toString -> v2)
    }
  }
}

object VariantAnnotOp {
  val F = Constant.Annotation.Feature
  // Feature -> rank; a smaller rank is treated as more deleterious by the reduces below.
  val FM = F.values.zipWithIndex.toMap
  val Nucleotide = Constant.Annotation.Base
  val IK = Constant.Variant.InfoKey

  type Genes = Map[String, List[Location]]

  // Enables calling the ops above directly on a Variant.
  implicit def addAnnotOp[A](v: Variant[A]): VariantAnnotOp[A] = {
    new VariantAnnotOp[A](v)
  }

  /** Returns the single most deleterious feature encoded in an annotation string. */
  def worstAnnotation(value: String): F.Value = {
    if (value == F.InterGenic.toString) {
      F.InterGenic
    } else if (value == F.CNV.toString) {
      F.CNV
    } else if (value == F.Unknown.toString) {
      F.Unknown
    } else {
      val genes = parseAnnotation(value)
      genes.map(_._2).reduce((a, b) => if (FM(a) < FM(b)) a else b)
    }
  }

  /** Parses the gene::mRNA:feature[,mRNA:feature...][,,gene::...] encoding
    * produced by annotateByVariant.
    */
  def parseAnnotation(value: String): Array[(String, F.Value)] = {
    val geneRegex = """([\w][\w-\.]*)::(\S+)""".r
    val trsRegex = """(\w+):(\S+)""".r
    val genes = value.split(",,").map{
      case geneRegex(gene, rest) => gene -> rest.split(",").map{
        case trsRegex(_, func) => F.withName(func)
      }.reduce((a, b) => if (FM(a) < FM(b)) a else b)
    }
    /** an array of genes, and their most deleterious function annotation */
    genes
  }
}
statgenetics/seqspark
src/main/scala/org/dizhang/seqspark/annot/VariantAnnotOp.scala
Scala
apache-2.0
4,806
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.storage

import java.io._
import java.nio.ByteBuffer
import java.nio.channels.{Channels, ReadableByteChannel, WritableByteChannel}
import java.nio.channels.FileChannel.MapMode
import java.util.concurrent.{ConcurrentHashMap, TimeUnit}

import scala.collection.mutable.ListBuffer

import com.google.common.io.Closeables
import io.netty.channel.DefaultFileRegion
import org.apache.commons.io.FileUtils

import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.internal.{config, Logging}
import org.apache.spark.network.buffer.ManagedBuffer
import org.apache.spark.network.util.{AbstractFileRegion, JavaUtils}
import org.apache.spark.security.CryptoStreamUtils
import org.apache.spark.unsafe.array.ByteArrayMethods
import org.apache.spark.util.Utils
import org.apache.spark.util.io.ChunkedByteBuffer

/**
 * Stores BlockManager blocks on disk.
 */
private[spark] class DiskStore(
    conf: SparkConf,
    diskManager: DiskBlockManager,
    securityManager: SecurityManager) extends Logging {

  private val minMemoryMapBytes = conf.get(config.STORAGE_MEMORY_MAP_THRESHOLD)
  private val maxMemoryMapBytes = conf.get(config.MEMORY_MAP_LIMIT_FOR_TESTS)
  // Tracks the logical (decrypted) size of each stored block, recorded at write time.
  private val blockSizes = new ConcurrentHashMap[BlockId, Long]()

  def getSize(blockId: BlockId): Long = blockSizes.get(blockId)

  /**
   * Invokes the provided callback function to write the specific block.
   *
   * @throws IllegalStateException if the block already exists in the disk store.
   */
  def put(blockId: BlockId)(writeFunc: WritableByteChannel => Unit): Unit = {
    if (contains(blockId)) {
      logWarning(s"Block $blockId is already present in the disk store")
      // Overwrite path: remove the stale file before re-writing the block.
      try {
        diskManager.getFile(blockId).delete()
      } catch {
        case e: Exception =>
          throw new IllegalStateException(
            s"Block $blockId is already present in the disk store and could not delete it $e")
      }
    }
    logDebug(s"Attempting to put block $blockId")
    val startTimeNs = System.nanoTime()
    val file = diskManager.getFile(blockId)
    // CountingWritableChannel records how many bytes the callback wrote,
    // so the block size can be stored without re-statting the file.
    val out = new CountingWritableChannel(openForWrite(file))
    var threwException: Boolean = true
    try {
      writeFunc(out)
      blockSizes.put(blockId, out.getCount)
      threwException = false
    } finally {
      try {
        out.close()
      } catch {
        case ioe: IOException =>
          // Only surface the close failure if the write itself succeeded.
          if (!threwException) {
            threwException = true
            throw ioe
          }
      } finally {
         // Clean up the partial file if anything went wrong.
         if (threwException) {
          remove(blockId)
        }
      }
    }
    logDebug(s"Block ${file.getName} stored as ${Utils.bytesToString(file.length())} file" +
      s" on disk in ${TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeNs)} ms")
  }

  def putBytes(blockId: BlockId, bytes: ChunkedByteBuffer): Unit = {
    put(blockId) { channel =>
      bytes.writeFully(channel)
    }
  }

  def getBytes(blockId: BlockId): BlockData = {
    getBytes(diskManager.getFile(blockId.name), getSize(blockId))
  }

  def getBytes(f: File, blockSize: Long): BlockData = securityManager.getIOEncryptionKey() match {
    case Some(key) =>
      // Encrypted blocks cannot be memory mapped; return a special object that does decryption
      // and provides InputStream / FileRegion implementations for reading the data.
      new EncryptedBlockData(f, blockSize, conf, key)

    case _ =>
      new DiskBlockData(minMemoryMapBytes, maxMemoryMapBytes, f, blockSize)
  }

  def remove(blockId: BlockId): Boolean = {
    blockSizes.remove(blockId)
    val file = diskManager.getFile(blockId.name)
    if (file.exists()) {
      val ret = file.delete()
      if (!ret) {
        logWarning(s"Error deleting ${file.getPath()}")
      }
      ret
    } else {
      false
    }
  }

  /**
   * @param blockSize if encryption is configured, the file is assumed to already be encrypted and
   *                  blockSize should be the decrypted size
   */
  def moveFileToBlock(sourceFile: File, blockSize: Long, targetBlockId: BlockId): Unit = {
    blockSizes.put(targetBlockId, blockSize)
    val targetFile = diskManager.getFile(targetBlockId.name)
    FileUtils.moveFile(sourceFile, targetFile)
  }

  def contains(blockId: BlockId): Boolean = diskManager.containsBlock(blockId)

  // Opens the file for writing, wrapping the channel in an encrypting channel
  // when an IO encryption key is configured. Deletes the file on failure.
  private def openForWrite(file: File): WritableByteChannel = {
    val out = new FileOutputStream(file).getChannel()
    try {
      securityManager.getIOEncryptionKey().map { key =>
        CryptoStreamUtils.createWritableChannel(out, conf, key)
      }.getOrElse(out)
    } catch {
      case e: Exception =>
        Closeables.close(out, true)
        file.delete()
        throw e
    }
  }
}

// Unencrypted on-disk block. Small reads are copied into heap buffers;
// large reads are served via memory mapping or Netty zero-copy transfer.
private class DiskBlockData(
    minMemoryMapBytes: Long,
    maxMemoryMapBytes: Long,
    file: File,
    blockSize: Long) extends BlockData {

  override def toInputStream(): InputStream = new FileInputStream(file)

  /**
   * Returns a Netty-friendly wrapper for the block's data.
   *
   * Please see `ManagedBuffer.convertToNetty()` for more details.
   */
  override def toNetty(): AnyRef = new DefaultFileRegion(file, 0, size)

  override def toChunkedByteBuffer(allocator: (Int) => ByteBuffer): ChunkedByteBuffer = {
    Utils.tryWithResource(open()) { channel =>
      var remaining = blockSize
      val chunks = new ListBuffer[ByteBuffer]()
      // Read the file in chunks of at most maxMemoryMapBytes each.
      while (remaining > 0) {
        val chunkSize = math.min(remaining, maxMemoryMapBytes)
        val chunk = allocator(chunkSize.toInt)
        remaining -= chunkSize
        JavaUtils.readFully(channel, chunk)
        chunk.flip()
        chunks += chunk
      }
      new ChunkedByteBuffer(chunks.toArray)
    }
  }

  override def toByteBuffer(): ByteBuffer = {
    require(blockSize < maxMemoryMapBytes,
      s"can't create a byte buffer of size $blockSize" +
      s" since it exceeds ${Utils.bytesToString(maxMemoryMapBytes)}.")
    Utils.tryWithResource(open()) { channel =>
      if (blockSize < minMemoryMapBytes) {
        // For small files, directly read rather than memory map.
        val buf = ByteBuffer.allocate(blockSize.toInt)
        JavaUtils.readFully(channel, buf)
        buf.flip()
        buf
      } else {
        channel.map(MapMode.READ_ONLY, 0, file.length)
      }
    }
  }

  override def size: Long = blockSize

  override def dispose(): Unit = {}

  private def open() = new FileInputStream(file).getChannel
}

// Block whose on-disk bytes are encrypted; all read paths decrypt on the fly.
private[spark] class EncryptedBlockData(
    file: File,
    blockSize: Long,
    conf: SparkConf,
    key: Array[Byte]) extends BlockData {

  override def toInputStream(): InputStream = Channels.newInputStream(open())

  override def toNetty(): Object = new ReadableChannelFileRegion(open(), blockSize)

  override def toChunkedByteBuffer(allocator: Int => ByteBuffer): ChunkedByteBuffer = {
    val source = open()
    try {
      var remaining = blockSize
      val chunks = new ListBuffer[ByteBuffer]()
      while (remaining > 0) {
        val chunkSize = math.min(remaining, ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH)
        val chunk = allocator(chunkSize.toInt)
        remaining -= chunkSize
        JavaUtils.readFully(source, chunk)
        chunk.flip()
        chunks += chunk
      }
      new ChunkedByteBuffer(chunks.toArray)
    } finally {
      source.close()
    }
  }

  override def toByteBuffer(): ByteBuffer = {
    // This is used by the block transfer service to replicate blocks. The upload code reads
    // all bytes into memory to send the block to the remote executor, so it's ok to do this
    // as long as the block fits in a Java array.
    assert(blockSize <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH,
      "Block is too large to be wrapped in a byte buffer.")
    val dst = ByteBuffer.allocate(blockSize.toInt)
    val in = open()
    try {
      JavaUtils.readFully(in, dst)
      dst.flip()
      dst
    } finally {
      Closeables.close(in, true)
    }
  }

  override def size: Long = blockSize

  override def dispose(): Unit = { }

  // Opens a channel that decrypts the file contents; closes it on setup failure.
  private def open(): ReadableByteChannel = {
    val channel = new FileInputStream(file).getChannel()
    try {
      CryptoStreamUtils.createReadableChannel(channel, conf, key)
    } catch {
      case e: Exception =>
        Closeables.close(channel, true)
        throw e
    }
  }
}

private[spark] class EncryptedManagedBuffer(
    val blockData: EncryptedBlockData) extends ManagedBuffer {

  // This is the size of the decrypted data
  override def size(): Long = blockData.size

  override def nioByteBuffer(): ByteBuffer = blockData.toByteBuffer()

  override def convertToNetty(): AnyRef = blockData.toNetty()

  override def createInputStream(): InputStream = blockData.toInputStream()

  override def retain(): ManagedBuffer = this

  override def release(): ManagedBuffer = this
}

// FileRegion adapter over an arbitrary ReadableByteChannel; pumps data to the
// target through a 64 KiB direct buffer until the target stops accepting bytes.
private class ReadableChannelFileRegion(source: ReadableByteChannel, blockSize: Long)
  extends AbstractFileRegion {

  private var _transferred = 0L

  private val buffer = ByteBuffer.allocateDirect(64 * 1024)
  buffer.flip()

  override def count(): Long = blockSize

  override def position(): Long = 0

  override def transferred(): Long = _transferred

  override def transferTo(target: WritableByteChannel, pos: Long): Long = {
    assert(pos == transferred(), "Invalid position.")

    var written = 0L
    var lastWrite = -1L
    // Stop when a write makes no progress (target full or source exhausted).
    while (lastWrite != 0) {
      if (!buffer.hasRemaining()) {
        buffer.clear()
        source.read(buffer)
        buffer.flip()
      }
      if (buffer.hasRemaining()) {
        lastWrite = target.write(buffer)
        written += lastWrite
      } else {
        lastWrite = 0
      }
    }

    _transferred += written
    written
  }

  override def deallocate(): Unit = source.close()
}

// Decorator that counts the bytes written through it (used by DiskStore.put
// to record block sizes).
private class CountingWritableChannel(sink: WritableByteChannel) extends WritableByteChannel {

  private var count = 0L

  def getCount: Long = count

  override def write(src: ByteBuffer): Int = {
    val written = sink.write(src)
    if (written > 0) {
      count += written
    }
    written
  }

  override def isOpen(): Boolean = sink.isOpen()

  override def close(): Unit = sink.close()
}
ueshin/apache-spark
core/src/main/scala/org/apache/spark/storage/DiskStore.scala
Scala
apache-2.0
10,891
package blended.streams.testsupport

import akka.util.ByteString
import blended.streams.message.{FlowEnvelope, FlowMessage}
import blended.testsupport.scalatest.LoggingFreeSpec
import org.scalatest.Matchers

/**
 * Verifies that [[FlowMessageAssertion.checkAssertions]] with an [[ExpectedBodies]]
 * assertion accepts matching bodies and reports a single failure for mismatches.
 */
class FlowMessageAssertionSpec extends LoggingFreeSpec with Matchers {

  // Run a single ExpectedBodies assertion against the given envelope and
  // return the resulting collection of failures.
  private def bodyCheck(env: FlowEnvelope, expected: Option[Any]) =
    FlowMessageAssertion.checkAssertions(env)(ExpectedBodies(expected))

  "The FlowMessageAssertions should" - {

    "support to check expected string bodies" in {
      val env = FlowEnvelope(FlowMessage("Hello Blended!")(FlowMessage.noProps))

      // Matching body: no failures reported.
      bodyCheck(env, Some("Hello Blended!")) should be (empty)
      // Wrong string body: exactly one failure.
      bodyCheck(env, Some("foo")) should have size 1
      // Wrong body type (Int vs String): exactly one failure.
      bodyCheck(env, Some(100)) should have size 1
    }

    "support to check expected byte string bodies" in {
      val env = FlowEnvelope(FlowMessage(ByteString("Hello Blended!"))(FlowMessage.noProps))

      // Matching ByteString body: no failures reported.
      bodyCheck(env, Some(ByteString("Hello Blended!"))) should be (empty)
      // String vs ByteString mismatch: exactly one failure.
      bodyCheck(env, Some("foo")) should have size 1
    }
  }
}
lefou/blended
blended.streams.testsupport/src/test/scala/blended/streams/testsupport/FlowMessageAssertionSpec.scala
Scala
apache-2.0
1,222
/*
 * External imports
 */
// Ammonite shell filesystem operations
import ammonite.ops._
// Breeze for linear algebra and probability distributions
import breeze.linalg.{DenseVector, DenseMatrix, diag}
import breeze.stats.distributions._
// Apache Spark for big data support
import org.apache.spark.SparkContext
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
// Wisp-Highcharts plotting front end
import io.github.mandar2812.dynaml.graphics.charts.Highcharts._
// Spire implicits: fields and algebraic structures on primitive types
import spire.implicits._

/*
 * DynaML imports
 */
import io.github.mandar2812.dynaml.analysis.VectorField
import io.github.mandar2812.dynaml.analysis.implicits._
import io.github.mandar2812.dynaml.algebra._
// 3d graphics capabilities
import io.github.mandar2812.dynaml.graphics.plot3d
// The pipes API
import io.github.mandar2812.dynaml.pipes._
import io.github.mandar2812.dynaml.DynaMLPipe
import io.github.mandar2812.dynaml.DynaMLPipe._
// Model API members
import io.github.mandar2812.dynaml.models._
import io.github.mandar2812.dynaml.models.neuralnets._
import io.github.mandar2812.dynaml.models.svm._
import io.github.mandar2812.dynaml.models.lm._
// Utility functions
import io.github.mandar2812.dynaml.utils
// Kernels for GP/SVM models
import io.github.mandar2812.dynaml.kernels._
// Shell examples
import io.github.mandar2812.dynaml.examples._
// Neural net primitives
import io.github.mandar2812.dynaml.models.neuralnets.TransferFunctions._
// Probability API
import io.github.mandar2812.dynaml.probability._
import io.github.mandar2812.dynaml.probability.distributions._
// Wavelet API
import io.github.mandar2812.dynaml.wavelets._
// OpenML support
import io.github.mandar2812.dynaml.openml.OpenML
// Spark support
import io.github.mandar2812.dynaml.DynaMLSpark._
// Renjin (R on the JVM) scripting
import javax.script._
// TensorFlow support
import _root_.io.github.mandar2812.dynaml.tensorflow._
import _root_.io.github.mandar2812.dynaml.tensorflow.implicits._
import org.renjin.script._
import org.renjin.sexp._

// Bootstrap an embedded Renjin R interpreter for the session.
val r_engine_factory = new RenjinScriptEngineFactory()
implicit val renjin = r_engine_factory.getScriptEngine()

// `r`: evaluate an R expression string, returning the raw SEXP result.
val r: String => SEXP = s => renjin.eval(s).asInstanceOf[SEXP]

// `R`: source an R script file for its side effects only.
val R: java.io.File => Unit = f => { renjin.eval(f); () }
transcendent-ai-labs/DynaML
conf/DynaMLInit.scala
Scala
apache-2.0
2,305
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.hive

import org.apache.hadoop.conf.Configuration

import org.apache.spark.SparkConf
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.types.StructType

/**
 * Test suite for the [[HiveExternalCatalog]].
 */
class HiveExternalCatalogSuite extends ExternalCatalogSuite {

  // Shared catalog instance backed by a freshly reset Hive client.
  private val externalCatalog: HiveExternalCatalog = {
    val catalog = new HiveExternalCatalog(new SparkConf, new Configuration)
    catalog.client.reset()
    catalog
  }

  protected override val utils: CatalogTestUtils = new CatalogTestUtils {
    override val tableInputFormat: String =
      "org.apache.hadoop.mapred.SequenceFileInputFormat"
    override val tableOutputFormat: String =
      "org.apache.hadoop.mapred.SequenceFileOutputFormat"
    override def newEmptyCatalog(): ExternalCatalog = externalCatalog
    override val defaultProvider: String = "hive"
  }

  // Reset the underlying Hive client between tests.
  protected override def resetState(): Unit = externalCatalog.client.reset()

  import utils._

  test("SPARK-18647: do not put provider in table properties for Hive serde table") {
    val basicCatalog = newBasicCatalog()
    val hiveTable = CatalogTable(
      identifier = TableIdentifier("hive_tbl", Some("db1")),
      tableType = CatalogTableType.MANAGED,
      storage = storageFormat,
      schema = new StructType().add("col1", "int").add("col2", "string"),
      provider = Some("hive"))
    basicCatalog.createTable(hiveTable, ignoreIfExists = false)

    // The raw Hive table must NOT carry the data source provider property…
    val rawTable = externalCatalog.client.getTable("db1", "hive_tbl")
    assert(!rawTable.properties.contains(HiveExternalCatalog.DATASOURCE_PROVIDER))
    // …yet the restored table must still be recognized as a Hive table.
    assert(DDLUtils.isHiveTable(externalCatalog.getTable("db1", "hive_tbl")))
  }

  Seq("parquet", "hive").foreach { format =>
    test(s"Partition columns should be put at the end of table schema for the format $format") {
      val basicCatalog = newBasicCatalog()
      // Expected: data columns first, partition columns last.
      val expectedSchema = new StructType()
        .add("col1", "int")
        .add("col2", "string")
        .add("partCol1", "int")
        .add("partCol2", "string")
      // Create the table with partition columns interleaved in the schema.
      val table = CatalogTable(
        identifier = TableIdentifier("tbl", Some("db1")),
        tableType = CatalogTableType.MANAGED,
        storage = CatalogStorageFormat.empty,
        schema = new StructType()
          .add("col1", "int")
          .add("partCol1", "int")
          .add("partCol2", "string")
          .add("col2", "string"),
        provider = Some(format),
        partitionColumnNames = Seq("partCol1", "partCol2"))
      basicCatalog.createTable(table, ignoreIfExists = false)

      val restoredTable = externalCatalog.getTable("db1", "tbl")
      assert(restoredTable.schema == expectedSchema)
    }
  }
}
aokolnychyi/spark
sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogSuite.scala
Scala
apache-2.0
3,572
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.util.collection

import java.util.Arrays

/**
 * A simple, fixed-size bit set implementation. This implementation is fast because it avoids
 * safety/bound checking.
 */
class OapBitSet(numBits: Int) extends Serializable {

  // One Long holds 64 bits; the backing array is rounded up to the next word boundary.
  private val words = new Array[Long](bit2words(numBits))
  private val numWords = words.length

  /**
   * Auxiliary constructor: adopt the bit pattern of an existing Long array.
   * The resulting capacity is `from.length * 64`.
   */
  def this(from: Array[Long]) {
    this(from.length << 6)
    var i = 0
    while (i < from.length) {
      words(i) = from(i)
      i += 1
    }
  }

  /**
   * Compute the capacity (number of bits) that can be represented
   * by this bitset — always a multiple of 64.
   */
  def capacity: Int = numWords * 64

  /** Expose the backing Long array (not a copy). */
  def toLongArray(): Array[Long] = words

  /** Clear all set bits. */
  def clear(): Unit = Arrays.fill(words, 0)

  /** Set all the bits strictly below `bitIndex`. */
  def setUntil(bitIndex: Int): Unit = {
    val wordIndex = bitIndex >> 6 // divide by 64
    Arrays.fill(words, 0, wordIndex, -1)
    if (wordIndex < words.length) {
      // Mask covering the remaining low-order bits (may be zero).
      words(wordIndex) |= ~(-1L << (bitIndex & 0x3f))
    }
  }

  /** Clear all the bits strictly below `bitIndex`. */
  def clearUntil(bitIndex: Int): Unit = {
    val wordIndex = bitIndex >> 6 // divide by 64
    Arrays.fill(words, 0, wordIndex, 0)
    if (wordIndex < words.length) {
      // Keep only the bits at or above bitIndex within the boundary word.
      words(wordIndex) &= -1L << (bitIndex & 0x3f)
    }
  }

  /** Bit-wise AND of the two sets; the result's capacity is the larger of the two. */
  def &(other: OapBitSet): OapBitSet = {
    val result = new OapBitSet(math.max(capacity, other.capacity))
    assert(result.numWords >= numWords)
    assert(result.numWords >= other.numWords)
    val common = math.min(numWords, other.numWords)
    var i = 0
    while (i < common) {
      result.words(i) = words(i) & other.words(i)
      i += 1
    }
    // Words beyond the shorter operand stay zero, which is correct for AND.
    result
  }

  /** Bit-wise OR of the two sets; the result's capacity is the larger of the two. */
  def |(other: OapBitSet): OapBitSet = {
    val result = new OapBitSet(math.max(capacity, other.capacity))
    assert(result.numWords >= numWords)
    assert(result.numWords >= other.numWords)
    val common = math.min(numWords, other.numWords)
    var i = 0
    while (i < common) {
      result.words(i) = words(i) | other.words(i)
      i += 1
    }
    // At most one of these copies runs: whichever operand is longer supplies the tail.
    if (common < numWords) {
      Array.copy(words, common, result.words, common, numWords - common)
    }
    if (common < other.numWords) {
      Array.copy(other.words, common, result.words, common, other.numWords - common)
    }
    result
  }

  /** Symmetric difference (bit-wise XOR) of the two sets. */
  def ^(other: OapBitSet): OapBitSet = {
    val result = new OapBitSet(math.max(capacity, other.capacity))
    val common = math.min(numWords, other.numWords)
    var i = 0
    while (i < common) {
      result.words(i) = words(i) ^ other.words(i)
      i += 1
    }
    // XOR against implicit zeros: just copy the longer operand's tail.
    if (i < numWords) {
      Array.copy(words, i, result.words, i, numWords - i)
    }
    if (i < other.numWords) {
      Array.copy(other.words, i, result.words, i, other.numWords - i)
    }
    result
  }

  /** Difference (bit-wise AND-NOT): bits set in this set and not in `other`. */
  def andNot(other: OapBitSet): OapBitSet = {
    val result = new OapBitSet(capacity)
    val common = math.min(numWords, other.numWords)
    var i = 0
    while (i < common) {
      result.words(i) = words(i) & ~other.words(i)
      i += 1
    }
    // Bits beyond `other` are kept unchanged.
    if (i < numWords) {
      Array.copy(words, i, result.words, i, numWords - i)
    }
    result
  }

  /**
   * Sets the bit at the specified index to true.
   * @param index the bit index
   */
  def set(index: Int) {
    words(index >> 6) |= 1L << (index & 0x3f) // div by 64 selects word, mod 64 selects bit
  }

  /**
   * Sets the bit at the specified index to false.
   * @param index the bit index
   */
  def unset(index: Int) {
    words(index >> 6) &= ~(1L << (index & 0x3f)) // div by 64 selects word, mod 64 selects bit
  }

  /**
   * Return the value of the bit with the specified index: true iff it is currently set.
   * @param index the bit index
   */
  def get(index: Int): Boolean =
    (words(index >> 6) & (1L << (index & 0x3f))) != 0 // div by 64 and mask

  /** Iterator over the indices of all set bits, in ascending order. */
  def iterator: Iterator[Int] = new Iterator[Int] {
    private var current = nextSetBit(0)
    override def hasNext: Boolean = current >= 0
    override def next(): Int = {
      val value = current
      current = nextSetBit(current + 1)
      value
    }
  }

  /** Return the number of bits set to true in this BitSet. */
  def cardinality(): Int = {
    var total = 0
    var i = 0
    while (i < numWords) {
      total += java.lang.Long.bitCount(words(i))
      i += 1
    }
    total
  }

  /**
   * Returns the index of the first bit that is set to true that occurs on or after the
   * specified starting index. If no such bit exists then -1 is returned.
   *
   * To iterate over the true bits in a BitSet, use the following loop:
   * {{{
   *   for (i = bs.nextSetBit(0); i >= 0; i = bs.nextSetBit(i + 1)) { ... }
   * }}}
   *
   * @param fromIndex the index to start checking from (inclusive)
   * @return the index of the next set bit, or -1 if there is no such bit
   */
  def nextSetBit(fromIndex: Int): Int = {
    var wordIndex = fromIndex >> 6
    if (wordIndex >= numWords) {
      return -1
    }

    // First look inside the word containing fromIndex.
    // Arithmetic shift is safe here: any sign-extended high bits originate
    // from real bit 63 and cannot change the lowest set-bit position.
    val subIndex = fromIndex & 0x3f
    var word = words(wordIndex) >> subIndex
    if (word != 0) {
      return (wordIndex << 6) + subIndex + java.lang.Long.numberOfTrailingZeros(word)
    }

    // Otherwise scan the remaining words.
    wordIndex += 1
    while (wordIndex < numWords) {
      word = words(wordIndex)
      if (word != 0) {
        return (wordIndex << 6) + java.lang.Long.numberOfTrailingZeros(word)
      }
      wordIndex += 1
    }
    -1
  }

  /** Return the number of longs it would take to hold numBits. */
  private def bit2words(numBits: Int) = ((numBits - 1) >> 6) + 1
}
Intel-bigdata/OAP
oap-cache/oap/src/main/scala/org/apache/spark/util/collection/OapBitSet.scala
Scala
apache-2.0
7,156
package controllers.admin

import javax.inject.Inject

import models.admin.{Category, CategoryTable}
import play.api.Play
import play.api.data.Form
import play.api.data.Forms._
import play.api.db.slick.DatabaseConfigProvider
import play.api.i18n.{I18nSupport, MessagesApi}
import play.api.mvc.Controller
import slick.driver.JdbcProfile

import scala.slick.driver.MySQLDriver.simple._

/**
 * Admin CRUD controller for quiz categories. All actions are guarded by
 * `withAuth` (from the Secured trait); each DB call opens its own session.
 *
 * Created by Murat.
 */
class CategoryController @Inject() (val messagesApi: MessagesApi) extends Controller with Secured with I18nSupport {

  lazy val categories = TableQuery[CategoryTable]

  val db = DatabaseConfigProvider.get[JdbcProfile](Play.current).db

  /** Load all categories (fresh session per call). */
  def getCategories = db.withSession { implicit session => categories.list }

  // Binds only the name; the id is assigned by the action handling the request.
  val form = Form(
    mapping(
      "id" -> ignored[Option[Long]](None),
      "name" -> nonEmptyText
    )(Category.apply)(Category.unapply)
  )

  /** Render the category list together with an empty creation form. */
  def list = withAuth { username => implicit rs =>
    Ok(views.html.admin.category.list(form, getCategories))
  }

  /** Create a new category from the submitted form. */
  def add = withAuth { username => implicit rs =>
    form.bindFromRequest.fold(
      formWithErrors => BadRequest(views.html.admin.category.list(formWithErrors, getCategories)),
      category => {
        db.withSession { implicit session => categories.insert(category) }
        Redirect(routes.CategoryController.list())
      }
    )
  }

  /** Render the edit form for one category, or 404 if the id is unknown. */
  def edit(id: Long) = withAuth { username => implicit rs =>
    // Was: isDefined/.get with the typo "Not FOund"; pattern match avoids the
    // partial Option accessors and fixes the user-facing message.
    db.withSession { implicit session => categories.filter(_.id === id).firstOption } match {
      case Some(category) => Ok(views.html.admin.category.edit(category, form.fill(category)))
      case None           => NotFound("Not Found")
    }
  }

  /** Update an existing category from the submitted form. */
  def updateCategory(id: Long) = withAuth { username => implicit rs =>
    // Was: form.bindFromRequest.get — an invalid submission threw
    // NoSuchElementException and surfaced as HTTP 500. fold() handles the
    // error branch by sending the user back to the edit page.
    form.bindFromRequest.fold(
      formWithErrors => Redirect(routes.CategoryController.edit(id)),
      category => {
        val categoryToUpdate: Category = category.copy(Some(id))
        db.withSession { implicit session =>
          categories.filter(_.id === id).update(categoryToUpdate)
        }
        Redirect(routes.CategoryController.list())
      }
    )
  }

  /** Delete the category with the given id and return to the list. */
  def delete(id: Long) = withAuth { username => implicit rs =>
    db.withSession { implicit session => categories.filter(_.id === id).delete }
    Redirect(routes.CategoryController.list())
  }
}
mustafin/ent-quiz-server
modules/admin/app/controllers/admin/CategoryController.scala
Scala
apache-2.0
2,182
package mesosphere.marathon.api.v2

import mesosphere.marathon.MarathonSpec
import mesosphere.marathon.api.v2.json.{ V2AppDefinition, V2GroupUpdate }
import mesosphere.marathon.state.Container.Docker.PortMapping
import mesosphere.marathon.state.Container._
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.state._
import org.apache.mesos.Protos.ContainerInfo.DockerInfo.Network
import org.scalatest.{ BeforeAndAfterAll, Matchers, OptionValues }

import scala.collection.immutable.Seq

/**
 * Exercises ModelValidation: group updates, app-count limits,
 * service-port conflicts and null-group handling.
 */
class ModelValidationTest
    extends MarathonSpec with Matchers with BeforeAndAfterAll with OptionValues {

  test("A group update should pass validation") {
    val groupUpdate = V2GroupUpdate(id = Some("/a/b/c".toPath))

    ModelValidation.checkGroupUpdate(groupUpdate, true) should have size 0
  }

  test("A group can not be updated to have more than the configured number of apps") {
    val group = Group("/".toPath, Set(
      createServicePortApp("/a".toPath, 0).toAppDefinition,
      createServicePortApp("/b".toPath, 0).toAppDefinition,
      createServicePortApp("/c".toPath, 0).toAppDefinition
    ))

    // Three apps against a limit of two must produce the limit violation…
    val tooMany = ModelValidation.checkGroup(group, "", PathId.empty, maxApps = Some(2))
    tooMany should not be Nil
    tooMany.find(_.getMessage.contains("This Marathon instance may only handle up to 2 Apps!")) should be ('defined)

    // …while a limit of ten accepts the same group.
    ModelValidation.checkGroup(group, "", PathId.empty, maxApps = Some(10)) should be('empty)
  }

  test("Model validation should catch new apps that conflict with service ports in existing apps") {
    portConflictCheck(existingPort = 3200, newPort = 3200) should not be Nil
  }

  test("Model validation should allow new apps that do not conflict with service ports in existing apps") {
    portConflictCheck(existingPort = 3200, newPort = 3201) should be(Nil)
  }

  test("Model validation should check for application conflicts") {
    val existing = createServicePortApp("/app1".toPath, 3200)
    val group = Group(id = PathId.empty, apps = Set(existing.toAppDefinition))
    // Same definition under a different id still conflicts on the service port.
    val duplicate = existing.copy(id = "/app2".toPath).toAppDefinition

    ModelValidation.checkAppConflicts(duplicate, group) should not be Nil
  }

  test("Null groups should be validated correctly") {
    val result = ModelValidation.checkGroupUpdate(null, needsId = true)

    result should have size 1
    result.head.getMessage should be("Given group is empty!")
  }

  // Validate a candidate app (port `newPort`) against a group holding one
  // existing app that exposes `existingPort`; returns the violations found.
  private def portConflictCheck(existingPort: Int, newPort: Int) = {
    val group = Group(
      id = PathId.empty,
      apps = Set(createServicePortApp("/app1".toPath, existingPort).toAppDefinition))
    val candidate = createServicePortApp("/app2".toPath, newPort).toAppDefinition
    ModelValidation.checkAppConflicts(candidate, group)
  }

  // Minimal dockerized app definition exposing a single mapped service port.
  private def createServicePortApp(id: PathId, servicePort: Int) = V2AppDefinition(
    id,
    container = Some(Container(
      docker = Some(Docker(
        image = "demothing",
        network = Some(Network.BRIDGE),
        portMappings = Some(Seq(PortMapping(2000, servicePort = servicePort)))
      ))
    ))
  )
}
Kosta-Github/marathon
src/test/scala/mesosphere/marathon/api/v2/ModelValidationTest.scala
Scala
apache-2.0
3,433
package com.eharmony.aloha.models

import java.{lang => jl}

import com.eharmony.aloha.reflect.{RefInfo, RefInfoOps}
import com.eharmony.aloha.util.Logging

import scala.math.ScalaNumericAnyConversions

/**
 * Provides over 200 coercions between types.  This works for the following matrix of types:
 *
 * <pre>
 *                                     TO
 *
 *            Bo  C By Sh  I  L  F  D JBy JSh JI JL JF JD JC JBo St
 *          +--------------------------------------------------------
 *       Bo |  I                                             bB  tS
 *       C  |     I  A  A  A  A  A  A  A   A   A  A  A  A  A     tS
 *       By |     A  I  A  A  A  A  A  A   A   A  A  A  A  A     tS
 *       Sh |     A  A  I  A  A  A  A  A   A   A  A  A  A  A     tS
 *       I  |     A  A  A  I  A  A  A  A   A   A  A  A  A  A     tS
 *       L  |     A  A  A  A  I  A  A  A   A   A  A  A  A  A     tS
 *   F   F  |     A  A  A  A  A  I  A  A   A   A  A  A  A  A     tS
 *   R   D  |     A  A  A  A  A  A  I  A   A   A  A  A  A  A     tS
 *   O   JBy|     N  N  N  N  N  N  N  I   N   N  N  N  N  N     tS
 *   M   JSh|     N  N  N  N  N  N  N  N   I   N  N  N  N  N     tS
 *       JI |     N  N  N  N  N  N  N  N   N   I  N  N  N  N     tS
 *       JL |     N  N  N  N  N  N  N  N   N   N  I  N  N  N     tS
 *       JF |     N  N  N  N  N  N  N  N   N   N  N  I  N  N     tS
 *       JD |     N  N  N  N  N  N  N  N   N   N  N  N  I  N     tS
 *       JC | uC                                        I        tS
 *       JBo| uB                                            I    tS
 *       St |                                                    I
 * </pre>
 *
 * Where the label abbreviations are:
 *
 - '''''Bo''''': ''scala.Boolean''
 - '''''C''''': ''scala.Char''
 - '''''By''''': ''scala.Byte''
 - '''''Sh''''': ''scala.Short''
 - '''''I''''': ''scala.Int''
 - '''''L''''': ''scala.Long''
 - '''''F''''': ''scala.Float''
 - '''''D''''': ''scala.Double''
 - '''''JBy''''': ''java.lang.Byte''
 - '''''JSh''''': ''java.lang.Short''
 - '''''JI''''': ''java.lang.Integer''
 - '''''JL''''': ''java.lang.Long''
 - '''''JF''''': ''java.lang.Float''
 - '''''JD''''': ''java.lang.Double''
 - '''''JC''''': ''java.lang.Character''
 - '''''JBo''''': ''java.lang.Boolean''
 - '''''St''''': ''java.lang.String''
 *
 * and value abbreviations are:
 *
 - '''''A''''': ''coercion from scala.AnyVal''
 - '''''N''''': ''coercion from java.lang.Number''
 - '''''I''''': ''identity function''
 - '''''bB''''': ''boxing Boolean coercion''
 - '''''uB''''': ''unboxing Boolean coercion''
 - '''''uC''''': ''unboxing character coercion''
 - '''''tS''''': ''toString coercion''
 *
 * The rationale behind not supplying coercion from Strings is that too many exceptions can
 * be thrown during the coercion process.  Coercions from String to character have issues
 * with the empty string.  One could call one of the following but they all throw exceptions:
 *
 * {{{
 * "".charAt(0) // java.lang.StringIndexOutOfBoundsException: String index out of range: 0
 * "".apply(0)  // java.lang.StringIndexOutOfBoundsException: String index out of range: 0
 * "".head      // java.util.NoSuchElementException: next on empty iterator
 * }}}
 *
 * Conversion to Boolean from Numbers and vice versa is not supported because there are a
 * number of ways to do this:
 *
 - Non-zero numbers are true, 0 is false.
 - One is true, all else is false
 *
 * @author R M Deak
 */
// TODO: Work on A = Option[C], B = Option[B] s.t. if Some[C => D], then produce Some[A => B]
trait TypeCoercion { self: Logging =>

  /**
   * Attempt to build a coercion function from `A` to `B`, trying (in order):
   * identity, Option-to-Option lifting, plain-to-Option lifting, `toString`,
   * (un)boxing of Boolean/Character, widening/narrowing via `java.lang.Number`,
   * and finally the `AnyVal` numeric conversions.  Returns `None` (after a
   * warning log) when no coercion is known.
   */
  def coercion[A, B](implicit a: RefInfo[A], b: RefInfo[B]): Option[A => B] = {
    val coerceF =
      if (a == b) {
        val f: A => A = identity[A]
        debug(s"Using identity: ${RefInfoOps.toString[A]} == ${RefInfoOps.toString[B]}")
        Option(f.asInstanceOf[A => B])
      }
      else if (RefInfoOps.isSubType[A, Option[Any]] && RefInfoOps.isSubType[B, Option[Any]]) {
        debug(s"Using Option => Option conversion: ${RefInfoOps.toString[A]} == ${RefInfoOps.toString[B]}")
        // Recursively coerce the Option element types, then map over the Option.
        // This cast is necessary even though it says it's not.
        coercion(RefInfoOps.typeParams[A].head, RefInfoOps.typeParams[B].head).
          asInstanceOf[Option[Any => Any]].
          map(f => (x: Option[Any]) => x.map(f)).
          asInstanceOf[Option[A => B]]
      }
      else if (!RefInfoOps.isSubType[A, Option[Any]] && RefInfoOps.isSubType[B, Option[Any]]) {
        debug(s"Using _ => Option conversion: ${RefInfoOps.toString[A]} == ${RefInfoOps.toString[B]}")
        // Coerce A to B's element type, then wrap in Option.
        coercion(a, RefInfoOps.typeParams[B].head).map(f => (x: A) => Option(f(x))).asInstanceOf[Option[A => B]]
      }
      else if (b == RefInfo.String) {
        debug(s"Using toString: B=${RefInfoOps.toString[B]}")
        Option(((a: A) => a.toString).asInstanceOf[A => B])
      }
      else if (RefInfo.Boolean == a && RefInfo.JavaBoolean == b) {
        debug(s"Using boxing boolean coercion: ${RefInfoOps.toString[A]} => ${RefInfoOps.toString[B]}")
        Option(((bool: Boolean) => jl.Boolean.valueOf(bool)).asInstanceOf[A => B])
      }
      else if (RefInfo.JavaBoolean == a && RefInfo.Boolean == b) {
        debug(s"Using unboxing boolean coercion: ${RefInfoOps.toString[A]} => ${RefInfoOps.toString[B]}")
        Option(((bool: jl.Boolean) => bool.booleanValue).asInstanceOf[A => B])
      }
      else if (RefInfo.JavaCharacter == a && RefInfo.Char == b) {
        debug(s"Using unboxing character coercion: ${RefInfoOps.toString[A]} => ${RefInfoOps.toString[B]}")
        Option(((c: jl.Character) => c.charValue).asInstanceOf[A => B])
      }
      else if (RefInfoOps.isSubType[A, jl.Number]) {
        // Route all java.lang.Number subtypes through the Number xxxValue() accessors.
        val f = (a: A) => a.asInstanceOf[jl.Number]
        val c = b match {
          case RefInfo.Char => Option(f.andThen(_.longValue.toChar).asInstanceOf[A => B])
          case RefInfo.Byte => Option(f.andThen(_.byteValue).asInstanceOf[A => B])
          case RefInfo.Short => Option(f.andThen(_.shortValue).asInstanceOf[A => B])
          case RefInfo.Int => Option(f.andThen(_.intValue).asInstanceOf[A => B])
          case RefInfo.Long => Option(f.andThen(_.longValue).asInstanceOf[A => B])
          case RefInfo.Float => Option(f.andThen(_.floatValue).asInstanceOf[A => B])
          case RefInfo.Double => Option(f.andThen(_.doubleValue).asInstanceOf[A => B])
          case RefInfo.JavaCharacter => Option(f.andThen(x => jl.Character.valueOf(x.longValue.toChar)).asInstanceOf[A => B])
          case RefInfo.JavaByte => Option(f.andThen(v => jl.Byte.valueOf(v.byteValue)).asInstanceOf[A => B])
          case RefInfo.JavaShort => Option(f.andThen(v => jl.Short.valueOf(v.shortValue)).asInstanceOf[A => B])
          case RefInfo.JavaInteger => Option(f.andThen(v => jl.Integer.valueOf(v.intValue)).asInstanceOf[A => B])
          case RefInfo.JavaLong => Option(f.andThen(v => jl.Long.valueOf(v.longValue)).asInstanceOf[A => B])
          case RefInfo.JavaFloat => Option(f.andThen(v => jl.Float.valueOf(v.floatValue)).asInstanceOf[A => B])
          case RefInfo.JavaDouble => Option(f.andThen(v => jl.Double.valueOf(v.doubleValue)).asInstanceOf[A => B])
          case _ => None
        }
        c.foreach(_ => debug(s"Using function from java.lang.Number conversion: ${RefInfoOps.toString[A]} => ${RefInfoOps.toString[B]}."))
        c
      }
      else if (RefInfoOps.isSubType[A, AnyVal]) {
        // First lift the scala primitive to ScalaNumericAnyConversions ...
        val numConv = a match {
          case RefInfo.Char => Option(implicitly[Char => ScalaNumericAnyConversions].asInstanceOf[A => ScalaNumericAnyConversions])
          case RefInfo.Byte => Option(implicitly[Byte => ScalaNumericAnyConversions].asInstanceOf[A => ScalaNumericAnyConversions])
          case RefInfo.Short => Option(implicitly[Short => ScalaNumericAnyConversions].asInstanceOf[A => ScalaNumericAnyConversions])
          case RefInfo.Int => Option(implicitly[Int => ScalaNumericAnyConversions].asInstanceOf[A => ScalaNumericAnyConversions])
          case RefInfo.Long => Option(implicitly[Long => ScalaNumericAnyConversions].asInstanceOf[A => ScalaNumericAnyConversions])
          case RefInfo.Float => Option(implicitly[Float => ScalaNumericAnyConversions].asInstanceOf[A => ScalaNumericAnyConversions])
          case RefInfo.Double => Option(implicitly[Double => ScalaNumericAnyConversions].asInstanceOf[A => ScalaNumericAnyConversions])
          case _ => None
        }
        // ... then narrow/box to the requested target type.
        val c = numConv.flatMap(f => b match {
          case RefInfo.Char => Option(f.andThen(_.toChar).asInstanceOf[A => B])
          case RefInfo.Byte => Option(f.andThen(_.toByte).asInstanceOf[A => B])
          case RefInfo.Short => Option(f.andThen(_.toShort).asInstanceOf[A => B])
          case RefInfo.Int => Option(f.andThen(_.toInt).asInstanceOf[A => B])
          case RefInfo.Long => Option(f.andThen(_.toLong).asInstanceOf[A => B])
          case RefInfo.Float => Option(f.andThen(_.toFloat).asInstanceOf[A => B])
          case RefInfo.Double => Option(f.andThen(_.toDouble).asInstanceOf[A => B])
          case RefInfo.JavaCharacter => Option(f.andThen(v => jl.Character.valueOf(v.toChar)).asInstanceOf[A => B])
          case RefInfo.JavaByte => Option(f.andThen(v => jl.Byte.valueOf(v.toByte)).asInstanceOf[A => B])
          case RefInfo.JavaShort => Option(f.andThen(v => jl.Short.valueOf(v.toShort)).asInstanceOf[A => B])
          case RefInfo.JavaInteger => Option(f.andThen(v => jl.Integer.valueOf(v.toInt)).asInstanceOf[A => B])
          case RefInfo.JavaLong => Option(f.andThen(v => jl.Long.valueOf(v.toLong)).asInstanceOf[A => B])
          case RefInfo.JavaFloat => Option(f.andThen(v => jl.Float.valueOf(v.toFloat)).asInstanceOf[A => B])
          case RefInfo.JavaDouble => Option(f.andThen(v => jl.Double.valueOf(v.toDouble)).asInstanceOf[A => B])
          case _ => None
        })
        c.foreach(_ => debug(s"Using function from AnyVal conversion: ${RefInfoOps.toString[A]} => ${RefInfoOps.toString[B]}."))
        c
      }
      else None

    if (coerceF.isEmpty)
      warn(s"Couldn't find type coercion function: ${RefInfoOps.toString[A]} => ${RefInfoOps.toString[B]}.")

    coerceF
  }
}

object TypeCoercion extends TypeCoercion with Logging {
  def apply[A: RefInfo, B: RefInfo] = coercion[A, B]
}
eHarmony/aloha
aloha-core/src/main/scala/com/eharmony/aloha/models/TypeCoercion.scala
Scala
mit
11,456
/*
 * The MIT License (MIT)
 * <p>
 * Copyright (c) 2017-2019
 * <p>
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 * <p>
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * <p>
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
package io.techcode.streamy.util.json

import scala.collection.mutable.ArrayBuffer

/**
 * Json operations implementation compliant with RFC-6902.
 * https://tools.ietf.org/html/rfc6902
 */
sealed trait JsonOperation {

  /**
   * Apply operation on json value.
   *
   * @param json json value to operate.
   * @return json value operated on, or none.
   */
  def apply(json: Json): MaybeJson

}

/**
 * Abstract operation implementation: walks the pointer segment by segment
 * and delegates the final mutation to [[operate]].
 *
 * @param path json path.
 */
private[json] abstract class AbstractOperation(path: JsonPointer) extends JsonOperation {

  // Shortcut to underlying data structure
  val underlying: ArrayBuffer[JsModifier] = path.underlying

  /**
   * Apply operation recursively.
   *
   * @param path    json path to use.
   * @param idx     current position in json path.
   * @param current current value explored.
   * @return json value modified or [[JsUndefined]].
   */
  private[json] def apply(
    path: JsonPointer,
    idx: Int,
    current: MaybeJson
  ): MaybeJson = {
    // We fail to evaluate path if not mapped
    current.flatMap[Json] { ref =>
      val modifier = underlying(idx)

      // We are in final state
      if (idx == underlying.length - 1) {
        // operate on a copy so the original tree is left untouched
        operate(modifier, ref.copy())
      } else {
        // Recursive call until final state, rebuilding each level on the way back up
        apply(path, idx + 1, modifier.get(ref)).flatMap[Json] { result =>
          modifier.set(ref.copy(), result)
        }
      }
    }
  }

  /**
   * Apply operation on current json value.
   *
   * @param accessor json value accessor.
   * @param current  current json value.
   * @return json value modified or [[JsUndefined]].
   */
  def operate(accessor: JsModifier, current: Json): MaybeJson

}

/**
 * Set a json value at pointed location based on an arbitrary function.
 *
 * @param path json path.
 * @param f    arbitrary function applied to the typed value at `path`.
 */
private[json] case class SetFunc[T](path: JsonPointer, f: T => Json)(implicit c: JsTyped[T]) extends AbstractOperation(path) {

  override def apply(json: Json): MaybeJson =
    if (path.isEmpty) {
      // Empty pointer: transform the root value itself.
      f(json.get[T])
    } else {
      apply(path, 0, json)
    }

  def operate(modifier: JsModifier, current: Json): MaybeJson =
    modifier.get(current).flatMap[T](v => modifier.set(current, f(v)))

}

/**
 * Add a json value at pointed location.
 *
 * @param path  json path.
 * @param value json value to add with.
 */
case class Add(path: JsonPointer, value: Json) extends AbstractOperation(path) {

  override def apply(json: Json): MaybeJson =
    if (path.isEmpty) {
      // Empty pointer: the whole document is replaced by the value (RFC-6902 §4.1).
      value
    } else {
      apply(path, 0, json)
    }

  def operate(modifier: JsModifier, current: Json): MaybeJson =
    modifier.add(current, value)

}

/**
 * Replace a json value at pointed location.
 *
 * @param path  json path.
 * @param value json value to replace with.
 */
case class Replace(path: JsonPointer, value: Json) extends AbstractOperation(path) {

  override def apply(json: Json): MaybeJson =
    if (path.isEmpty) {
      value
    } else {
      apply(path, 0, json)
    }

  def operate(modifier: JsModifier, current: Json): MaybeJson =
    modifier.replace(current, value)

}

/**
 * Remove a json value at pointed location.
 *
 * @param path      json path.
 * @param mustExist whether the json value must exist for the operation to succeed.
 */
case class Remove(path: JsonPointer, mustExist: Boolean = true) extends AbstractOperation(path) {

  override def apply(json: Json): MaybeJson =
    if (path.isEmpty) {
      json
    } else {
      val result = apply(path, 0, json)
      if (mustExist) {
        result
      } else {
        // Best-effort removal: fall back to the unmodified document.
        result.orElse(json)
      }
    }

  def operate(modifier: JsModifier, current: Json): MaybeJson =
    modifier.remove(current, mustExist)

}

/**
 * Move a json value from pointed location to another pointed location.
 * Implemented as Remove(from) followed by Add(to).
 *
 * @param from from json path location.
 * @param to   to json path location.
 */
case class Move(from: JsonPointer, to: JsonPointer) extends JsonOperation {

  def apply(json: Json): MaybeJson =
    json.evaluate(from)
      .flatMap[Json](r => Remove(from)(json).flatMap[Json](Add(to, r)(_)))

}

/**
 * Copy a json value from pointed location to another pointed location.
 *
 * @param from from json path location.
 * @param to   to json path location.
 */
case class Copy(from: JsonPointer, to: JsonPointer) extends JsonOperation {

  def apply(json: Json): MaybeJson =
    json.evaluate(from).flatMap[Json](Add(to, _)(json))

}

/**
 * Test if a json value at pointed location is equal to another one.
 *
 * @param path  json path location.
 * @param value json value to compare with.
 */
case class Test(path: JsonPointer, value: Json) extends JsonOperation {

  def apply(json: Json): MaybeJson =
    json.evaluate(path).flatMap[Json] { x =>
      if (x.equals(value)) {
        json
      } else {
        JsUndefined
      }
    }

}
amannocci/streamy
core/src/main/scala/io/techcode/streamy/util/json/JsonOperation.scala
Scala
mit
6,053
/*
 * Created on 2011/11/10
 * Copyright (c) 2010-2014, Wei-ju Wu.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 * Neither the name of Wei-ju Wu nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
package org.zmpp.base

import java.util.NoSuchElementException

/**
 * A space-saving data structure that we can use to implement undo.
 *
 * Holds at most `capacity` elements; pushing beyond capacity silently
 * overwrites the oldest element (which is exactly the behavior wanted for
 * a bounded undo history).
 *
 * Fixes over the previous revision:
 *  - `pop` on an empty stack used to drive `size` negative and return a
 *    stale (or null) slot; it now throws `NoSuchElementException`.
 *  - `push` used to let `size` exceed `capacity` before clamping; it now
 *    never exceeds it.
 *  - popped slots are nulled out so evicted elements can be garbage collected.
 *
 * Not thread-safe.
 */
class CircularStack[T](capacity: Int) {
  require(capacity > 0, "capacity must be positive")

  // Boxed storage; elements are cast back to T on the way out.
  private val buffer = Array.ofDim[AnyRef](capacity)
  // Index of the next free slot (one past the most recent element).
  private var top: Int = 0
  // Number of live elements, always in [0, capacity].
  private var size = 0

  /** True when no element can be popped. */
  def empty = size == 0

  /** Pushes an element, evicting the oldest one when at capacity. */
  def push(elem: T) {
    buffer(top) = elem.asInstanceOf[AnyRef]
    top = (top + 1) % capacity
    if (size < capacity) size += 1
  }

  /**
   * Removes and returns the most recently pushed element.
   *
   * @throws NoSuchElementException if the stack is empty.
   */
  def pop: T = {
    if (empty) throw new NoSuchElementException("pop on empty CircularStack")
    var pos = top - 1
    if (pos < 0) pos = capacity + pos
    val result = buffer(pos)
    buffer(pos) = null // drop the reference so the element can be collected
    top = pos
    size -= 1
    result.asInstanceOf[T]
  }
}
weiju/zmpp2
zmpp-common/src/main/scala/org/zmpp/base/Util.scala
Scala
bsd-3-clause
2,157
/*
 * Copyright 2015 Dennis Vriend
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example

import spark._

/**
 * Scala-friendly facade over the Spark Java web framework.
 *
 * Each route/filter method takes a Scala function `(Request, Response) => _`
 * (implicitly), wraps it in the framework's `Route`/`Filter` SAM type via
 * `convertToRequest`/`convertToFilter`, and delegates to the same-named
 * overload inherited from `SparkJava`.  Note: overload resolution matters
 * here — each `get(path)(f)` style method resolves to the inherited
 * `get(path, route)` overload, not to itself.
 */
trait Spark extends SparkJava {

  // Adapts a Scala function to the framework's Route handler.
  // NOTE(review): `path` is accepted but unused by the adapter itself;
  // routing is done by the SparkJava overload the wrapper is passed to.
  def convertToRequest(path: String, f: (Request, Response) ⇒ AnyRef): Route = {
    new Route() {
      override def handle(request: Request, response: Response): AnyRef = {
        f(request, response)
      }
    }
  }

  // Adapts a Scala function to the framework's Filter handler (before/after hooks).
  def convertToFilter(path: String, f: (Request, Response) ⇒ Unit): Filter = {
    new Filter() {
      override def handle(request: Request, response: Response): Unit = {
        f(request, response)
      }
    }
  }

  // ---- HTTP verbs, path only ----

  def get(path: String)(implicit f: (Request, Response) ⇒ AnyRef): Unit = {
    get(path, convertToRequest(path, f))
  }

  def post(path: String)(implicit f: (Request, Response) ⇒ AnyRef): Unit = {
    post(path, convertToRequest(path, f))
  }

  def put(path: String)(implicit f: (Request, Response) ⇒ AnyRef): Unit = {
    put(path, convertToRequest(path, f))
  }

  def patch(path: String)(implicit f: (Request, Response) ⇒ AnyRef): Unit = {
    patch(path, convertToRequest(path, f))
  }

  def delete(path: String)(implicit f: (Request, Response) ⇒ AnyRef): Unit = {
    delete(path, convertToRequest(path, f))
  }

  def head(path: String)(implicit f: (Request, Response) ⇒ AnyRef): Unit = {
    head(path, convertToRequest(path, f))
  }

  def trace(path: String)(implicit f: (Request, Response) ⇒ AnyRef): Unit = {
    trace(path, convertToRequest(path, f))
  }

  def connect(path: String)(implicit f: (Request, Response) ⇒ AnyRef): Unit = {
    connect(path, convertToRequest(path, f))
  }

  def options(path: String)(implicit f: (Request, Response) ⇒ AnyRef): Unit = {
    options(path, convertToRequest(path, f))
  }

  // ---- Filters, path only ----

  def before(path: String)(implicit f: (Request, Response) ⇒ Unit): Unit = {
    before(path, convertToFilter(path, f))
  }

  def after(path: String)(implicit f: (Request, Response) ⇒ Unit): Unit = {
    after(path, convertToFilter(path, f))
  }

  // ---- HTTP verbs, path + accept type ----

  def get(path: String, acceptType: String)(implicit f: (Request, Response) ⇒ AnyRef): Unit = {
    get(path, acceptType, convertToRequest(path, f))
  }

  def post(path: String, acceptType: String)(implicit f: (Request, Response) ⇒ AnyRef): Unit = {
    post(path, acceptType, convertToRequest(path, f))
  }

  def put(path: String, acceptType: String)(implicit f: (Request, Response) ⇒ AnyRef): Unit = {
    put(path, acceptType, convertToRequest(path, f))
  }

  def patch(path: String, acceptType: String)(implicit f: (Request, Response) ⇒ AnyRef): Unit = {
    patch(path, acceptType, convertToRequest(path, f))
  }

  def delete(path: String, acceptType: String)(implicit f: (Request, Response) ⇒ AnyRef): Unit = {
    delete(path, acceptType, convertToRequest(path, f))
  }

  def head(path: String, acceptType: String)(implicit f: (Request, Response) ⇒ AnyRef): Unit = {
    head(path, acceptType, convertToRequest(path, f))
  }

  def trace(path: String, acceptType: String)(implicit f: (Request, Response) ⇒ AnyRef): Unit = {
    trace(path, acceptType, convertToRequest(path, f))
  }

  def connect(path: String, acceptType: String)(implicit f: (Request, Response) ⇒ AnyRef): Unit = {
    connect(path, acceptType, convertToRequest(path, f))
  }

  def options(path: String, acceptType: String)(implicit f: (Request, Response) ⇒ AnyRef): Unit = {
    options(path, acceptType, convertToRequest(path, f))
  }

  // ---- Filters, path + accept type ----

  def before(path: String, acceptType: String)(implicit f: (Request, Response) ⇒ Unit): Unit = {
    before(path, acceptType, convertToFilter(path, f))
  }

  def after(path: String, acceptType: String)(implicit f: (Request, Response) ⇒ Unit): Unit = {
    after(path, acceptType, convertToFilter(path, f))
  }
}
dnvriend/spark-scala
src/main/scala/com/example/Spark.scala
Scala
apache-2.0
4,271
package com.dt.scala.type_parameterization

import scala.io.Source
import scala.io.BufferedSource

/**
 * Demonstrates abstract type members: `In` and `Contents` are refined by
 * concrete readers.
 */
trait Reader {
  type In <: java.io.Serializable
  type Contents

  /** Reads the given input and returns its contents. */
  def read(in: In): Contents
}

/**
 * A [[Reader]] over file paths.  Note: the returned [[BufferedSource]] holds
 * an open file handle — the caller is responsible for closing it.
 */
class FileReader extends Reader {
  type In = String
  type Contents = BufferedSource

  override def read(name: In) = Source.fromFile(name)
}

object Abstract_Types {
  def main(args: Array[String]) {
    val fileReader = new FileReader
    val content = fileReader.read("E:\\\\WangJialin.txt")
    // Fix: the source was previously never closed, leaking the file handle.
    try {
      for (line <- content.getLines) {
        println(line)
      }
    } finally {
      content.close()
    }
  }
}
slieer/scala-tutorials
src/main/scala/com/dt/scala/type_parameterization/Abstract_Types.scala
Scala
apache-2.0
577
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.table.planner.plan.nodes.physical.stream

import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.plan.nodes.exec.stream.StreamExecSortLimit
import org.apache.flink.table.planner.plan.nodes.exec.{InputProperty, ExecNode}
import org.apache.flink.table.planner.plan.utils._

import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.core.Sort
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.calcite.rel.{RelCollation, RelNode, RelWriter}
import org.apache.calcite.rex.{RexLiteral, RexNode}

import scala.collection.JavaConversions._

/**
 * Stream physical RelNode for [[Sort]].
 *
 * This RelNode take the `limit` elements beginning with the first `offset` elements.
 **/
class StreamPhysicalSortLimit(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    inputRel: RelNode,
    sortCollation: RelCollation,
    offset: RexNode,
    fetch: RexNode,
    rankStrategy: RankProcessStrategy)
  extends Sort(cluster, traitSet, inputRel, sortCollation, offset, fetch)
  with StreamPhysicalRel {

  // Half-open element range [limitStart, limitEnd) derived from offset/fetch.
  private val limitStart: Long = SortUtil.getLimitStart(offset)
  private val limitEnd: Long = SortUtil.getLimitEnd(offset, fetch)

  override def requireWatermark: Boolean = false

  // Calcite copy contract: reproduce this node with new inputs/traits,
  // carrying the current rankStrategy over unchanged.
  override def copy(
      traitSet: RelTraitSet,
      newInput: RelNode,
      newCollation: RelCollation,
      offset: RexNode,
      fetch: RexNode): Sort = {
    new StreamPhysicalSortLimit(
      cluster, traitSet, newInput, newCollation, offset, fetch, rankStrategy)
  }

  // Copy with only the rank-processing strategy exchanged.
  def copy(newStrategy: RankProcessStrategy): StreamPhysicalSortLimit = {
    new StreamPhysicalSortLimit(
      cluster, traitSet, input, sortCollation, offset, fetch, newStrategy)
  }

  override def explainTerms(pw: RelWriter): RelWriter = {
    pw.input("input", getInput)
      .item("orderBy", RelExplainUtil.collationToString(sortCollation, getRowType))
      .item("offset", limitStart)
      .item("fetch", RelExplainUtil.fetchToString(fetch))
      .item("strategy", rankStrategy)
  }

  override def estimateRowCount(mq: RelMetadataQuery): Double = {
    val inputRows = mq.getRowCount(this.getInput)
    if (inputRows == null) {
      // Unknown input cardinality: propagate the unknown (null) upward.
      inputRows
    } else {
      // Rows remaining after the offset, floored at 1, capped by fetch if present.
      val rowCount = (inputRows - limitStart).max(1.0)
      if (fetch != null) {
        rowCount.min(RexLiteral.intValue(fetch))
      } else {
        rowCount
      }
    }
  }

  override def translateToExecNode(): ExecNode[_] = {
    val generateUpdateBefore = ChangelogPlanUtils.generateUpdateBefore(this)
    new StreamExecSortLimit(
      SortUtil.getSortSpec(sortCollation.getFieldCollations),
      limitStart,
      limitEnd,
      rankStrategy,
      generateUpdateBefore,
      InputProperty.DEFAULT,
      FlinkTypeFactory.toLogicalRowType(getRowType),
      getRelDetailedDescription
    )
  }
}
apache/flink
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/stream/StreamPhysicalSortLimit.scala
Scala
apache-2.0
3,672
package codecheck.github.models

/**
 * Sort order used by list queries; `toString` yields the wire name so a
 * direction can be dropped straight into a query string.
 */
sealed abstract class SortDirection(val name: String) {
  override def toString = name
}

object SortDirection {
  case object asc extends SortDirection("asc")
  case object desc extends SortDirection("desc")

  /** All known directions, in declaration order. */
  val values = Array(asc, desc)

  /**
   * Resolves a wire name ("asc"/"desc") to its direction.
   * Throws NoSuchElementException for any unknown name, as before.
   */
  def fromString(str: String) = values.find(_.name == str).get
}
code-check/github-api-scala
src/main/scala/codecheck/github/models/SortDirection.scala
Scala
mit
345
/*
 * Copyright (c) 2011-15 Miles Sabin
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package shapeless
package syntax

import scala.language.dynamics

import tag.@@

/**
 * Record operations on `HList`'s with field-like elements.
 *
 * @author Miles Sabin
 */
final class RecordOps[L <: HList](val l : L) extends AnyVal with Serializable {
  import shapeless.labelled._
  import ops.record._

  /**
   * Returns the value associated with the singleton typed key k. Only available if this record has a field
   * with keyType equal to the singleton type k.T.
   */
  def get(k: Witness)(implicit selector : Selector[L, k.T]): selector.Out = selector(l)

  /**
   * Returns the value associated with the singleton typed key k. Only available if this record has a field
   * with keyType equal to the singleton type k.T.
   *
   * Note that this can create a bogus ambiguity with `HListOps#apply` as described in
   * https://issues.scala-lang.org/browse/SI-5142. If this method is accessible the conflict can be worked around by
   * using HListOps#at instead of `HListOps#apply`.
   */
  def apply(k: Witness)(implicit selector : Selector[L, k.T]): selector.Out = selector(l)

  /**
   * Returns the field (key and value together) associated with the singleton typed key k. Only available if this
   * record has a field with keyType equal to the singleton type k.T.
   */
  def fieldAt(k: Witness)(implicit selector : Selector[L, k.T]): FieldType[k.T, selector.Out] = field[k.T](selector(l))

  /**
   * Updates or adds to this record a field with key type F and value type F#valueType.
   */
  def updated[V](k: Witness, v: V)(implicit updater: Updater[L, FieldType[k.T, V]]) : updater.Out = updater(l, field[k.T](v))

  /**
   * Updates a field having a value with type A by given function.
   */
  def updateWith[W](k: WitnessWith[FSL])(f: k.instance.Out => W)
    (implicit modifier: Modifier[L, k.T, k.instance.Out, W]): modifier.Out = modifier(l, f)

  // Partially-applied Selector alias used by updateWith's WitnessWith bound.
  type FSL[K] = Selector[L, K]

  /**
   * Remove the field associated with the singleton typed key k, returning both the corresponding value and the updated
   * record. Only available if this record has a field with keyType equal to the singleton type k.T.
   */
  def remove(k : Witness)(implicit remover: Remover[L, k.T]): remover.Out = remover(l)

  /**
   * Updates or adds to this record a field of type F.
   */
  def +[F](f: F)(implicit updater : Updater[L, F]): updater.Out = updater(l, f)

  /**
   * Remove the field associated with the singleton typed key k, returning the updated record. Only available if this
   * record has a field with keyType equal to the singleton type k.T.
   */
  def -[V, Out <: HList](k: Witness)(implicit remover : Remover.Aux[L, k.T, (V, Out)]): Out = remover(l)._2

  /**
   * Returns the union of this record and another record.
   */
  def merge[M <: HList](m: M)(implicit merger: Merger[L, M]): merger.Out = merger(l, m)

  /**
   * Rename the field associated with the singleton typed key oldKey. Only available if this
   * record has a field with keyType equal to the singleton type oldKey.T.
   */
  def renameField(oldKey: Witness, newKey: Witness)(implicit renamer: Renamer[L, oldKey.T, newKey.T]): renamer.Out = renamer(l)

  /**
   * Returns the keys of this record as an `HList` of singleton typed values.
   */
  def keys(implicit keys: Keys[L]): keys.Out = keys()

  /**
   * Returns a `HList` of the values of this record.
   */
  def values(implicit values: Values[L]): values.Out = values(l)

  /**
   * Returns a `HList` made of the key-value pairs of this record.
   */
  def fields(implicit fields: Fields[L]): fields.Out = fields(l)

  /**
   * Returns a `Map` whose keys and values are typed as the Lub of the keys
   * and values of this record.
   */
  def toMap[K, V](implicit toMap: ToMap.Aux[L, K, V]): Map[K, V] = toMap(l)

  /**
   * Maps a higher rank function across the values of this record.
   */
  def mapValues(f: Poly)(implicit mapValues: MapValues[f.type, L]): mapValues.Out = mapValues(l)

  /**
   * Returns a wrapped version of this record that provides `selectDynamic` access to fields.
   */
  def record: DynamicRecordOps[L] = DynamicRecordOps(l)
}

/**
 * Record wrapper providing `selectDynamic` access to fields.
 *
 * @author Cody Allen
 */
final case class DynamicRecordOps[L <: HList](l : L) extends Dynamic {
  import ops.record.Selector

  /**
   * Allows dynamic-style access to fields of the record whose keys are Symbols.
   */
  def selectDynamic(key: String)(implicit selector: Selector[L, Symbol @@ key.type]): selector.Out = selector(l)
}
liff/shapeless
core/src/main/scala/shapeless/syntax/records.scala
Scala
apache-2.0
5,118
package scala.collection.immutable import org.scalacheck._ import org.scalacheck.Prop._ import Gen._ object BitSetProperties extends Properties("immutable.BitSet") { // the top of the range shouldn't be too high, else we may not get enough overlap implicit val arbitraryBitSet: Arbitrary[BitSet] = Arbitrary( oneOf( const(BitSet()), oneOf(0 to 100).map(i => BitSet(i)), listOfN(200, oneOf(0 to 10000)).map(_.to(BitSet)) ) ) property("min") = forAll { (bs: BitSet) => bs.nonEmpty ==> (bs.min ?= bs.toList.min) } property("min reverse") = forAll { (bs: BitSet) => bs.nonEmpty ==> (bs.min(Ordering.Int.reverse) ?= bs.toList.min(Ordering.Int.reverse)) } property("max") = forAll { (bs: BitSet) => bs.nonEmpty ==> (bs.max ?= bs.toList.max) } property("max reverse") = forAll { (bs: BitSet) => bs.nonEmpty ==> (bs.max(Ordering.Int.reverse) ?= bs.toList.max(Ordering.Int.reverse)) } property("diff bitSet") = forAll { (left: BitSet, right: BitSet) => (left.diff(right): Set[Int]) ?= left.to(HashSet).diff(right.to(HashSet)) } property("diff hashSet") = forAll { (left: BitSet, right: BitSet) => (left.diff(right.to(HashSet)) : Set[Int]) ?= left.to(HashSet).diff(right.to(HashSet)) } property("filter") = forAll { (bs: BitSet) => bs.filter(_ % 2 == 0) ?= bs.toList.filter(_ % 2 == 0).to(BitSet) } property("filterNot") = forAll { (bs: BitSet) => bs.filterNot(_ % 2 == 0) ?= bs.toList.filterNot(_ % 2 == 0).to(BitSet) } property("partition") = forAll { (bs: BitSet) => val p = (i: Int) => i % 2 == 0 val (left, right) = bs.partition(p) (left ?= bs.filter(p)) && (right ?= bs.filterNot(p)) } property("iteratorFrom") = forAll( listOfN(200, oneOf(0 to 10000)).map(_.to(Set)), Gen.chooseNum[Int](0, 10000)) { (xs: Set[Int], start: Int) => val bs = xs.to(BitSet) bs.iteratorFrom(start).to(Set) ?= xs.filter(_ >= start) } }
scala/scala
test/scalacheck/scala/collection/immutable/BitSetProperties.scala
Scala
apache-2.0
1,952
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.scheduler import org.apache.spark.util.Utils /** * Information about a running task attempt inside a TaskSet. */ private[spark] class TaskInfo( val taskId: Long, val index: Int, val launchTime: Long, val executorId: String, val host: String, val taskLocality: TaskLocality.TaskLocality) { /** * The time when the task started remotely getting the result. Will not be set if the * task result was sent immediately when the task finished (as opposed to sending an * IndirectTaskResult and later fetching the result from the block manager). */ var gettingResultTime: Long = 0 /** * The time when the task has completed successfully (including the time to remotely fetch * results, if necessary). 
*/ var finishTime: Long = 0 var failed = false def markGettingResult(time: Long = System.currentTimeMillis) { gettingResultTime = time } def markSuccessful(time: Long = System.currentTimeMillis) { finishTime = time } def markFailed(time: Long = System.currentTimeMillis) { finishTime = time failed = true } def gettingResult: Boolean = gettingResultTime != 0 def finished: Boolean = finishTime != 0 def successful: Boolean = finished && !failed def running: Boolean = !finished def status: String = { if (running) "RUNNING" else if (gettingResult) "GET RESULT" else if (failed) "FAILED" else if (successful) "SUCCESS" else "UNKNOWN" } def duration: Long = { if (!finished) { throw new UnsupportedOperationException("duration() called on unfinished tasks") } else { finishTime - launchTime } } def timeRunning(currentTime: Long): Long = currentTime - launchTime }
windeye/spark
core/src/main/scala/org/apache/spark/scheduler/TaskInfo.scala
Scala
apache-2.0
2,575
/*********************************************************************** * Copyright (c) 2013-2022 Commonwealth Computer Research, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, Version 2.0 * which accompanies this distribution and is available at * http://www.opensource.org/licenses/apache2.0.php. ***********************************************************************/ package org.locationtech.geomesa.kafka.index import java.util.Date import java.util.concurrent.{ScheduledExecutorService, TimeUnit} import org.geotools.filter.text.ecql.ECQL import org.junit.runner.RunWith import org.locationtech.geomesa.features.ScalaSimpleFeature import org.locationtech.geomesa.kafka.ExpirationMocking.{MockTicker, ScheduledExpiry, WrappedRunnable} import org.locationtech.geomesa.kafka.data.KafkaDataStore._ import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes import org.locationtech.geomesa.utils.io.WithClose import org.mockito.ArgumentMatchers import org.opengis.feature.simple.SimpleFeature import org.specs2.mock.Mockito import org.specs2.mutable.Specification import org.specs2.runner.JUnitRunner import scala.concurrent.duration.Duration @RunWith(classOf[JUnitRunner]) class EventTimeFeatureCacheTest extends Specification with Mockito { sequential // sequential helps expiration timing to be more consistent val sft = SimpleFeatureTypes.createType("track", "trackId:String,dtg:Date:default=true,*geom:Point:srid=4326") val res = IndexResolution(180, 90) "EventTimeFeatureCache" should { "order by event time" in { val ex = EventTimeConfig(Duration.Inf, "dtg", ordered = true) val config = IndexConfig(ex, res, Seq.empty, Seq.empty, lazyDeserialization = true, None) WithClose(KafkaFeatureCache(sft, config)) { cache => val sf1 = ScalaSimpleFeature.create(sft, "1", "first", "2018-01-01T12:00:00.000Z", "POINT (-78.0 35.0)") cache.put(sf1) val sf2 = ScalaSimpleFeature.create(sft, "1", "second", 
"2018-01-01T11:59:55.000Z", "POINT (-78.0 35.0)") cache.put(sf2) cache.query("1") must beSome(sf1.asInstanceOf[SimpleFeature]) cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")).toSeq mustEqual Seq(sf1) val sf3 = ScalaSimpleFeature.create(sft, "1", "third", "2018-01-01T12:00:05.000Z", "POINT (-78.0 35.0)") cache.put(sf3) cache.query("1") must beSome(sf3.asInstanceOf[SimpleFeature]) cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")).toSeq mustEqual Seq(sf3) } } "order by event time expression" in { val ex = EventTimeConfig(Duration.Inf, "dateToLong(dtg)", ordered = true) val config = IndexConfig(ex, res, Seq.empty, Seq.empty, lazyDeserialization = true, None) WithClose(KafkaFeatureCache(sft, config)) { cache => val sf1 = ScalaSimpleFeature.create(sft, "1", "first", "2018-01-01T12:00:00.000Z", "POINT (-78.0 35.0)") cache.put(sf1) val sf2 = ScalaSimpleFeature.create(sft, "1", "second", "2018-01-01T11:59:55.000Z", "POINT (-78.0 35.0)") cache.put(sf2) cache.query("1") must beSome(sf1.asInstanceOf[SimpleFeature]) cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")).toSeq mustEqual Seq(sf1) val sf3 = ScalaSimpleFeature.create(sft, "1", "third", "2018-01-01T12:00:05.000Z", "POINT (-78.0 35.0)") cache.put(sf3) cache.query("1") must beSome(sf3.asInstanceOf[SimpleFeature]) cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")).toSeq mustEqual Seq(sf3) } } "order by message time" in { val config = IndexConfig(NeverExpireConfig, res, Seq.empty, Seq.empty, lazyDeserialization = true, None) WithClose(KafkaFeatureCache(sft, config)) { cache => val sf1 = ScalaSimpleFeature.create(sft, "1", "first", "2018-01-01T12:00:00.000Z", "POINT (-78.0 35.0)") cache.put(sf1) val sf2 = ScalaSimpleFeature.create(sft, "1", "second", "2018-01-01T11:59:55.000Z", "POINT (-78.0 35.0)") cache.put(sf2) cache.query("1") must beSome(sf2.asInstanceOf[SimpleFeature]) cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")).toSeq mustEqual Seq(sf2) val sf3 = 
ScalaSimpleFeature.create(sft, "1", "third", "2018-01-01T12:00:05.000Z", "POINT (-78.0 35.0)") cache.put(sf3) cache.query("1") must beSome(sf3.asInstanceOf[SimpleFeature]) cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")).toSeq mustEqual Seq(sf3) } } "expire by event time with ordering" in { val ex = mock[ScheduledExecutorService] val ticker = new MockTicker() val ev = EventTimeConfig(Duration("100ms"), "dtg", ordered = true) val config = IndexConfig(ev, res, Seq.empty, Seq.empty, lazyDeserialization = true, Some((ex, ticker))) WithClose(KafkaFeatureCache(sft, config)) { cache => val sf1 = ScalaSimpleFeature.create(sft, "1", "first", new Date(ticker.millis), "POINT (-78.0 35.0)") val expire1 = new WrappedRunnable(100L) ex.schedule(ArgumentMatchers.any[Runnable](), ArgumentMatchers.eq(100L), ArgumentMatchers.eq(TimeUnit.MILLISECONDS)) responds { args => expire1.runnable = args.asInstanceOf[Array[AnyRef]](0).asInstanceOf[Runnable] new ScheduledExpiry(expire1) } cache.put(sf1) expire1.runnable must not(beNull) there was one(ex).schedule(ArgumentMatchers.eq(expire1.runnable), ArgumentMatchers.eq(100L), ArgumentMatchers.eq(TimeUnit.MILLISECONDS)) // move time forward ticker.millis += 50L val sf2 = ScalaSimpleFeature.create(sft, "1", "second", new Date(ticker.millis - 1000), "POINT (-78.0 35.0)") cache.put(sf2) expire1.cancelled must beFalse cache.query("1") must beSome(sf1.asInstanceOf[SimpleFeature]) cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")).toSeq mustEqual Seq(sf1) // move time forward and run the expiration ticker.millis += 100L expire1.runnable.run() cache.query("1") must beNone cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")) must beEmpty // move time forward ticker.millis += 100L val sf3 = ScalaSimpleFeature.create(sft, "1", "third", new Date(ticker.millis - 10), "POINT (-78.0 35.0)") // expiration should be 90 millis based on the event time date val expire3 = new WrappedRunnable(90L) 
ex.schedule(ArgumentMatchers.any[Runnable](), ArgumentMatchers.eq(90L), ArgumentMatchers.eq(TimeUnit.MILLISECONDS)) responds { args => expire3.runnable = args.asInstanceOf[Array[AnyRef]](0).asInstanceOf[Runnable] new ScheduledExpiry(expire3) } cache.put(sf3) expire3.runnable must not(beNull) there was one(ex).schedule(ArgumentMatchers.eq(expire3.runnable), ArgumentMatchers.eq(90L), ArgumentMatchers.eq(TimeUnit.MILLISECONDS)) cache.query("1") must beSome(sf3.asInstanceOf[SimpleFeature]) cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")).toSeq mustEqual Seq(sf3) // move time forward and run the expiration ticker.millis += 100L expire3.runnable.run() cache.query("1") must beNone cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")) must beEmpty // verify that the second feature didn't trigger an expiration, as it was ignored due to event time there were two(ex).schedule(ArgumentMatchers.any[Runnable](), ArgumentMatchers.anyLong(), ArgumentMatchers.eq(TimeUnit.MILLISECONDS)) } } "expire by event time with ordering (no mocking)" in { val ev = EventTimeConfig(Duration("100ms"), "dtg", ordered = true) val config = IndexConfig(ev, res, Seq.empty, Seq.empty, lazyDeserialization = true, None) WithClose(KafkaFeatureCache(sft, config)) { cache => val sf1 = ScalaSimpleFeature.create(sft, "1", "first", new Date(), "POINT (-78.0 35.0)") cache.put(sf1) cache.query("1") must beSome(sf1.asInstanceOf[SimpleFeature]) cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")).toSeq mustEqual Seq(sf1) eventually(cache.query("1") must beNone) cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")).toSeq must beEmpty } } "expire by event time without ordering" in { val ex = mock[ScheduledExecutorService] val ticker = new MockTicker() val ev = EventTimeConfig(Duration("100ms"), "dtg", ordered = false) val config = IndexConfig(ev, res, Seq.empty, Seq.empty, lazyDeserialization = true, Some((ex, ticker))) WithClose(KafkaFeatureCache(sft, config)) { cache => 
val sf1 = ScalaSimpleFeature.create(sft, "1", "first", new Date(ticker.millis), "POINT (-78.0 35.0)") val expire1 = new WrappedRunnable(100L) ex.schedule(ArgumentMatchers.any[Runnable](), ArgumentMatchers.eq(100L), ArgumentMatchers.eq(TimeUnit.MILLISECONDS)) responds { args => expire1.runnable = args.asInstanceOf[Array[AnyRef]](0).asInstanceOf[Runnable] new ScheduledExpiry(expire1) } cache.put(sf1) expire1.runnable must not(beNull) there was one(ex).schedule(ArgumentMatchers.eq(expire1.runnable), ArgumentMatchers.eq(100L), ArgumentMatchers.eq(TimeUnit.MILLISECONDS)) ticker.millis += 10L val sf2 = ScalaSimpleFeature.create(sft, "1", "second", new Date(ticker.millis - 50), "POINT (-78.0 35.0)") val expire2 = new WrappedRunnable(50L) ex.schedule(ArgumentMatchers.any[Runnable](), ArgumentMatchers.eq(50L), ArgumentMatchers.eq(TimeUnit.MILLISECONDS)) responds { args => expire2.runnable = args.asInstanceOf[Array[AnyRef]](0).asInstanceOf[Runnable] new ScheduledExpiry(expire2) } cache.put(sf2) expire2.runnable must not(beNull) there was one(ex).schedule(ArgumentMatchers.eq(expire2.runnable), ArgumentMatchers.eq(50L), ArgumentMatchers.eq(TimeUnit.MILLISECONDS)) expire1.cancelled must beTrue cache.query("1") must beSome(sf2.asInstanceOf[SimpleFeature]) cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")).toSeq mustEqual Seq(sf2) // move time forward and run the expiration ticker.millis += 100L expire2.runnable.run() cache.query("1") must beNone cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")) must beEmpty val sf3 = ScalaSimpleFeature.create(sft, "1", "third", new Date(ticker.millis + 1000), "POINT (-78.0 35.0)") // expiration should be 1100 millis based on the event time date val expire3 = new WrappedRunnable(1100L) ex.schedule(ArgumentMatchers.any[Runnable](), ArgumentMatchers.eq(1100L), ArgumentMatchers.eq(TimeUnit.MILLISECONDS)) responds { args => expire3.runnable = args.asInstanceOf[Array[AnyRef]](0).asInstanceOf[Runnable] new 
ScheduledExpiry(expire3) } cache.put(sf3) expire3.runnable must not(beNull) there was one(ex).schedule(ArgumentMatchers.eq(expire3.runnable), ArgumentMatchers.eq(1100L), ArgumentMatchers.eq(TimeUnit.MILLISECONDS)) cache.query("1") must beSome(sf3.asInstanceOf[SimpleFeature]) cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")).toSeq mustEqual Seq(sf3) // move time forward and run the expiration ticker.millis += 1100L expire3.runnable.run() cache.query("1") must beNone cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")) must beEmpty } } "expire by advanced event time without ordering" in { val ex = mock[ScheduledExecutorService] val ticker = new MockTicker() val ev = FilteredExpiryConfig(Seq( "trackId = 'first'" -> EventTimeConfig(Duration("100ms"), "dtg", ordered = false), "trackId = 'second'" -> EventTimeConfig(Duration("150ms"), "dtg", ordered = false), "INCLUDE" -> EventTimeConfig(Duration("200ms"), "dtg", ordered = false) )) val config = IndexConfig(ev, res, Seq.empty, Seq.empty, lazyDeserialization = true, Some((ex, ticker))) WithClose(KafkaFeatureCache(sft, config)) { cache => val sf1 = ScalaSimpleFeature.create(sft, "1", "first", new Date(ticker.millis), "POINT (-78.0 35.0)") val expire1 = new WrappedRunnable(100L) ex.schedule(ArgumentMatchers.any[Runnable](), ArgumentMatchers.eq(100L), ArgumentMatchers.eq(TimeUnit.MILLISECONDS)) responds { args => expire1.runnable = args.asInstanceOf[Array[AnyRef]](0).asInstanceOf[Runnable] new ScheduledExpiry(expire1) } cache.put(sf1) expire1.runnable must not(beNull) there was one(ex).schedule(ArgumentMatchers.eq(expire1.runnable), ArgumentMatchers.eq(100L), ArgumentMatchers.eq(TimeUnit.MILLISECONDS)) ticker.millis += 10L val sf2 = ScalaSimpleFeature.create(sft, "1", "second", new Date(ticker.millis - 30), "POINT (-78.0 35.0)") val expire2 = new WrappedRunnable(120L) ex.schedule(ArgumentMatchers.any[Runnable](), ArgumentMatchers.eq(120L), ArgumentMatchers.eq(TimeUnit.MILLISECONDS)) responds { args => 
expire2.runnable = args.asInstanceOf[Array[AnyRef]](0).asInstanceOf[Runnable] new ScheduledExpiry(expire2) } cache.put(sf2) expire2.runnable must not(beNull) there was one(ex).schedule(ArgumentMatchers.eq(expire2.runnable), ArgumentMatchers.eq(120L), ArgumentMatchers.eq(TimeUnit.MILLISECONDS)) expire1.cancelled must beTrue cache.query("1") must beSome(sf2.asInstanceOf[SimpleFeature]) cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")).toSeq mustEqual Seq(sf2) // move time forward and run the expiration ticker.millis += 200L expire2.runnable.run() cache.query("1") must beNone cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")) must beEmpty val sf3 = ScalaSimpleFeature.create(sft, "1", "third", new Date(ticker.millis + 1000), "POINT (-78.0 35.0)") // expiration should be 1200 millis based on the event time date val expire3 = new WrappedRunnable(1200L) ex.schedule(ArgumentMatchers.any[Runnable](), ArgumentMatchers.eq(1200L), ArgumentMatchers.eq(TimeUnit.MILLISECONDS)) responds { args => expire3.runnable = args.asInstanceOf[Array[AnyRef]](0).asInstanceOf[Runnable] new ScheduledExpiry(expire3) } cache.put(sf3) expire3.runnable must not(beNull) there was one(ex).schedule(ArgumentMatchers.eq(expire3.runnable), ArgumentMatchers.eq(1200L), ArgumentMatchers.eq(TimeUnit.MILLISECONDS)) cache.query("1") must beSome(sf3.asInstanceOf[SimpleFeature]) cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")).toSeq mustEqual Seq(sf3) // move time forward and run the expiration ticker.millis += 1200L expire3.runnable.run() cache.query("1") must beNone cache.query(ECQL.toFilter("bbox(geom,-79.0,34.0,-77.0,36.0)")) must beEmpty } } } }
locationtech/geomesa
geomesa-kafka/geomesa-kafka-datastore/src/test/scala/org/locationtech/geomesa/kafka/index/EventTimeFeatureCacheTest.scala
Scala
apache-2.0
15,349
package com.gravity.hadoop import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.{PathFilter, Path, FileSystem} import org.apache.hadoop.io.{SequenceFile, Writable} import java.io._ import scala.collection.mutable.Buffer /** * Convenience methods for reading and writing files to and from hdfs. */ package object hdfs { implicit def asRichFileSystem(fs: FileSystem) = new RichFileSystem(fs) /** * Gives you a file writer into the local cluster hdfs instance * @param relpath The relative path * @param recreateIfPresent If true, will delete the file if it already exists * @param work A function that works with the output. The output will be closed when this function goes out of scope. * @return */ def withHdfsWriter(fs: FileSystem, relpath: String, recreateIfPresent: Boolean = true)(work: (BufferedWriter) => Unit) { val path = new Path(relpath) val fileSystem = fs if (recreateIfPresent) { if (fileSystem.exists(path)) { fileSystem.delete(path) } } val output = new BufferedWriter(new OutputStreamWriter(fileSystem.create(path))) try { work(output) } finally { output.close() } } def perPartSequenceFileKV[K <: Writable, V <: Writable](fs: FileSystem, relpath: String, conf: Configuration,fileBeginsWith:String="part-")(key: K, value: V)(line: (K, V) => Unit) { val glob = new Path(relpath) val files = fs.listStatus(glob, new PathFilter { override def accept(path: Path) = path.getName.startsWith(fileBeginsWith) }) for (file <- files) { perSequenceFileKV(fs, file.getPath.toString, conf)(key, value)(line) } } def perSequenceFileKV[K <: Writable, V <: Writable](fs: FileSystem, relpath: String, conf: Configuration)(key: K, value: V)(line: (K, V) => Unit) { val reader = new SequenceFile.Reader(fs, new Path(relpath), conf) try { while (reader.next(key, value)) { line(key, value) } } finally { reader.close() } } /** * Allows you to work with a reader opened into an hdfs file on the test cluster. 
* @param relpath The path to the file * @param work The work you will do * @tparam A If you want to return a value after the work, here it is. * @return */ def withHdfsReader[A](fs: FileSystem, relpath: String)(work: (BufferedReader) => A): A = { val path = new Path(relpath) val input = new BufferedReader(new InputStreamReader(fs.open(path))) try { work(input) } finally { input.close() } } def withHdfsDirectoryReader[A](fs: FileSystem, relpath: String)(work: (BufferedReader) => A): A = { val path = new Path(relpath) val input = new BufferedReader(new InputStreamReader(new RichFileSystem(fs).openParts(path))) try { work(input) } finally { input.close() } } /** * Reads a file into a buffer, allowing you to decide what's in the buffer depending on the output of the linereader function * @param relpath Path to local hdfs buffer * @param linereader Function to return an element in the buffer, given the line fo the file * @tparam A * @return */ def perHdfsLineToSeq[A](fs: FileSystem, relpath: String)(linereader: (String) => A): Seq[A] = { val result = Buffer[A]() withHdfsReader(fs, relpath) { input => var done = false while (!done) { val line = input.readLine() if (line == null) { done = true } else { result += linereader(line) } } } result.toSeq } /** * Reads a file line by line. 
If you want to have the results in a buffer, use perHdfsLineToSeq * @param relpath * @param linereader * @tparam A * @return */ def perHdfsLine[A](fs: FileSystem, relpath: String)(linereader: (String) => Unit) { withHdfsReader(fs, relpath) { input => var done = false while (!done) { val line = input.readLine() if (line == null) { done = true } else { linereader(line) } } } } /** * For each line in a directory of files * @param relpath Path to files (or glob path) * @param linereader Will be invoked once per line with a string representation * @return Bupkiss */ def perHdfsDirectoryLine(fs: FileSystem, relpath: String)(linereader: (String) => Unit) { withHdfsDirectoryReader(fs, relpath) { input => var done = false while (!done) { val line = input.readLine() if (line == null) { done = true } else { linereader(line) } } } } }
bikash/HPaste
src/main/scala/com/gravity/hadoop/hdfs.scala
Scala
apache-2.0
4,741
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.producer import kafka.utils._ import org.apache.kafka.common.utils.Utils class DefaultPartitioner(props: VerifiableProperties = null) extends Partitioner { private val random = new java.util.Random def partition(key: Any, numPartitions: Int): Int = { Utils.abs(key.hashCode) % numPartitions } }
eljefe6a/kafka
core/src/main/scala/kafka/producer/DefaultPartitioner.scala
Scala
apache-2.0
1,127
/* * Copyright 2016 Otto (GmbH & Co KG) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.schedoscope.lineage import org.apache.calcite.rel.`type`.{RelDataType, RelDataTypeFactory} import org.apache.calcite.schema.Schema.TableType import org.apache.calcite.schema.impl.AbstractTable import org.apache.calcite.sql.`type`.SqlTypeName import org.schedoscope.dsl.{Structure, View} import scala.collection.JavaConverters._ /** * Table based on reflection over [[org.schedoscope.dsl.View]]s. * <p> * Uses the following field type conversions: * <table> * <thead> * <tr> * <th>ScalaType</th> * <th>→</th> * <th>RelDataType</th> * </tr> * </thead> * <tbody> * <tr> * <td>Byte</td> * <td>→</td> * <td>SMALLINT</td> * </tr> * <tr> * <td>Int</td> * <td>→</td> * <td>INTEGER</td> * </tr> * <tr> * <td>Long</td> * <td>→</td> * <td>BIGINT</td> * </tr> * <tr> * <td>Boolean</td> * <td>→</td> * <td>BOOLEAN</td> * </tr> * <tr> * <td>Double</td> * <td>→</td> * <td>DOUBLE</td> * </tr> * <tr> * <td>Float</td> * <td>→</td> * <td>FLOAT</td> * </tr> * <tr> * <td>String</td> * <td>→</td> * <td>VARCHAR</td> * </tr> * <tr> * <td>List[A]</td> * <td>→</td> * <td>ArraySqlType[A]</td> * </tr> * <tr> * <td>Map[K,V]</td> * <td>→</td> * <td>MapSqlType[K,V]</td> * </tr> * <tr> * <td>Structure</td> * <td>→</td> * <td>RelRecordType</td> * </tr> * <tr> * <td>_</td> * <td>→</td> * <td>ANY</td> * </tr> * </tbody> * </table> * * @author Jan Hicken (jhicken) */ case class SchedoscopeTable(view: View) extends 
AbstractTable { override val getJdbcTableType: TableType = TableType.VIEW override def getRowType(typeFactory: RelDataTypeFactory): RelDataType = { typeFactory.createStructType( view.fieldsAndParameters.map(f => relDataTypeOf(f.t, typeFactory)).asJava, view.fieldsAndParameters.map(_.n).asJava ) } private def relDataTypeOf(scalaType: Manifest[_], typeFactory: RelDataTypeFactory): RelDataType = { if (scalaType.runtimeClass == classOf[List[_]]) typeFactory.createArrayType( relDataTypeOf(scalaType.typeArguments.head, typeFactory), -1 ) else if (scalaType.runtimeClass == classOf[Map[_, _]]) typeFactory.createMapType( relDataTypeOf(scalaType.typeArguments.head, typeFactory), relDataTypeOf(scalaType.typeArguments(1), typeFactory) ) else if (classOf[Structure].isAssignableFrom(scalaType.runtimeClass)) { val struct = scalaType.runtimeClass.newInstance().asInstanceOf[Structure] typeFactory.createStructType( struct.fields.map(f => relDataTypeOf(f.t, typeFactory)).asJava, struct.fields.map(_.n).asJava ) } else if (scalaType == manifest[Byte]) typeFactory.createSqlType(SqlTypeName.SMALLINT) else if (scalaType == manifest[Int]) typeFactory.createSqlType(SqlTypeName.INTEGER) else if (scalaType == manifest[Long]) typeFactory.createSqlType(SqlTypeName.BIGINT) else if (scalaType == manifest[Boolean]) typeFactory.createSqlType(SqlTypeName.BOOLEAN) else if (scalaType == manifest[Double]) typeFactory.createSqlType(SqlTypeName.DOUBLE) else if (scalaType == manifest[Float]) typeFactory.createSqlType(SqlTypeName.FLOAT) else if (scalaType == manifest[String]) typeFactory.createSqlType(SqlTypeName.VARCHAR) else typeFactory.createSqlType(SqlTypeName.ANY) } }
utzwestermann/schedoscope
schedoscope-core/src/main/scala/org/schedoscope/lineage/SchedoscopeTable.scala
Scala
apache-2.0
4,003
package finloader

import com.typesafe.config.ConfigFactory
import java.io.File
import scala.slick.lifted.TableQuery
import scala.slick.jdbc.JdbcBackend.Database
import finloader.entities._
import scala.slick.driver.JdbcDriver.simple._

/**
 * Shared helpers for integration tests: configuration and a lazily
 * initialized test database whose schema is created on first access.
 *
 * @author Paul Lysak
 *         Date: 04.07.13
 *         Time: 23:20
 */
object ITUtils {
  /** Integration-test configuration, read from `it.conf` in the working directory. */
  lazy val config = ConfigFactory.parseFile(new File("it.conf"))

  /** Test database handle; the schema is created once, the first time this is touched. */
  lazy val db = {
    val database = Database.forURL(
      config.getString("database.testUrl"),
      driver = config.getString("database.driver"))
    createSchema(database)
    database
  }

  /** Creates all tables used by the integration tests, in declaration order. */
  private def createSchema(database: Database) {
    println("creating DB schema...")
    database withSession { implicit session =>
      Seq(
        TableQuery[Expenses].ddl,
        TableQuery[Balances].ddl,
        TableQuery[Incomes].ddl,
        TableQuery[ExchangeRates].ddl,
        TableQuery[ExpenseTags].ddl,
        TableQuery[FileInfos].ddl
      ).foreach(_.create)
    }
    println("DB schema created")
  }
}
paul-lysak/finloader
src/it/scala/finloader/ITUtils.scala
Scala
apache-2.0
1,050
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.hive.execution import java.io.IOException import java.net.URI import java.text.SimpleDateFormat import java.util.{Date, Locale, Random} import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.{FileSystem, Path} import org.apache.hadoop.hive.common.FileUtils import org.apache.hadoop.hive.ql.exec.TaskRunner import org.apache.hadoop.hive.ql.ErrorMsg import org.apache.hadoop.mapred.{FileOutputFormat, JobConf} import org.apache.spark.rdd.RDD import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions.Attribute import org.apache.spark.sql.catalyst.plans.physical.Partitioning import org.apache.spark.sql.execution.{SparkPlan, UnaryExecNode} import org.apache.spark.sql.hive._ import org.apache.spark.sql.hive.HiveShim.{ShimFileSinkDesc => FileSinkDesc} import org.apache.spark.SparkException import org.apache.spark.util.SerializableJobConf /** * Command for writing data out to a Hive table. * * This class is mostly a mess, for legacy reasons (since it evolved in organic ways and had to * follow Hive's internal implementations closely, which itself was a mess too). 
Please don't * blame Reynold for this! He was just moving code around! * * In the future we should converge the write path for Hive with the normal data source write path, * as defined in [[org.apache.spark.sql.execution.datasources.FileFormatWriter]]. * * @param table the logical plan representing the table. In the future this should be a * [[org.apache.spark.sql.catalyst.catalog.CatalogTable]] once we converge Hive tables * and data source tables. * @param partition a map from the partition key to the partition value (optional). If the partition * value is optional, dynamic partition insert will be performed. * As an example, `INSERT INTO tbl PARTITION (a=1, b=2) AS ...` would have * * {{{ * Map('a' -> Some('1'), 'b' -> Some('2')) * }}} * * and `INSERT INTO tbl PARTITION (a=1, b) AS ...` * would have * * {{{ * Map('a' -> Some('1'), 'b' -> None) * }}}. * @param child the logical plan representing data to write to. * @param overwrite overwrite existing table or partitions. * @param ifNotExists If true, only write if the table or partition does not exist. 
*/
case class InsertIntoHiveTable(
    table: MetastoreRelation,
    partition: Map[String, Option[String]],
    child: SparkPlan,
    overwrite: Boolean,
    ifNotExists: Boolean) extends UnaryExecNode {

  @transient private val sessionState = sqlContext.sessionState.asInstanceOf[HiveSessionState]
  @transient private val externalCatalog = sqlContext.sharedState.externalCatalog

  // This node produces no rows itself; the actual insert happens as a side
  // effect in `sideEffectResult` below.
  def output: Seq[Attribute] = Seq.empty

  val hadoopConf = sessionState.newHadoopConf()
  // Base name for the temporary staging directory data is written to before
  // being moved/loaded into the final table or partition location.
  val stagingDir = hadoopConf.get("hive.exec.stagingdir", ".hive-staging")

  // Unique suffix used to name the staging directory for this execution,
  // e.g. "hive_2016-01-01_00-00-00_000_<random>".
  private def executionId: String = {
    val rand: Random = new Random
    val format = new SimpleDateFormat("yyyy-MM-dd_HH-mm-ss_SSS", Locale.US)
    "hive_" + format.format(new Date) + "_" + Math.abs(rand.nextLong)
  }

  // Creates (and schedules for deletion on JVM exit) a per-execution staging
  // directory next to `inputPath`. If `inputPath` already contains the staging
  // dir name, the staging root is reused up to and including that component.
  private def getStagingDir(inputPath: Path, hadoopConf: Configuration): Path = {
    val inputPathUri: URI = inputPath.toUri
    val inputPathName: String = inputPathUri.getPath
    val fs: FileSystem = inputPath.getFileSystem(hadoopConf)
    val stagingPathName: String =
      if (inputPathName.indexOf(stagingDir) == -1) {
        new Path(inputPathName, stagingDir).toString
      } else {
        inputPathName.substring(0, inputPathName.indexOf(stagingDir) + stagingDir.length)
      }
    val dir: Path =
      fs.makeQualified(
        new Path(stagingPathName + "_" + executionId + "-" + TaskRunner.getTaskRunnerID))
    logDebug("Created staging dir = " + dir + " for path = " + inputPath)
    try {
      if (!FileUtils.mkdir(fs, dir, true, hadoopConf)) {
        throw new IllegalStateException("Cannot create staging directory  '" + dir.toString + "'")
      }
      fs.deleteOnExit(dir)
    } catch {
      case e: IOException =>
        throw new RuntimeException(
          "Cannot create staging directory '" + dir.toString + "': " + e.getMessage, e)
    }
    return dir
  }

  // Staging dir for tables whose location is on an external (non-default) file system.
  private def getExternalScratchDir(extURI: URI, hadoopConf: Configuration): Path = {
    getStagingDir(new Path(extURI.getScheme, extURI.getAuthority, extURI.getPath), hadoopConf)
  }

  // Returns the temporary "-ext-10000" output path used by the write job.
  // viewfs does not allow qualified paths across mount points, hence the
  // relative variant below for that scheme.
  def getExternalTmpPath(path: Path, hadoopConf: Configuration): Path = {
    val extURI: URI = path.toUri
    if (extURI.getScheme == "viewfs") {
      getExtTmpPathRelTo(path.getParent, hadoopConf)
    } else {
      new Path(getExternalScratchDir(extURI, hadoopConf), "-ext-10000")
    }
  }

  def getExtTmpPathRelTo(path: Path, hadoopConf: Configuration): Path = {
    new Path(getStagingDir(path, hadoopConf), "-ext-10000") // Hive uses 10000
  }

  // Configures the job to use the table's output format and runs the write
  // job via `writerContainer`, committing it on success.
  private def saveAsHiveFile(
      rdd: RDD[InternalRow],
      valueClass: Class[_],
      fileSinkConf: FileSinkDesc,
      conf: SerializableJobConf,
      writerContainer: SparkHiveWriterContainer): Unit = {
    assert(valueClass != null, "Output value class not set")
    conf.value.setOutputValueClass(valueClass)

    val outputFileFormatClassName = fileSinkConf.getTableInfo.getOutputFileFormatClassName
    assert(outputFileFormatClassName != null, "Output format class not set")
    conf.value.set("mapred.output.format.class", outputFileFormatClassName)

    FileOutputFormat.setOutputPath(
      conf.value,
      SparkHiveWriterContainer.createPathFromString(fileSinkConf.getDirName(), conf.value))
    log.debug("Saving as hadoop file of type " + valueClass.getSimpleName)
    writerContainer.driverSideSetup()
    sqlContext.sparkContext.runJob(rdd, writerContainer.writeToFile _)
    writerContainer.commitJob()
  }

  /**
   * Inserts all the rows in the table into Hive.  Row objects are properly serialized with the
   * `org.apache.hadoop.hive.serde2.SerDe` and the
   * `org.apache.hadoop.mapred.OutputFormat` provided by the table definition.
   *
   * Note: this is run once and then kept to avoid double insertions.
   */
  protected[sql] lazy val sideEffectResult: Seq[InternalRow] = {
    // Have to pass the TableDesc object to RDD.mapPartitions and then instantiate new serializer
    // instances within the closure, since Serializer is not serializable while TableDesc is.
    val tableDesc = table.tableDesc
    val tableLocation = table.hiveQlTable.getDataLocation
    val tmpLocation = getExternalTmpPath(tableLocation, hadoopConf)
    val fileSinkConf = new FileSinkDesc(tmpLocation.toString, tableDesc, false)
    val isCompressed = hadoopConf.get("hive.exec.compress.output", "false").toBoolean

    if (isCompressed) {
      // Please note that isCompressed, "mapred.output.compress", "mapred.output.compression.codec",
      // and "mapred.output.compression.type" have no impact on ORC because it uses table properties
      // to store compression information.
      hadoopConf.set("mapred.output.compress", "true")
      fileSinkConf.setCompressed(true)
      fileSinkConf.setCompressCodec(hadoopConf.get("mapred.output.compression.codec"))
      fileSinkConf.setCompressType(hadoopConf.get("mapred.output.compression.type"))
    }

    // A partition value of None marks a dynamic partition column.
    val numDynamicPartitions = partition.values.count(_.isEmpty)
    val numStaticPartitions = partition.values.count(_.nonEmpty)
    val partitionSpec = partition.map {
      case (key, Some(value)) => key -> value
      case (key, None) => key -> ""
    }

    // All partition column names in the format of "<column name 1>/<column name 2>/..."
    val partitionColumns = fileSinkConf.getTableInfo.getProperties.getProperty("partition_columns")
    val partitionColumnNames = Option(partitionColumns).map(_.split("/")).getOrElse(Array.empty)

    // By this time, the partition map must match the table's partition columns
    if (partitionColumnNames.toSet != partition.keySet) {
      throw new SparkException(
        s"""Requested partitioning does not match the ${table.tableName} table:
           |Requested partitions: ${partition.keys.mkString(",")}
           |Table partitions: ${table.partitionKeys.map(_.name).mkString(",")}""".stripMargin)
    }

    // Validate partition spec if there exist any dynamic partitions
    if (numDynamicPartitions > 0) {
      // Report error if dynamic partitioning is not enabled
      if (!hadoopConf.get("hive.exec.dynamic.partition", "true").toBoolean) {
        throw new SparkException(ErrorMsg.DYNAMIC_PARTITION_DISABLED.getMsg)
      }

      // Report error if dynamic partition strict mode is on but no static partition is found
      if (numStaticPartitions == 0 &&
        hadoopConf.get("hive.exec.dynamic.partition.mode", "strict").equalsIgnoreCase("strict")) {
        throw new SparkException(ErrorMsg.DYNAMIC_PARTITION_STRICT_MODE.getMsg)
      }

      // Report error if any static partition appears after a dynamic partition
      val isDynamic = partitionColumnNames.map(partitionSpec(_).isEmpty)
      if (isDynamic.init.zip(isDynamic.tail).contains((true, false))) {
        throw new AnalysisException(ErrorMsg.PARTITION_DYN_STA_ORDER.getMsg)
      }
    }

    val jobConf = new JobConf(hadoopConf)
    val jobConfSer = new SerializableJobConf(jobConf)

    // When speculation is on and output committer class name contains "Direct", we should warn
    // users that they may lose data if they are using a direct output committer.
    val speculationEnabled = sqlContext.sparkContext.conf.getBoolean("spark.speculation", false)
    val outputCommitterClass = jobConf.get("mapred.output.committer.class", "")
    if (speculationEnabled && outputCommitterClass.contains("Direct")) {
      val warningMessage =
        s"$outputCommitterClass may be an output committer that writes data directly to " +
          "the final location. Because speculation is enabled, this output committer may " +
          "cause data loss (see the case in SPARK-10063). If possible, please use an output " +
          "committer that does not have this behavior (e.g. FileOutputCommitter)."
      logWarning(warningMessage)
    }

    // A dynamic-partition insert needs the per-partition writer container.
    val writerContainer = if (numDynamicPartitions > 0) {
      val dynamicPartColNames = partitionColumnNames.takeRight(numDynamicPartitions)
      new SparkHiveDynamicPartitionWriterContainer(
        jobConf,
        fileSinkConf,
        dynamicPartColNames,
        child.output)
    } else {
      new SparkHiveWriterContainer(
        jobConf,
        fileSinkConf,
        child.output)
    }

    @transient val outputClass = writerContainer.newSerializer(table.tableDesc).getSerializedClass
    saveAsHiveFile(child.execute(), outputClass, fileSinkConf, jobConfSer, writerContainer)

    val outputPath = FileOutputFormat.getOutputPath(jobConf)
    // TODO: Correctly set holdDDLTime.
    // In most of the time, we should have holdDDLTime = false.
    // holdDDLTime will be true when TOK_HOLD_DDLTIME presents in the query as a hint.
    val holdDDLTime = false
    if (partition.nonEmpty) {
      if (numDynamicPartitions > 0) {
        externalCatalog.loadDynamicPartitions(
          db = table.catalogTable.database,
          table = table.catalogTable.identifier.table,
          outputPath.toString,
          partitionSpec,
          overwrite,
          numDynamicPartitions,
          holdDDLTime = holdDDLTime)
      } else {
        // scalastyle:off
        // ifNotExists is only valid with static partition, refer to
        // https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DML#LanguageManualDML-InsertingdataintoHiveTablesfromqueries
        // scalastyle:on
        val oldPart =
          externalCatalog.getPartitionOption(
            table.catalogTable.database,
            table.catalogTable.identifier.table,
            partitionSpec)

        var doHiveOverwrite = overwrite

        if (oldPart.isEmpty || !ifNotExists) {
          // SPARK-18107: Insert overwrite runs much slower than hive-client.
          // Newer Hive largely improves insert overwrite performance. As Spark uses older Hive
          // version and we may not want to catch up new Hive version every time. We delete the
          // Hive partition first and then load data file into the Hive partition.
          if (oldPart.nonEmpty && overwrite) {
            oldPart.get.storage.locationUri.foreach { uri =>
              val partitionPath = new Path(uri)
              val fs = partitionPath.getFileSystem(hadoopConf)
              if (fs.exists(partitionPath)) {
                if (!fs.delete(partitionPath, true)) {
                  throw new RuntimeException(
                    "Cannot remove partition directory '" + partitionPath.toString)
                }
                // Don't let Hive do overwrite operation since it is slower.
                doHiveOverwrite = false
              }
            }
          }

          // inheritTableSpecs is set to true. It should be set to false for an IMPORT query
          // which is currently considered as a Hive native command.
          val inheritTableSpecs = true
          externalCatalog.loadPartition(
            table.catalogTable.database,
            table.catalogTable.identifier.table,
            outputPath.toString,
            partitionSpec,
            isOverwrite = doHiveOverwrite,
            holdDDLTime = holdDDLTime,
            inheritTableSpecs = inheritTableSpecs)
        }
      }
    } else {
      // Non-partitioned table: load the whole output directory.
      externalCatalog.loadTable(
        table.catalogTable.database,
        table.catalogTable.identifier.table,
        outputPath.toString, // TODO: URI
        overwrite,
        holdDDLTime)
    }

    // Invalidate the cache.
    sqlContext.sharedState.cacheManager.invalidateCache(table)
    sqlContext.sessionState.catalog.refreshTable(table.catalogTable.identifier)

    // It would be nice to just return the childRdd unchanged so insert operations could be chained,
    // however for now we return an empty list to simplify compatibility checks with hive, which
    // does not return anything for insert operations.
    // TODO: implement hive compatibility as rules.
    Seq.empty[InternalRow]
  }

  override def outputPartitioning: Partitioning = child.outputPartitioning

  override def executeCollect(): Array[InternalRow] = sideEffectResult.toArray

  protected override def doExecute(): RDD[InternalRow] = {
    sqlContext.sparkContext.parallelize(sideEffectResult.asInstanceOf[Seq[InternalRow]], 1)
  }
}
kimoonkim/spark
sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
Scala
apache-2.0
15,417
package gdg.blaze.ext.es

import java.util.concurrent.TimeUnit

import com.google.common.base.Stopwatch
import gdg.blaze._
import org.apache.spark.streaming.dstream.DStream
import org.elasticsearch.action.bulk.{BulkRequestBuilder, BulkResponse}
import org.elasticsearch.client.transport.TransportClient
import org.elasticsearch.common.settings.ImmutableSettings
import org.elasticsearch.common.transport.InetSocketTransportAddress

/**
 * Output plugin that is intended to flush messages to Elasticsearch in bulk.
 *
 * Configuration keys read here: `action`, `host` (comma-separated list),
 * `idle_flush_time` (seconds), `manage_template`, `node_name`, `protocol`
 * (`transport` or `http`), `index`, `index_type`, `port` (defaults to 9300
 * for transport, 9200 for http).
 *
 * NOTE(review): `process` only flushes a pending bulk request; nothing visible
 * here ever populates `bulk` or `tc` — presumably wired up elsewhere or
 * unfinished.
 */
class ElasticSearchOutput(config: PluginConfig) extends Output {
  val action = config.getString("action").getOrElse("index")
  // val bind_host = config.getString("bind_host")
  // val bind_port = config.getString("bind_port")
  // val cluster = config.getString("cluster")
  val host = config.getString("host")
  val idle_flush_time = config.getInt("idle_flush_time").getOrElse(1)
  val manage_template = config.getBool("manage_template").getOrElse(true)
  val node_name = config.getString("node_name")
  val protocol = config.getString("protocol").getOrElse("transport")
  val index = config.getInterString("index")
  val index_type = config.getInterString("index_type")

  // Default port per protocol: 9300 for the native transport, 9200 for HTTP.
  def defaultPort: String => Int = {
    case "transport" => 9300
    case "http" => 9200
  }

  val port = config.getInt("port").getOrElse(defaultPort(protocol))
  // Measures idle time between flushes; started on first flush.
  val timer = Stopwatch.createUnstarted()
  var tc: TransportClient = null

  class BulkSender {

  }

  // Pending bulk request, if any has been accumulated.
  var bulk: Option[BulkRequestBuilder] = None

  /**
   * Executes the pending bulk request if one exists and the idle-flush
   * interval has elapsed; throws if any bulk item failed.
   */
  def sendBulkIfNecessary() = {
    if(bulk.isDefined && timer.elapsed(TimeUnit.SECONDS) > idle_flush_time) {
      timer.reset().start()
      val bulkItemResponses: BulkResponse = bulk.get.execute().actionGet(60, TimeUnit.SECONDS)
      if(bulkItemResponses.hasFailures) {
        throw new IllegalStateException("Bulk Failure : " + bulkItemResponses.buildFailureMessage())
      }
    }
  }

  def process(dStream: DStream[Message]): Unit = {
    dStream.foreachRDD { rdd =>
      sendBulkIfNecessary()
    }
  }

  /**
   * Builds a [[TransportClient]] connected to every configured host.
   *
   * Fix: the previous implementation used `return` inside the `foreach`
   * closure, which exited after registering only the FIRST address of the
   * comma-separated `host` list; all hosts are now registered.
   */
  private[blaze] def createClient(): TransportClient = {
    val settings = ImmutableSettings.builder
      .put("client.transport.sniff", "false")
      .put("client.transport.ignore_cluster_name", "true")
      .build
    val client = new TransportClient(settings, false)
    host.get.split(",").foreach { h =>
      client.addTransportAddress(new InetSocketTransportAddress(h.trim, port))
    }
    client
  }

  override def apply(v1: DStream[Message]): Unit = ???
}

object ElasticSearchOutput extends PluginFactory[ElasticSearchOutput] {
  override def apply(config: PluginConfig, sc: BlazeContext): ElasticSearchOutput = ???
}
micahrupersburg/blaze-of-glory
src/main/scala/gdg/blaze/ext/es/ElasticSearchOutput.scala
Scala
apache-2.0
2,624
package com.twitter.finagle.memcached.replication

import _root_.java.lang.{Boolean => JBoolean, Long => JLong}
import scala.util.Random
import com.twitter.conversions.time._
import com.twitter.finagle.builder.{Cluster, ClientBuilder, ClientConfig}
import com.twitter.finagle.Group
import com.twitter.finagle.memcached._
import com.twitter.finagle.memcached.protocol.Value
import com.twitter.finagle.stats.{StatsReceiver, NullStatsReceiver}
import com.twitter.io.Buf
import com.twitter.util._

sealed trait ReplicationStatus[T]

/**
 * Indicating a consistent state across all replicas, which comes with the agreed consistent result;
 */
case class ConsistentReplication[T](result: T) extends ReplicationStatus[T]

/**
 * Indicating an inconsistent state across all replicas, which comes with a sequence of result
 * from all replicas; each replica's result can be either Return[T] or Throw[T]
 */
case class InconsistentReplication[T](resultSeq: Seq[Try[T]]) extends ReplicationStatus[T]

/**
 * indicating a failed state from all replicas, which comes with a sequence of failures from
 * all replicas;
 */
case class FailedReplication[T](failureSeq: Seq[Throw[T]]) extends ReplicationStatus[T]

/**
 * Wrapping underlying replicas cas unique values for replication purpose.
 */
trait ReplicaCasUnique
// One cas unique per replica, in replica order (consistent case).
case class RCasUnique(uniques: Seq[Buf]) extends ReplicaCasUnique
// A single replica's own cas unique (inconsistent case).
case class SCasUnique(casUnique: Buf) extends ReplicaCasUnique

/**
 * Replication client helper
 */
object ReplicationClient {
  // Builds one Ketama client per pool; blocks until each pool's cluster is ready.
  def newBaseReplicationClient(
    pools: Seq[Cluster[CacheNode]],
    clientBuilder: Option[ClientBuilder[_, _, _, _, ClientConfig.Yes]] = None,
    hashName: Option[String] = None,
    failureAccrualParams: (Int, () => Duration) = (5, () => 30.seconds)
  ) = {
    val underlyingClients = pools map { pool =>
      Await.result(pool.ready)
      KetamaClientBuilder(Group.fromCluster(pool), hashName, clientBuilder, failureAccrualParams).build()
    }
    val repStatsReceiver = clientBuilder map {
      _.statsReceiver.scope("cache_replication")
    } getOrElse(NullStatsReceiver)

    new BaseReplicationClient(underlyingClients, repStatsReceiver)
  }

  def newSimpleReplicationClient(
    pools: Seq[Cluster[CacheNode]],
    clientBuilder: Option[ClientBuilder[_, _, _, _, ClientConfig.Yes]] = None,
    hashName: Option[String] = None,
    failureAccrualParams: (Int, () => Duration) = (5, () => 30.seconds)
  ) = {
    new SimpleReplicationClient(newBaseReplicationClient(pools, clientBuilder, hashName, failureAccrualParams))
  }
}

/**
 * Base replication client. This client manages a list of base memcached clients representing
 * cache replicas. All replication API returns ReplicationStatus object indicating the underlying
 * replicas consistency state.
 * @param clients list of memcached clients with each one representing to a single cache pool
 * @param statsReceiver
 */
class BaseReplicationClient(clients: Seq[Client], statsReceiver: StatsReceiver = NullStatsReceiver) {
  private[this] val inconsistentContentCounter = statsReceiver.counter("inconsistent_content_count")
  private[this] val failedCounter = statsReceiver.counter("failed_replication_count")

  assert(!clients.isEmpty)

  /**
   * Return GetResult object that aggregates all hits, misses and failures.
   * This method will send the requested keys to each underlying replicas in a fixed order or
   * random order, and will stop passing along a key if a replica has returned 'hit'.
   *
   * TODO: introducing BackupRequestFilter to shorten the waiting period for secondary requests
   */
  private[memcached] def getResult(keys: Iterable[String], useRandomOrder: Boolean): Future[GetResult] = {
    val clientsInOrder = if (useRandomOrder) Random.shuffle(clients) else clients

    def loopGet(clients: Seq[Client], currentRes: GetResult): Future[GetResult] = clients match {
      // Stop early once every requested key has a hit (no misses/failures left).
      case _ if currentRes.misses.isEmpty && currentRes.failures.isEmpty => Future.value(currentRes)
      case Seq() => Future.value(currentRes)
      case Seq(c, tail@_*) =>
        // Only forward the keys that still miss or failed on earlier replicas.
        val missing = currentRes.misses ++ currentRes.failures.keySet
        c.getResult(missing) flatMap { case res =>
          val newRes = GetResult.merged(Seq(GetResult(currentRes.hits), res))
          loopGet(tail, newRes)
        }
    }

    loopGet(clientsInOrder, GetResult(Map.empty, keys.toSet))
  }

  /**
   * Get one value for the input keys from the underlying replicas.
   * For each input key, this operation searches all replicas in an order until it finds the
   * first hit result, or return the last replica's result.
   */
  def getOne(key: String, useRandomOrder: Boolean = false): Future[Option[Buf]] =
    getOne(Seq(key), useRandomOrder) map { _.values.headOption }

  def getOne(keys: Iterable[String], useRandomOrder: Boolean): Future[Map[String, Buf]] =
    getResult(keys, useRandomOrder) flatMap { result =>
      if (result.failures.nonEmpty)
        Future.exception(result.failures.values.head)
      else
        Future.value(result.values)
    }

  /**
   * Get replication status for the input keys from the underlying replicas.
   * For each input key, this operation returns the aggregated replication status after requesting
   * all replicas.
   */
  def getAll(key: String): Future[ReplicationStatus[Option[Buf]]] =
    getAll(Seq(key)) map { _.values.head }

  def getAll(keys: Iterable[String]): Future[Map[String, ReplicationStatus[Option[Buf]]]] = {
    val keySet = keys.toSet
    Future.collect(clients map { _.getResult(keySet) }) map { results: Seq[GetResult] =>
      keySet.map { k =>
        // Every key ends up in exactly one of hits/misses/failures, so this
        // partial match is exhaustive by construction of GetResult.
        val replicasResult = results map {
          case r if (r.hits.contains(k)) => Return(Some(r.hits.get(k).get.value))
          case r if (r.misses.contains(k)) => Return(None)
          case r if (r.failures.contains(k)) => Throw(r.failures.get(k).get)
        }
        k -> toReplicationStatus(replicasResult)
      }.toMap
    }
  }

  /**
   * Get replication status for the input keys and their checksum. The aggregated results returned
   * can be either of these three cases:
   * - ConsistentReplication, indicating consistent value across all replicas, which comes with
   * an aggregated cas unique to be used for CAS;
   *
   * - InconsistentReplication, indicating inconsistent values across all replicas, which comes
   * with each replica's own cas unique id;
   *
   * - FailedReplication, indicating failures from all replicas;
   */
  def getsAll(key: String): Future[ReplicationStatus[Option[(Buf, ReplicaCasUnique)]]] =
    getsAll(Seq(key)) map { _.values.head }

  def getsAll(keys: Iterable[String]): Future[Map[String, ReplicationStatus[Option[(Buf, ReplicaCasUnique)]]]] = {
    val keySet = keys.toSet
    Future.collect(clients map { _.getsResult(keySet) }) map { results: Seq[GetsResult] =>
      keySet.map { k =>
        val replicasResult = results map {
          case r if (r.hits.contains(k)) => Return(Some(r.hits.get(k).get.value))
          case r if (r.misses.contains(k)) => Return(None)
          case r if (r.failures.contains(k)) => Throw(r.failures.get(k).get)
        }
        k -> attachCas(toReplicationStatus(replicasResult), results, k)
      }.toMap
    }
  }

  // attach replication cas unique to the result for clients to do following CAS;
  // if all replicas are consistent, a RCasUnique is attached,
  // otherwise individual SCasUnique is attached
  private[this] def attachCas(
    valueStatus: ReplicationStatus[Option[Buf]],
    underlyingResults: Seq[GetsResult],
    key: String
  ): ReplicationStatus[Option[(Buf, ReplicaCasUnique)]] =
    valueStatus match {
      case ConsistentReplication(Some(v)) =>
        // .get is safe here: a consistent Some(_) means every replica hit this key.
        val allReplicasCas = underlyingResults map {_.hits.get(key).get.casUnique.get}
        ConsistentReplication(Some((v, RCasUnique(allReplicasCas))))
      case ConsistentReplication(None) => ConsistentReplication(None)
      case InconsistentReplication(rs) =>
        // rs and underlyingResults are positionally aligned (same replica order).
        val transformed = rs.zip(underlyingResults) map {
          case (Return(Some(v)), r: GetsResult) =>
            val singleReplicaCas = r.hits.get(key).get.casUnique.get
            Return(Some((v, SCasUnique(singleReplicaCas))))
          case (Return(None), _) => Return(None)
          case (Throw(e), _) => Throw(e)
        }
        InconsistentReplication(transformed)
      case FailedReplication(fs) => FailedReplication(fs map { t => Throw(t.e)})
    }

  /**
   * Stores a key in all replicas and returns the aggregated replication status.
   */
  def set(key: String, value: Buf): Future[ReplicationStatus[Unit]] =
    set(key, 0, Time.epoch, value)

  def set(key: String, flags: Int, expiry: Time, value: Buf): Future[ReplicationStatus[Unit]] =
    collectAndResolve[Unit](_.set(key, flags, expiry, value))

  /**
   * Attempts to perform a CAS operation on all replicas, and returns the aggregated replication status.
   */
  def checkAndSet(key: String, value: Buf, casUniques: Seq[Buf]): Future[ReplicationStatus[CasResult]] =
    checkAndSet(key, 0, Time.epoch, value, casUniques)

  def checkAndSet(key: String, flags: Int, expiry: Time, value: Buf, casUniques: Seq[Buf]): Future[ReplicationStatus[CasResult]] = {
    // One cas unique per replica, positionally matched.
    assert(clients.size == casUniques.size)

    // cannot use collectAndResolve helper here as this is the only case where there's no common op
    Future.collect((clients zip casUniques) map {
      case (c, u) => c.checkAndSet(key, flags, expiry, value, u).transform(Future.value)
    }) map { toReplicationStatus }
  }

  /**
   * Remove a key and returns the aggregated replication status.
   */
  def delete(key: String): Future[ReplicationStatus[JBoolean]] =
    collectAndResolve[JBoolean](_.delete(key))

  /**
   * Store a key in all replicas but only if it doesn't already exist on the server, and returns
   * the aggregated replication status.
   */
  def add(key: String, value: Buf): Future[ReplicationStatus[JBoolean]] =
    add(key, 0, Time.epoch, value)

  def add(key: String, flags: Int, expiry: Time, value: Buf): Future[ReplicationStatus[JBoolean]] =
    collectAndResolve[JBoolean](_.add(key, flags, expiry, value))

  /**
   * Replace existing key in all replicas, and returns the aggregated replication status.
   */
  def replace(key: String, value: Buf): Future[ReplicationStatus[JBoolean]] =
    replace(key, 0, Time.epoch, value)

  def replace(key: String, flags: Int, expiry: Time, value: Buf): Future[ReplicationStatus[JBoolean]] =
    collectAndResolve[JBoolean](_.replace(key, flags, expiry, value))

  /**
   * Increment a key and returns the aggregated replication status.
   */
  def incr(key: String): Future[ReplicationStatus[Option[JLong]]] = incr(key, 1L)

  def incr(key: String, delta: Long): Future[ReplicationStatus[Option[JLong]]] =
    collectAndResolve[Option[JLong]](_.incr(key, delta))

  /**
   * Decrement a key and returns the aggregated replication status.
   */
  def decr(key: String): Future[ReplicationStatus[Option[JLong]]] = decr(key, 1L)

  def decr(key: String, delta: Long): Future[ReplicationStatus[Option[JLong]]] =
    collectAndResolve[Option[JLong]](_.decr(key, delta))

  /**
   * Unsupported operation yet
   */
  def append(key: String, value: Buf): Future[ReplicationStatus[JBoolean]] =
    append(key, 0, Time.epoch, value)

  def append(key: String, flags: Int, expiry: Time, value: Buf): Future[ReplicationStatus[JBoolean]] =
    throw new UnsupportedOperationException("append is not supported for cache replication client.")

  /**
   * Unsupported operation yet
   */
  def prepend(key: String, value: Buf): Future[ReplicationStatus[JBoolean]] =
    prepend(key, 0, Time.epoch, value)

  def prepend(key: String, flags: Int, expiry: Time, value: Buf): Future[ReplicationStatus[JBoolean]] =
    throw new UnsupportedOperationException("prepend is not supported for cache replication client.")

  /**
   * Unsupported operation yet
   */
  def stats(args: Option[String]): Future[Seq[String]] =
    throw new UnsupportedOperationException("stats is not supported for cache replication client.")

  def release() {
    clients foreach { _.release() }
  }

  /**
   * Translating the results sequence from all replicas into aggregated results, which can be
   * either of these three sub-types of ReplicationStatus:
   *
   * - ConsistentReplication, indicating a consistent state across all replicas, which comes with
   * the agreed consistent result;
   *
   * - InconsistentReplication, indicating an inconsistent state across all replicas, which comes
   * with a sequence of result from all replicas;
   *
   * - FailedReplication, indicating a failed state from all replicas, which comes with a sequence
   * of failures from all replicas;
   */
  private[this] def toReplicationStatus[T](results: Seq[Try[T]]): ReplicationStatus[T] = {
    results match {
      // All replicas succeeded with the same value.
      case _ if (results.forall(_.isReturn)) && (results.distinct.size == 1) =>
        ConsistentReplication(results.head.get())
      // At least one success, but values/outcomes differ.
      case _ if (results.exists(_.isReturn)) =>
        inconsistentContentCounter.incr()
        InconsistentReplication(results)
      // Every replica failed.
      case _ =>
        failedCounter.incr()
        FailedReplication(results collect {case t@Throw(_) => t})
    }
  }

  /**
   * Private helper to collect all underlying clients result for a given operation
   * and resolve them to the ReplicationStatus to tell the consistency
   */
  private[this] def collectAndResolve[T](op: Client => Future[T]) =
    Future.collect(clients map { op(_).transform(Future.value) }) map { toReplicationStatus }
}

/**
 * Simple replication client wrapper that's compatible with base memcached client.
 * This simple replication client handles inconsistent state across underlying replicas in a
 * naive way:
 * - operation would succeed only if it succeeds on all replicas
 * - inconsistent data across replicas will be treated as key missing
 * - any replica's failure will make the operation throw
 */
case class SimpleReplicationFailure(msg: String) extends Throwable(msg)

class SimpleReplicationClient(underlying: BaseReplicationClient) extends Client {
  def this(clients: Seq[Client], statsReceiver: StatsReceiver = NullStatsReceiver) =
    this(new BaseReplicationClient(clients, statsReceiver))

  private[this] val underlyingClient = underlying

  /**
   * Returns the first result found within all replicas, or miss/failure depends on the last replica
   */
  def getResult(keys: Iterable[String]): Future[GetResult] =
    underlyingClient.getResult(keys, useRandomOrder = false)

  /**
   * Only returns the consistent result from all replicas; if the data is inconsistent, this client simply
   * returns nothing just like key missing; if any failure occurs, this method returns failure
   * as there's a great chance the check-and-set won't succeed.
   */
  def getsResult(keys: Iterable[String]): Future[GetsResult] =
    underlyingClient.getsAll(keys) map { resultsMap =>
      val getsResultSeq = resultsMap map {
        case (key, ConsistentReplication(Some((value, RCasUnique(uniques))))) =>
          // Fold all replicas' cas uniques into one '|'-joined token; split
          // again in checkAndSet below.
          val newCas = uniques map { case Buf.Utf8(s) => s } mkString("|")
          val newValue = Value(Buf.Utf8(key), value, Some(Buf.Utf8(newCas)))
          GetsResult(GetResult(hits = Map(key -> newValue)))
        case (key, ConsistentReplication(None)) =>
          GetsResult(GetResult(misses = Set(key)))
        case (key, InconsistentReplication(resultsSeq)) if resultsSeq.forall(_.isReturn) =>
          // Inconsistent but no failures: treat as a miss.
          GetsResult(GetResult(misses = Set(key)))
        case (key, _) =>
          GetsResult(GetResult(failures = Map(key -> SimpleReplicationFailure("One or more underlying replica failed gets"))))
      }
      GetResult.merged(getsResultSeq.toSeq)
    }

  /**
   * Store a key in all replicas, succeed only if all replicas succeed.
   */
  def set(key: String, flags: Int, expiry: Time, value: Buf) =
    resolve[Unit]("set", _.set(key, flags, expiry, value), ())

  /**
   * Check and set a key, succeed only if all replicas succeed.
   */
  def checkAndSet(key: String, flags: Int, expiry: Time, value: Buf, casUnique: Buf): Future[CasResult] = {
    // Reverse of getsResult's encoding: one cas unique per replica.
    val Buf.Utf8(casUniqueStr) = casUnique
    val casUniqueBufs = casUniqueStr.split('|') map { Buf.Utf8(_) }
    resolve[CasResult]("checkAndSet", _.checkAndSet(key, flags, expiry, value, casUniqueBufs), CasResult.Stored)
  }

  /**
   * Delete a key from all replicas, succeed only if all replicas succeed.
   */
  def delete(key: String) = resolve[JBoolean]("delete", _.delete(key), false)

  /**
   * Add a new key to all replicas, succeed only if all replicas succeed.
   */
  def add(key: String, flags: Int, expiry: Time, value: Buf) =
    resolve[JBoolean]("add", _.add(key, flags, expiry, value), false)

  /**
   * Replace an existing key in all replicas, succeed only if all replicas succeed.
   */
  def replace(key: String, flags: Int, expiry: Time, value: Buf) =
    resolve[JBoolean]("replace", _.replace(key, flags, expiry, value), false)

  /**
   * Increase an existing key in all replicas, succeed only if all replicas succeed.
   */
  def incr(key: String, delta: Long): Future[Option[JLong]] =
    resolve[Option[JLong]]("incr", _.incr(key, delta), None)

  /**
   * Decrease an existing key in all replicas, succeed only if all replicas succeed.
   */
  def decr(key: String, delta: Long): Future[Option[JLong]] =
    resolve[Option[JLong]]("decr", _.decr(key, delta), None)

  /**
   * Private helper to resolve a replication operation result from BaseReplicationClient to
   * a single value type, with using given default value in case of data inconsistency.
   */
  private[this] def resolve[T](name: String, op: BaseReplicationClient => Future[ReplicationStatus[T]], default: T): Future[T] =
    op(underlyingClient) flatMap {
      case ConsistentReplication(r) => Future.value(r)
      case InconsistentReplication(resultsSeq) if resultsSeq.forall(_.isReturn) =>
        Future.value(default)
      case _ =>
        Future.exception(SimpleReplicationFailure("One or more underlying replica failed op: " + name))
    }

  def append(key: String, flags: Int, expiry: Time, value: Buf): Future[JBoolean] =
    throw new UnsupportedOperationException("append is not supported for replication cache client yet.")

  def prepend(key: String, flags: Int, expiry: Time, value: Buf): Future[JBoolean] =
    throw new UnsupportedOperationException("prepend is not supported for replication cache client yet.")

  def stats(args: Option[String]): Future[Seq[String]] =
    throw new UnsupportedOperationException("No logical way to perform stats without a key")

  def release() {
    underlyingClient.release()
  }
}
adriancole/finagle
finagle-memcached/src/main/scala/com/twitter/finagle/memcached/replication/ReplicationClient.scala
Scala
apache-2.0
18,714
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.carbondata.presto.server

import java.sql.{Connection, DriverManager, ResultSet}
import java.util
import java.util.{Locale, Optional, Properties}

import scala.collection.JavaConverters._
import scala.util.{Failure, Success, Try}

import com.facebook.presto.Session
import com.facebook.presto.execution.QueryIdGenerator
import com.facebook.presto.metadata.SessionPropertyManager
import com.facebook.presto.spi.`type`.TimeZoneKey.UTC_KEY
import com.facebook.presto.spi.security.Identity
import com.facebook.presto.tests.DistributedQueryRunner
import com.google.common.collect.ImmutableMap
import org.slf4j.{Logger, LoggerFactory}

import org.apache.carbondata.presto.CarbondataPlugin

/**
 * Embedded Presto server used by integration tests to query CarbonData, plus a
 * small JDBC client to execute queries against it and collect the results.
 */
object PrestoServer {

  val CARBONDATA_CATALOG = "carbondata"
  val CARBONDATA_CONNECTOR = "carbondata"
  val CARBONDATA_SOURCE = "carbondata"
  val logger: Logger = LoggerFactory.getLogger(this.getClass)

  // HTTP port the coordinator listens on; must match the JDBC URL below.
  val prestoProperties: util.Map[String, String] = Map(("http-server.http.port", "8086")).asJava
  // NOTE: a stray discarded `createSession` call used to precede this line; it
  // was removed because its result was thrown away and a fresh session is
  // created here anyway.
  val queryRunner = new DistributedQueryRunner(createSession, 4, prestoProperties)

  /**
   * start the presto server
   *
   * @param carbonStorePath the store path of carbon
   */
  def startServer(carbonStorePath: String) = {
    logger.info("======== STARTING PRESTO SERVER ========")
    val queryRunner: DistributedQueryRunner = createQueryRunner(
      prestoProperties, carbonStorePath)
    logger.info("STARTED SERVER AT :" + queryRunner.getCoordinator.getBaseUrl)
  }

  /**
   * Instantiates the Presto Server to connect with the Apache CarbonData.
   * Installs the CarbonData plugin and registers the catalog; closes the
   * runner and rethrows on any failure.
   */
  private def createQueryRunner(extraProperties: util.Map[String, String],
      carbonStorePath: String): DistributedQueryRunner = {
    Try {
      queryRunner.installPlugin(new CarbondataPlugin)
      val carbonProperties = ImmutableMap.builder[String, String]
        .put("carbondata-store", carbonStorePath)
        .put("carbon.unsafe.working.memory.in.mb", "512").build

      // CreateCatalog will create a catalog for CarbonData in etc/catalog.
      queryRunner.createCatalog(CARBONDATA_CATALOG, CARBONDATA_CONNECTOR, carbonProperties)
    } match {
      case Success(result) => queryRunner
      case Failure(exception) =>
        queryRunner.close()
        throw exception
    }
  }

  /**
   * stop the presto server
   */
  def stopServer(): Unit = {
    queryRunner.close()
    logger.info("***** Stopping The Server *****")
  }

  /**
   * execute the query by establishing the jdbc connection
   *
   * BUG FIX: the original leaked the JDBC Connection and Statement on every
   * call; both are now closed in finally blocks. The ResultSet is fully
   * materialized by convertResultSetToList before the statement closes.
   *
   * @param query
   * @return
   */
  def executeQuery(query: String): List[Map[String, Any]] = {
    Try {
      val conn: Connection = createJdbcConnection
      try {
        logger.info(s"***** executing the query ***** \\n $query")
        val statement = conn.createStatement()
        try {
          val result: ResultSet = statement.executeQuery(query)
          convertResultSetToList(result)
        } finally {
          statement.close()
        }
      } finally {
        conn.close()
      }
    } match {
      case Success(result) => result
      case Failure(jdbcException) =>
        logger.error(s"exception occurs${ jdbcException.getMessage } \\n query failed $query")
        throw jdbcException
    }
  }

  /**
   * Creates a JDBC Client to connect CarbonData to Presto
   *
   * @return
   */
  private def createJdbcConnection: Connection = {
    val JDBC_DRIVER = "com.facebook.presto.jdbc.PrestoDriver"
    val DB_URL = "jdbc:presto://localhost:8086/carbondata/testdb"

    val properties = new Properties
    // The database Credentials
    properties.setProperty("user", "test")

    // Register JDBC driver, then open a connection.
    Class.forName(JDBC_DRIVER)
    DriverManager.getConnection(DB_URL, properties)
  }

  /**
   * convert result set into scala list of map
   * each map represents a row
   *
   * @param queryResult
   * @return
   */
  private def convertResultSetToList(queryResult: ResultSet): List[Map[String, Any]] = {
    val metadata = queryResult.getMetaData
    // JDBC columns are 1-indexed.
    val colNames = (1 to metadata.getColumnCount) map metadata.getColumnName
    Iterator.continually(buildMapFromQueryResult(queryResult, colNames)).takeWhile(_.isDefined)
      .map(_.get).toList
  }

  // Advances the cursor one row; None signals end of the result set.
  private def buildMapFromQueryResult(queryResult: ResultSet,
      colNames: Seq[String]): Option[Map[String, Any]] = {
    if (queryResult.next()) {
      Some(colNames.map(name => name -> queryResult.getObject(name)).toMap)
    } else {
      None
    }
  }

  /**
   * CreateSession will create a new session in the Server to connect and execute queries.
   */
  private def createSession: Session = {
    logger.info("\\n Creating The Presto Server Session")
    Session.builder(new SessionPropertyManager)
      .setQueryId(new QueryIdGenerator().createNextQueryId)
      .setIdentity(new Identity("user", Optional.empty()))
      .setSource(CARBONDATA_SOURCE).setCatalog(CARBONDATA_CATALOG)
      .setTimeZoneKey(UTC_KEY).setLocale(Locale.ENGLISH)
      .setRemoteUserAddress("address")
      .setUserAgent("agent").build
  }
}
jatin9896/incubator-carbondata
integration/presto/src/test/scala/org/apache/carbondata/presto/server/PrestoServer.scala
Scala
apache-2.0
5,739
package mesosphere.marathon.integration.setup

import java.io.File

import mesosphere.marathon.api.v2.json.V2AppDefinition
import mesosphere.marathon.health.HealthCheck
import mesosphere.marathon.state.{ AppDefinition, PathId }
import org.apache.commons.io.FileUtils
import org.apache.zookeeper.{ WatchedEvent, Watcher, ZooKeeper }
import org.scalatest.{ BeforeAndAfterAllConfigMap, ConfigMap, Suite }
import org.slf4j.LoggerFactory

import scala.collection.JavaConverters._
import scala.concurrent.duration.{ FiniteDuration, _ }
import scala.util.Try

object SingleMarathonIntegrationTest {
  private val log = LoggerFactory.getLogger(getClass)
}

/**
 * Convenient trait to test against one marathon instance.
 * Following things are managed at start:
 * (-) a marathon instance is launched.
 * (-) all existing groups, apps and event listeners are removed.
 * (-) a local http server is launched.
 * (-) a callback event handler is registered.
 * (-) this test suite is registered as callback event listener. every event is stored in a queue.
 * (-) a marathonFacade is provided
 *
 * After the test is finished, everything will be clean up.
 */
trait SingleMarathonIntegrationTest
    extends ExternalMarathonIntegrationTest
    with BeforeAndAfterAllConfigMap
    with MarathonCallbackTestSupport { self: Suite =>

  import SingleMarathonIntegrationTest.log

  /**
   * We only want to fail for configuration problems if the configuration is actually used.
   */
  private var configOption: Option[IntegrationTestConfig] = None
  // Throws NoSuchElementException if accessed before beforeAll populated it.
  def config: IntegrationTestConfig = configOption.get
  lazy val appMock: AppMockFacade = new AppMockFacade()

  // All test apps/groups live under this path so cleanUp can delete them wholesale.
  val testBasePath: PathId = PathId("/marathonintegrationtest")
  override lazy val marathon: MarathonFacade = new MarathonFacade(config.marathonUrl, testBasePath)

  def extraMarathonParameters: List[String] = List.empty[String]

  // Second marathon instance on basePort + 1, started lazily on first use.
  lazy val marathonProxy = {
    startMarathon(config.marathonBasePort + 1, "--master", config.master, "--event_subscriber", "http_callback")
    new MarathonFacade(config.copy(marathonBasePort = config.marathonBasePort + 1).marathonUrl, testBasePath)
  }

  implicit class PathIdTestHelper(path: String) {
    def toRootTestPath: PathId = testBasePath.append(path).canonicalPath()
    def toTestPath: PathId = testBasePath.append(path)
  }

  // Starts ZK + local mesos + marathon (unless an external setup is configured),
  // then the HTTP callback endpoint used to receive marathon events.
  override protected def beforeAll(configMap: ConfigMap): Unit = {
    // NOTE(review): this log line is repeated inside the if-branch below —
    // looks like an accidental duplicate.
    log.info("Setting up local mesos/marathon infrastructure...")
    configOption = Some(IntegrationTestConfig(configMap))
    super.beforeAll(configMap)

    if (!config.useExternalSetup) {
      log.info("Setting up local mesos/marathon infrastructure...")
      // NOTE(review): hard-coded ZK working directory; confirm it is safe to
      // share across concurrently running suites.
      ProcessKeeper.startZooKeeper(config.zkPort, "/tmp/foo/single")
      ProcessKeeper.startMesosLocal()
      cleanMarathonState()

      val parameters = List(
        "--master", config.master,
        "--event_subscriber", "http_callback",
        "--access_control_allow_origin", "*",
        "--min_revive_offers_interval", "100"
      ) ++ extraMarathonParameters
      startMarathon(config.marathonBasePort, parameters: _*)
      log.info("Setting up local mesos/marathon infrastructure: done.")
    }
    else {
      log.info("Using already running Marathon at {}", config.marathonUrl)
    }

    startCallbackEndpoint(config.httpPort, config.cwd)
  }

  override protected def afterAll(configMap: ConfigMap): Unit = {
    super.afterAll(configMap)
    log.info("Cleaning up local mesos/marathon structure...")
    cleanUp(withSubscribers = !config.useExternalSetup)
    ExternalMarathonIntegrationTest.healthChecks.clear()
    ProcessKeeper.stopAllServices()
    ProcessKeeper.stopAllProcesses()
    ProcessKeeper.stopOSProcesses("mesosphere.marathon.integration.setup.AppMock")
    system.shutdown()
    system.awaitTermination()
    log.info("Cleaning up local mesos/marathon structure: done.")
  }

  // Recursively deletes marathon's ZK subtree so each run starts from scratch.
  def cleanMarathonState() {
    val watcher = new Watcher {
      override def process(event: WatchedEvent): Unit = println(event)
    }
    val zooKeeper = new ZooKeeper(config.zkHostAndPort, 30 * 1000, watcher)

    // Depth-first delete: children must be removed before their parent.
    def deletePath(path: String) {
      if (zooKeeper.exists(path, false) != null) {
        val children = zooKeeper.getChildren(path, false)
        children.asScala.foreach(sub => deletePath(s"$path/$sub"))
        zooKeeper.delete(path, -1)
      }
    }
    deletePath(config.zkPath)
    zooKeeper.close()
  }

  // Polls until exactly `num` tasks are reported for the app, or times out.
  def waitForTasks(appId: PathId, num: Int, maxWait: FiniteDuration = 30.seconds): List[ITEnrichedTask] = {
    def checkTasks: Option[List[ITEnrichedTask]] = {
      val tasks = Try(marathon.tasks(appId)).map(_.value).getOrElse(Nil)
      if (tasks.size == num) Some(tasks) else None
    }
    WaitTestSupport.waitFor(s"$num tasks to launch", maxWait)(checkTasks)
  }

  def waitForHealthCheck(check: IntegrationHealthCheck, maxWait: FiniteDuration = 30.seconds) = {
    WaitTestSupport.waitUntil("Health check to get queried", maxWait) { check.pinged }
  }

  // Builds the java command line that launches the AppMock main class.
  private def appProxyMainInvocationImpl: String = {
    val javaExecutable = sys.props.get("java.home").fold("java")(_ + "/bin/java")
    val classPath = sys.props.getOrElse("java.class.path", "target/classes").replaceAll(" ", "")
    val main = classOf[AppMock].getName
    s"""$javaExecutable -Xmx64m -classpath $classPath $main"""
  }

  /**
   * Writes the appProxy invocation command into a shell script -- otherwise the whole log
   * of the test is spammed by overly long classpath definitions.
   */
  private lazy val appProxyMainInvocation: String = {
    val file = File.createTempFile("appProxy", ".sh")
    file.deleteOnExit()

    FileUtils.write(file,
      s"""#!/bin/sh
         |exec $appProxyMainInvocationImpl $$*""".stripMargin)

    file.setExecutable(true)

    file.getAbsolutePath
  }

  def v2AppProxy(appId: PathId, versionId: String, instances: Int,
                 withHealth: Boolean = true, dependencies: Set[PathId] = Set.empty): V2AppDefinition =
    V2AppDefinition(appProxy(appId, versionId, instances, withHealth, dependencies))

  // Builds the AppDefinition for a proxy app that reports health back to the
  // local callback endpoint under /health<appId>/<versionId>.
  private[this] def appProxy(appId: PathId, versionId: String, instances: Int,
                             withHealth: Boolean, dependencies: Set[PathId]): AppDefinition = {

    val mainInvocation = appProxyMainInvocation
    val exec = Some(s"""$mainInvocation $appId $versionId http://localhost:${config.httpPort}/health$appId/$versionId""")
    val health = if (withHealth) Set(HealthCheck(gracePeriod = 20.second, interval = 1.second, maxConsecutiveFailures = 10)) else Set.empty[HealthCheck]
    AppDefinition(appId, exec, executor = "//cmd", instances = instances, cpus = 0.5, mem = 128.0, healthChecks = health, dependencies = dependencies)
  }

  def appProxyCheck(appId: PathId, versionId: String, state: Boolean): IntegrationHealthCheck = {
    //this is used for all instances, as long as there is no specific instance check
    //the specific instance check has also a specific port, which is assigned by mesos
    val check = new IntegrationHealthCheck(appId, versionId, 0, state)
    // Replace any stale check registered for the same app/version.
    ExternalMarathonIntegrationTest.healthChecks
      .filter(c => c.appId == appId && c.versionId == versionId)
      .foreach(ExternalMarathonIntegrationTest.healthChecks -= _)
    ExternalMarathonIntegrationTest.healthChecks += check
    check
  }

  def taskProxyChecks(appId: PathId, versionId: String, state: Boolean): Seq[IntegrationHealthCheck] = {
    marathon.tasks(appId).value.flatMap(_.ports).map { port =>
      val check = new IntegrationHealthCheck(appId, versionId, port, state)
      ExternalMarathonIntegrationTest.healthChecks
        .filter(c => c.appId == appId && c.versionId == versionId)
        .foreach(ExternalMarathonIntegrationTest.healthChecks -= _)
      ExternalMarathonIntegrationTest.healthChecks += check
      check
    }
  }

  // Deletes the whole test group, waits until marathon reports no apps/groups,
  // and optionally unsubscribes all event listeners.
  def cleanUp(withSubscribers: Boolean = false, maxWait: FiniteDuration = 30.seconds) {
    events.clear()
    ExternalMarathonIntegrationTest.healthChecks.clear()

    try {
      val deleteResult: RestResult[ITDeploymentResult] = marathon.deleteGroup(testBasePath, force = true)
      if (deleteResult.code != 404) {
        waitForChange(deleteResult)
      }
    }
    catch {
      // 404 just means there was nothing to delete.
      case e: spray.httpx.UnsuccessfulResponseException if e.response.status.intValue == 404 => // ignore
    }

    WaitTestSupport.waitUntil("cleanUp", maxWait) {
      marathon.listAppsInBaseGroup.value.isEmpty && marathon.listGroupsInBaseGroup.value.isEmpty
    }

    if (withSubscribers) marathon.listSubscribers.value.urls.foreach(marathon.unsubscribe)
    events.clear()
  }
}
mikejihbe/marathon
src/test/scala/mesosphere/marathon/integration/setup/SingleMarathonIntegrationTest.scala
Scala
apache-2.0
8,494
package com.wavesplatform.state.diffs

import com.wavesplatform.account.KeyPair
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.db.WithState
import com.wavesplatform.features.BlockchainFeatures
import com.wavesplatform.lagonaki.mocks.TestBlock.{create => block}
import com.wavesplatform.settings.TestFunctionalitySettings
import com.wavesplatform.state.{BinaryDataEntry, BooleanDataEntry, IntegerDataEntry}
import com.wavesplatform.test.PropSpec
import com.wavesplatform.transaction.{GenesisTransaction, TxHelpers}

// Diff tests for DataTransaction: verifies account-data state transitions,
// fee accounting, overspend rejection, and feature-activation gating.
class DataTransactionDiffTest extends PropSpec with WithState {

  // Functionality settings with the DataTransaction feature pre-activated at height 0.
  val fs = TestFunctionalitySettings.Enabled.copy(preActivatedFeatures = Map(BlockchainFeatures.DataTransaction.id -> 0))

  // Shared fixture: a genesis transaction funding a single master account.
  val baseSetup: (GenesisTransaction, KeyPair) = {
    val master  = TxHelpers.signer(1)
    val genesis = TxHelpers.genesis(master.toAddress)
    (genesis, master)
  }

  property("state invariants hold") {
    val setup = {
      val (genesis, master) = baseSetup

      val key1    = "key1"
      val item1   = IntegerDataEntry(key1, 1)
      val dataTx1 = TxHelpers.data(master, List(item1))

      val key2    = "key2"
      val item2   = BooleanDataEntry(key2, true)
      val dataTx2 = TxHelpers.data(master, List(item2))

      // item3 deliberately reuses key1, so the third tx must overwrite the first entry
      val item3   = IntegerDataEntry(key1, 3)
      val dataTx3 = TxHelpers.data(master, List(item3))

      (genesis, Seq(item1, item2, item3), Seq(dataTx1, dataTx2, dataTx3))
    }

    val (genesisTx, items, txs) = setup
    val sender  = txs.head.sender
    val genesis = block(Seq(genesisTx))
    val blocks  = txs.map(tx => block(Seq(tx)))

    // After block 1: key1 -> item1, only tx1's fee deducted.
    val item1 = items.head
    assertDiffAndState(Seq(genesis), blocks(0), fs) {
      case (totalDiff, state) =>
        assertBalanceInvariant(totalDiff)
        state.balance(sender.toAddress) shouldBe (ENOUGH_AMT - txs(0).fee)
        state.accountData(sender.toAddress, item1.key) shouldBe Some(item1)
    }

    // After block 2: key2 added alongside key1; fees of tx1+tx2 deducted.
    val item2 = items(1)
    assertDiffAndState(Seq(genesis, blocks(0)), blocks(1), fs) {
      case (totalDiff, state) =>
        assertBalanceInvariant(totalDiff)
        state.balance(sender.toAddress) shouldBe (ENOUGH_AMT - txs.take(2).map(_.fee).sum)
        state.accountData(sender.toAddress, item1.key) shouldBe Some(item1)
        state.accountData(sender.toAddress, item2.key) shouldBe Some(item2)
    }

    // After block 3: key1 is overwritten by item3; key2 is untouched; all fees deducted.
    val item3 = items(2)
    assertDiffAndState(Seq(genesis, blocks(0), blocks(1)), blocks(2), fs) {
      case (totalDiff, state) =>
        assertBalanceInvariant(totalDiff)
        state.balance(sender.toAddress) shouldBe (ENOUGH_AMT - txs.map(_.fee).sum)
        state.accountData(sender.toAddress, item1.key) shouldBe Some(item3)
        state.accountData(sender.toAddress, item2.key) shouldBe Some(item2)
    }
  }

  property("cannot overspend funds") {
    val setup = {
      val (genesis, master) = baseSetup
      // Fee exceeds the entire genesis balance by 1, so the diff must be rejected.
      val dataTx = TxHelpers.data(master, List(BinaryDataEntry("key", ByteStr.fill(64)(1))), fee = ENOUGH_AMT + 1)
      (genesis, dataTx)
    }

    val (genesis, dataTx) = setup
    assertDiffEi(Seq(block(Seq(genesis))), block(Seq(dataTx)), fs) { blockDiffEi =>
      blockDiffEi should produce("negative waves balance")
    }
  }

  property("validation fails prior to feature activation") {
    val setup = {
      val (genesis, master) = baseSetup
      val dataTx = TxHelpers.data(master, List())
      (genesis, dataTx)
    }

    // Feature only activates at height 10, so a data tx in block 2 must be rejected.
    val settings = TestFunctionalitySettings.Enabled.copy(preActivatedFeatures = Map(BlockchainFeatures.DataTransaction.id -> 10))

    val (genesis, data) = setup
    assertDiffEi(Seq(block(Seq(genesis))), block(Seq(data)), settings) { blockDiffEi =>
      blockDiffEi should produce("Data Transaction feature has not been activated")
    }
  }
}
wavesplatform/Waves
node/src/test/scala/com/wavesplatform/state/diffs/DataTransactionDiffTest.scala
Scala
mit
3,724
package data

import java.io.File

import config.{dataFile, getFirstInt}

object Tutors {
  // Canned data set, loaded lazily the first time it is needed.
  lazy val dummy: Tutors = fromFile(dataFile("tutors-dummy.txt"))

  /** Splits `string` at every tab; see [[splitAtEach]] for the exact semantics. */
  def splitAtEachTab(string: String): Seq[String] = splitAtEach('\t', string)

  /**
   * Splits `string` at every occurrence of `char`.
   *
   * Unlike `String.split`, this keeps every empty segment — including a
   * trailing empty segment when the string ends with `char` — so the number
   * of segments is always (number of separators + 1). `fromFile` relies on
   * this when mapping tab-separated cells onto slot indices.
   */
  def splitAtEach(char: Char, string: String): Seq[String] = {
    // positions of every separator occurrence in the string
    val separatorIndices = string.zipWithIndex.collect { case (c, i) if c == char => i }
    val starts = 0 +: separatorIndices.map(_ + 1)
    val ends   = separatorIndices :+ string.length
    starts.zip(ends).map { case (start, end) => string.substring(start, end) }
  }

  /**
   * Parses a tutors file: two header lines with counts, one line of slot
   * names, then one tab-separated line per tutor (username, name, then one
   * cell per slot where a nonempty cell means "available").
   *
   * The source is always closed, even when parsing fails.
   */
  def fromFile(file: File): Tutors = {
    val source = io.Source.fromFile(file)
    try {
      val line = source.getLines()
      val numberOfSlots = getFirstInt(line.next)
      val numberOfTutors = getFirstInt(line.next)
      val slotNames = splitAtEachTab(line.next).tail.tail // skip 2 tabs
      assert(slotNames.size == numberOfSlots)
      val tutorNames = collection.mutable.ListBuffer.empty[String]
      val usernames = collection.mutable.ListBuffer.empty[String]
      val tutorAvailability = Range(0, numberOfTutors).map { _ =>
        val username +: name +: avail = splitAtEachTab(line.next)
        usernames += username
        tutorNames += name
        assert(avail.size <= numberOfSlots)
        // a nonempty cell means the tutor is available in that slot
        avail.zipWithIndex.collect { case (cell, slot) if cell.nonEmpty => slot }
      }
      Tutors(
        slotNames = slotNames.toVector,
        tutorNames = tutorNames.toVector,
        usernames = usernames.toVector,
        availability = tutorAvailability)
    } finally {
      source.close()
    }
  }

  /** Converts students to tutors, translating their availability representation. */
  def fromStudents(students: Seq[Student]): Tutors = {
    val slotNames = config.slotNames
    val tutors = students.toVector
    val tutorNames = tutors.map(_.name)
    val usernames = tutors.map(_.username)
    val availability = tutors.map(s => translateStudentAvailability(s.availability))
    Tutors(
      slotNames = slotNames,
      tutorNames = tutorNames,
      usernames = usernames,
      availability = availability)
  }

  /** Converts a per-slot boolean vector into the indices of the available slots. */
  def translateStudentAvailability(avail: Seq[Boolean]): Seq[Int] =
    Range(0, avail.length).filter(avail)
}

/**
 * Immutable tutor roster: slot names plus, per tutor, their display name,
 * username, and the slot indices they are available for.
 */
case class Tutors (
  slotNames    : IndexedSeq[String],
  tutorNames   : IndexedSeq[String],
  usernames    : IndexedSeq[String],
  availability : IndexedSeq[Seq[Int]]
) {
  // e.g. "Mon10-jsmith": used as a stable identifier for a (slot, tutor) pair
  def formatSlotTutor(slot: Int, tutor: Int): String =
    s"${slotNames(slot)}-${usernames(tutor)}"
}
yfcai/tutorial-assignment
main/data.Tutors.scala
Scala
unlicense
2,581
/*
 * Copyright 2012-2020 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.edda

import scala.actors.Actor
import java.util.concurrent.ThreadPoolExecutor

import com.netflix.servo.DefaultMonitorRegistry
import com.netflix.servo.monitor.Monitors
import org.slf4j.LoggerFactory

/** a pseudo collection made up of other related collections.  This allows multiple collections
  * of the same type (but crawled for different accounts) to appear as one unified
  * collection
  * @param name the root name of the collection (usually name from RootCollection base class)
  * @param collections list of common collections that should appear unified
  */
class MergedCollection(val name: String, val collections: Seq[Collection]) extends Queryable {

  override def toString = "[MergedCollection " + name + "]"

  import Queryable._
  import Utils._

  private[this] val logger = LoggerFactory.getLogger(getClass)

  /** handle Query Message for MergedCollection.
    * With a single underlying collection the query is simply forwarded and the
    * result/error relayed asynchronously via onComplete; with several collections
    * every sub-query is awaited (60s each), the results are merged, sorted by
    * stime (newest first) and truncated to `limit` before replying.
    */
  private def localTransitions: PartialFunction[(Any, StateMachine.State), StateMachine.State] = {
    case (gotMsg @ Query(from, queryMap, limit, live, keys, replicaOk), state) => {
      implicit val req = gotMsg.req
      // capture the sender now: inside the future block `sender` would no longer
      // refer to the originator of this message
      val replyTo = sender
      import QueryExecutionContext._
      scala.concurrent.future {
        if (collections.size == 1) {
          // fast path: no merging needed, relay the single collection's answer
          collections.head.query(
            gotMsg.query,
            gotMsg.limit,
            gotMsg.live,
            gotMsg.keys,
            gotMsg.replicaOk
          ) onComplete {
            case scala.util.Success(recs: Seq[Record]) => {
              val msg = QueryResult(this, recs)
              if (logger.isDebugEnabled)
                logger.debug(s"$req${Actor.self} sending: $msg -> $replyTo")
              replyTo ! msg
            }
            case scala.util.Failure(error) => {
              if (logger.isErrorEnabled)
                logger.error(
                  s"$req query on ${collections.head} failed: $gotMsg with error: $error"
                )
              val msg = QueryError(this, error)
              if (logger.isDebugEnabled)
                logger.debug(s"$req${Actor.self} sending: $msg -> $replyTo")
              replyTo ! msg
            }
          }
        } else {
          // fan out to all collections, then block (on this worker thread, not
          // the actor) until each sub-query completes or times out
          val futures = collections.map(
            _.query(gotMsg.query, gotMsg.limit, gotMsg.live, gotMsg.keys, gotMsg.replicaOk)
          )
          try {
            val recs = futures.flatMap(
              f =>
                scala.concurrent.Await.result(
                  f,
                  scala.concurrent.duration.Duration(60000, scala.concurrent.duration.MILLISECONDS)
                )
            )
            // merge-sort newest-first, then apply the caller's limit
            val msg =
              QueryResult(this, firstOf(limit, recs.sortWith((a, b) => a.stime.isAfter(b.stime))))
            if (logger.isDebugEnabled)
              logger.debug(s"$req${Actor.self} sending: $msg -> $replyTo")
            replyTo ! msg
          } catch {
            // any sub-query failure (incl. timeout) fails the whole merged query
            case e: Exception => {
              val msg = QueryError(this, e)
              if (logger.isDebugEnabled)
                logger.debug(s"$req${Actor.self} sending: $msg -> $replyTo")
              replyTo ! msg
            }
          }
        }
      }
      // querying never changes the state machine's state
      state
    }
  }

  override protected def transitions = localTransitions orElse super.transitions

  // Required by Queryable but unreachable here: queries are answered directly
  // in localTransitions, so reaching doQuery indicates a programming error.
  protected def doQuery(
    queryMap: Map[String, Any],
    limit: Int,
    live: Boolean,
    keys: Set[String],
    replicaOk: Boolean,
    state: StateMachine.State
  )(implicit req: RequestId): Seq[Record] = {
    throw new java.lang.RuntimeException("doQuery on MergedCollection should not be called")
  }

  /** start the actors for all the merged collections then start this actor */
  override def start() = {
    Monitors.registerObject("edda.collection.merged." + name, this)
    DefaultMonitorRegistry
      .getInstance()
      .register(
        Monitors.newThreadPoolMonitor(
          s"edda.collection.merged.$name.threadpool",
          this.pool.asInstanceOf[ThreadPoolExecutor]
        )
      )
    if (logger.isInfoEnabled) logger.info("Starting " + this)
    collections.foreach(_.start())
    super.start()
  }

  /** stop the actors for all the merged collections then stop this actor */
  override def stop()(implicit req: RequestId) {
    if (logger.isInfoEnabled) logger.info("Stopping " + this)
    collections.foreach(_.stop())
    super.stop()
  }
}
Netflix/edda
src/main/scala/com/netflix/edda/MergedCollection.scala
Scala
apache-2.0
4,922
package bifrost

/*
 * @startuml
 * car --|> wheel
 * @enduml
 */

import akka.actor.{ActorRef, Props}
import bifrost.api.http._
import bifrost.blocks.BifrostBlock
import bifrost.forging.{Forger, ForgingSettings}
import bifrost.history.BifrostSyncInfoMessageSpec
import bifrost.network.BifrostNodeViewSynchronizer
import bifrost.scorexMod.GenericApplication
import bifrost.transaction.box.BifrostBox
import io.circe
import bifrost.api.http.{ApiRoute, UtilsApiRoute}
import bifrost.network.message.MessageSpec
import bifrost.transaction.box.proposition.ProofOfKnowledgeProposition
import bifrost.transaction.state.PrivateKey25519
import java.lang.management.ManagementFactory
import bifrost.transaction.bifrostTransaction.BifrostTransaction
import com.sun.management.HotSpotDiagnosticMXBean

import scala.reflect.runtime.universe._

/** Application entry class: wires up the node-view holder, forger, local
  * interface, network synchronizer, and the HTTP API routes. All wiring
  * happens eagerly in the constructor body (except the lazy vals touched
  * explicitly below), so construction order matters.
  */
class BifrostApp(val settingsFilename: String) extends GenericApplication with Runnable {
  // use for debug only
  // val path: Path = Path ("/tmp")
  // Try(path.deleteRecursively()
  //
  //
  //
  // )

  // Concrete type members required by GenericApplication.
  override type P = ProofOfKnowledgeProposition[PrivateKey25519]
  override type BX = BifrostBox
  override type TX = BifrostTransaction
  override type PMOD = BifrostBlock
  override type NVHT = BifrostNodeViewHolder

  // Settings are loaded from the JSON file passed on the command line.
  implicit lazy val settings = new ForgingSettings {
    override val settingsJSON: Map[String, circe.Json] = settingsFromFile(settingsFilename)
  }

  log.debug(s"Starting application with settings \n$settings")

  override protected lazy val additionalMessageSpecs: Seq[MessageSpec[_]] =
    Seq(BifrostSyncInfoMessageSpec)

  override val nodeViewHolderRef: ActorRef = actorSystem.actorOf(Props(new NVHT(settings)))

  val forger: ActorRef = actorSystem.actorOf(Props(classOf[Forger], settings, nodeViewHolderRef))

  override val localInterface: ActorRef = actorSystem.actorOf(
    Props(classOf[BifrostLocalInterface], nodeViewHolderRef, forger, settings)
  )

  override val nodeViewSynchronizer: ActorRef = actorSystem.actorOf(
    Props(classOf[BifrostNodeViewSynchronizer],
          networkController,
          nodeViewHolderRef,
          localInterface,
          BifrostSyncInfoMessageSpec)
  )

  // HTTP API surface of the node.
  override val apiRoutes: Seq[ApiRoute] = Seq(
    DebugApiRoute(settings, nodeViewHolderRef),
    WalletApiRoute(settings, nodeViewHolderRef),
    ProgramApiRoute(settings, nodeViewHolderRef, networkController),
    AssetApiRoute(settings, nodeViewHolderRef),
    UtilsApiRoute(settings),
    // GenericNodeViewApiRoute[P, TX](settings, nodeViewHolderRef),
    // PeersApiRoute(peerManagerRef, networkController, settings),
    NodeViewApiRoute(settings, nodeViewHolderRef)
  )

  // NOTE(review): apiTypes must stay in sync with apiRoutes above — presumably
  // used for reflective route documentation/registration; verify against
  // GenericApplication before changing either list independently.
  override val apiTypes: Seq[Type] = Seq(typeOf[UtilsApiRoute],
                                         typeOf[DebugApiRoute],
                                         typeOf[WalletApiRoute],
                                         typeOf[ProgramApiRoute],
                                         typeOf[AssetApiRoute],
                                         // typeOf[GenericNodeViewApiRoute[P, TX]],
                                         // typeOf[PeersApiRoute],
                                         typeOf[NodeViewApiRoute])

  // Am I running on a JDK that supports JVMCI?
  val vm_version = System.getProperty("java.vm.version")
  System.out.printf("java.vm.version = %s%n", vm_version)

  val bean = ManagementFactory.getPlatformMXBean(classOf[HotSpotDiagnosticMXBean])

  // Is JVMCI enabled?
  val enableJVMCI = bean.getVMOption("EnableJVMCI")
  System.out.println(enableJVMCI)

  // Is the system using the JVMCI compiler for normal compilations?
  val useJVMCICompiler = bean.getVMOption("UseJVMCICompiler")
  System.out.println(useJVMCICompiler)

  // What compiler is selected?
  val compiler = System.getProperty("jvmci.Compiler")
  System.out.printf("jvmci.Compiler = %s%n", compiler)

  // touching lazy vals: forces creation of these actors at startup rather
  // than at first use
  forger
  localInterface
  nodeViewSynchronizer

  /*val scheduler = actorSystem.scheduler
  val task = new Runnable {
    def run(): Unit = {
      networkController ! Message(ProducerNotifySpec, Left(
        ProducerProposal(
          ByteString.copyFrom("testProducer".getBytes),
          ProposalDetails(assetCode = "assetCode"),
          ByteString.copyFrom("signature".getBytes),
          Instant.now.toEpochMilli
        ).toByteArray
      ), Some(null))
    }
  }
  implicit val executor = actorSystem.dispatcher
  scheduler.schedule(initialDelay = Duration(10000, TimeUnit.MILLISECONDS), interval = Duration(7000, TimeUnit.MILLISECONDS), task)*/

  // if (settings.nodeName == "node1") {
  //   log.info("Starting transactions generation")
  //   val generator: ActorRef = actorSystem.actorOf(Props(classOf[PolyTransferGenerator], nodeViewHolderRef))
  //   generator ! StartGeneration(FiniteDuration(5, SECONDS))
  // }
}

/** Main entry point: settings file defaults to testnet-private.json. */
object BifrostApp extends App {
  val settingsFilename = args.headOption.getOrElse("testnet-private.json")
  new BifrostApp(settingsFilename).run()
}
Topl/Project-Bifrost
src/main/scala/bifrost/BifrostApp.scala
Scala
mpl-2.0
5,028
package com.eigengo.lift.exercise.classifiers.model.provers

import com.eigengo.lift.exercise.classifiers.ExerciseModel
import com.eigengo.lift.exercise.classifiers.model.ModelGenerators
import com.typesafe.config.ConfigFactory
import org.scalatest._
import org.scalatest.prop._

import scala.concurrent.ExecutionContext.Implicits.global

// Property tests for the CVC4 prover bridge: each property encodes a classical
// tautology (implication "p --> q" is expressed as "not(p) | q" using the
// ExerciseModel query algebra) and checks that the prover both proves it valid
// and finds it satisfiable for randomly generated queries.
class CVC4Test
  extends PropSpec
  with PropertyChecks
  with Matchers
  with BeforeAndAfterAll
  with concurrent.ScalaFutures
  with ModelGenerators {

  import ExerciseModel._

  val cvc4 = new CVC4(ConfigFactory.load("test.conf"))

  // Dump prover statistics once the whole suite has run.
  override def afterAll() {
    println("CVC4 prover statistics:")
    for ((key, value) <- cvc4.statistics) {
      println(s"$key = $value")
    }
  }

  // Identity: p --> p
  property("valid(p --> p)") {
    forAll(QueryGen()) { (query: Query) =>
      assert(cvc4.valid(Or(ExerciseModel.not(query), query)).futureValue)
    }
  }

  property("satisfiable(p --> p)") {
    forAll(QueryGen()) { (query: Query) =>
      assert(cvc4.satisfiable(Or(ExerciseModel.not(query), query)).futureValue)
    }
  }

  // Conjunction elimination: p & q --> p and p & q --> q
  property("valid(p & q --> p)") {
    forAll(QueryGen(), QueryGen()) { (query1: Query, query2: Query) =>
      assert(cvc4.valid(Or(ExerciseModel.not(And(query1, query2)), query1)).futureValue)
    }
  }

  property("satisfiable(p & q --> p)") {
    forAll(QueryGen(), QueryGen()) { (query1: Query, query2: Query) =>
      assert(cvc4.satisfiable(Or(ExerciseModel.not(And(query1, query2)), query1)).futureValue)
    }
  }

  property("valid(p & q --> q)") {
    forAll(QueryGen(), QueryGen()) { (query1: Query, query2: Query) =>
      assert(cvc4.valid(Or(ExerciseModel.not(And(query1, query2)), query2)).futureValue)
    }
  }

  property("satisfiable(p & q --> q)") {
    forAll(QueryGen(), QueryGen()) { (query1: Query, query2: Query) =>
      assert(cvc4.satisfiable(Or(ExerciseModel.not(And(query1, query2)), query2)).futureValue)
    }
  }

  // Disjunction introduction: p --> p | q and q --> p | q
  property("valid(p --> p | q)") {
    forAll(QueryGen(), QueryGen()) { (query1: Query, query2: Query) =>
      assert(cvc4.valid(Or(ExerciseModel.not(query1), Or(query1, query2))).futureValue)
    }
  }

  property("satisfiable(p --> p | q)") {
    forAll(QueryGen(), QueryGen()) { (query1: Query, query2: Query) =>
      assert(cvc4.satisfiable(Or(ExerciseModel.not(query1), Or(query1, query2))).futureValue)
    }
  }

  property("valid(q --> p | q)") {
    forAll(QueryGen(), QueryGen()) { (query1: Query, query2: Query) =>
      assert(cvc4.valid(Or(ExerciseModel.not(query2), Or(query1, query2))).futureValue)
    }
  }

  property("satisfiable(q --> p | q)") {
    forAll(QueryGen(), QueryGen()) { (query1: Query, query2: Query) =>
      assert(cvc4.satisfiable(Or(ExerciseModel.not(query2), Or(query1, query2))).futureValue)
    }
  }

  // Modus ponens: p & (p --> q) --> q
  property("valid(p & (p --> q) --> q)") {
    forAll(QueryGen(), QueryGen()) { (query1: Query, query2: Query) =>
      assert(cvc4.valid(Or(ExerciseModel.not(And(query1, Or(ExerciseModel.not(query1), query2))), query2)).futureValue)
    }
  }

  property("satisfiable(p & (p --> q) --> q)") {
    forAll(QueryGen(), QueryGen()) { (query1: Query, query2: Query) =>
      assert(cvc4.satisfiable(Or(ExerciseModel.not(And(query1, Or(ExerciseModel.not(query1), query2))), query2)).futureValue)
    }
  }

  // Simplification must be logically equivalent to the original query.
  property("valid(p <-> simplify(p))") {
    forAll(QueryGen()) { (query: Query) =>
      assert(cvc4.valid(Or(ExerciseModel.not(query), cvc4.simplify(query).futureValue)).futureValue)
      assert(cvc4.valid(Or(query, cvc4.simplify(ExerciseModel.not(query)).futureValue)).futureValue)
      assert(cvc4.valid(Or(query, ExerciseModel.not(cvc4.simplify(query).futureValue))).futureValue)
    }
  }
}
teroxik/open-muvr
server/exercise/src/test/scala/com/eigengo/lift/exercise/classifiers/model/provers/CVC4Test.scala
Scala
apache-2.0
3,652
/* * Copyright (c) 2016 Frank S. Thomas * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package shapeless import org.junit.Assert._ import org.junit.Test import scala.concurrent.duration.Duration import shapeless.test.compileTime class CompileTimeTests { @Test def testCompileTime { assertTrue(compileTime(""" val x = 42 """) > Duration.Zero) } }
rorygraves/perf_tester
corpus/shapeless/src/test/scala/shapeless/compiletime.scala
Scala
apache-2.0
876
import sbt._ object PluginDef extends Build { lazy val root = Project("plugins", file(".")) dependsOn( g8plugin ) lazy val g8plugin = ProjectRef(uri("git://github.com/n8han/giter8#0.4.5.1"), "giter8-plugin") }
ikuo/android-app-scala.g8
project/project/plugins.scala
Scala
mit
218
/***********************************************************************
 * Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License, Version 2.0
 * which accompanies this distribution and is available at
 * http://www.opensource.org/licenses/apache2.0.php.
 ***********************************************************************/

package org.locationtech.geomesa.features.serialization

import java.util.{Date, UUID}

import com.typesafe.scalalogging.LazyLogging
import org.geotools.util.factory.Hints
import org.locationtech.geomesa.utils.text.WKBUtils
import org.locationtech.jts.geom.Geometry

// noinspection LanguageFeature
/**
 * Shared key/value (de)serialization logic for java.util.Map payloads,
 * parameterized over the concrete primitive writer/reader used by a
 * given serialization backend.
 */
trait GenericMapSerialization[T <: PrimitiveWriter, V <: PrimitiveReader] extends LazyLogging {

  /** Serializes the whole map to `out`. */
  def serialize(out: T, map: java.util.Map[_ <: AnyRef, _ <: AnyRef]): Unit

  /** Deserializes a map from `in`, allocating a new map. */
  def deserialize(in: V): java.util.Map[AnyRef, AnyRef]

  /** Deserializes from `in` into an existing `map`. */
  def deserialize(in: V, map: java.util.Map[AnyRef, AnyRef]): Unit

  /**
   * Write a single key or value. Dispatches on the runtime class of `value`;
   * the supported set must mirror the classes handled by `read` below.
   * Note: due to type erasure the list case only checks `java.util.List`,
   * not its element type.
   */
  protected def write(out: T, value: AnyRef): Unit = value match {
    case v: String                 => out.writeString(v)
    case v: java.lang.Integer      => out.writeInt(v)
    case v: java.lang.Long         => out.writeLong(v)
    case v: java.lang.Float        => out.writeFloat(v)
    case v: java.lang.Double       => out.writeDouble(v)
    case v: java.lang.Boolean      => out.writeBoolean(v)
    case v: Date                   => out.writeLong(v.getTime)
    case v: Array[Byte]            => writeBytes(out, v)
    case v: Geometry               => writeGeometry(out, v)
    case v: UUID                   => out.writeLong(v.getMostSignificantBits); out.writeLong(v.getLeastSignificantBits)
    case v: java.util.List[AnyRef] => writeList(out, v)
    case v: Hints.Key              => out.writeString(HintKeySerialization.keyToId(v))
    case _ => throw new IllegalArgumentException(s"Unsupported value: $value (${value.getClass})")
  }

  /**
   * Read a key or value. Strings will be interned, as we expect a lot of duplication in user data,
   * i.e keys but also visibilities, which is the only user data we generally store
   *
   * @param in input
   * @param clas class of the item to read
   * @return
   */
  protected def read(in: V, clas: Class[_]): AnyRef = clas match {
    case c if classOf[java.lang.String].isAssignableFrom(c)  => in.readString().intern()
    case c if classOf[java.lang.Integer].isAssignableFrom(c) => Int.box(in.readInt())
    case c if classOf[java.lang.Long].isAssignableFrom(c)    => Long.box(in.readLong())
    case c if classOf[java.lang.Float].isAssignableFrom(c)   => Float.box(in.readFloat())
    case c if classOf[java.lang.Double].isAssignableFrom(c)  => Double.box(in.readDouble())
    case c if classOf[java.lang.Boolean].isAssignableFrom(c) => Boolean.box(in.readBoolean())
    case c if classOf[java.util.Date].isAssignableFrom(c)    => new java.util.Date(in.readLong())
    case c if classOf[Array[Byte]] == c                      => readBytes(in)
    case c if classOf[Geometry].isAssignableFrom(c)          => readGeometry(in)
    case c if classOf[UUID].isAssignableFrom(c)              => new UUID(in.readLong(), in.readLong())
    case c if classOf[java.util.List[_]].isAssignableFrom(c) => readList(in)
    case c if classOf[Hints.Key].isAssignableFrom(c)         => HintKeySerialization.idToKey(in.readString())
    case _ => throw new IllegalArgumentException(s"Unsupported value class: $clas")
  }

  /**
   * Write a geometry
   *
   * @param out out
   * @param geom geometry
   */
  protected def writeGeometry(out: T, geom: Geometry): Unit = writeBytes(out, WKBUtils.write(geom))

  /**
   * Read a geometry
   *
   * @param in in
   * @return geometry
   */
  protected def readGeometry(in: V): Geometry = WKBUtils.read(readBytes(in))

  /**
   * Write bytes
   *
   * @param out out
   * @param bytes bytes
   */
  protected def writeBytes(out: T, bytes: Array[Byte]): Unit

  /**
   * Read bytes
   *
   * @param in in
   * @return bytes
   */
  protected def readBytes(in: V): Array[Byte]

  /**
   * Write a list
   *
   * @param out out
   * @param list list
   */
  protected def writeList(out: T, list: java.util.List[AnyRef]): Unit

  /**
   * Read a list
   *
   * @param in in
   * @return list
   */
  protected def readList(in: V): java.util.List[AnyRef]

  // Hint keys are only serializable when a stable id mapping exists for them;
  // everything else is assumed serializable by the `write` dispatch above.
  protected def canSerialize(obj: AnyRef): Boolean = obj match {
    case key: Hints.Key => HintKeySerialization.canSerialize(key)
    case _              => true
  }
}
ccri/geomesa
geomesa-features/geomesa-feature-common/src/main/scala/org/locationtech/geomesa/features/serialization/GenericMapSerialization.scala
Scala
apache-2.0
4,601
package nodes.learning import breeze.linalg._ import breeze.numerics._ import breeze.stats.distributions._ import org.scalatest.FunSuite import pipelines._ class ZCAWhiteningSuite extends FunSuite with LocalSparkContext with Logging { test("whitening") { val eps = 0.1 val nrows = 10000 val ndim = 10 val x = DenseMatrix.rand[Double](nrows, ndim, Gaussian(0.0, 1.0)) val whitener = new ZCAWhitenerEstimator(eps).fitSingle(x) val wx = whitener(x) //Checks max(max(abs(cov(whiten(x))) - eye(10)) < 2*eps assert(max(abs(cov(convert(wx, Double))) - DenseMatrix.eye[Double](ndim)) < 2*eps, "Whitening the base matrix should produce unit variance and zero covariance.") } }
shivaram/keystone
src/test/scala/nodes/learning/ZCAWhiteningSuite.scala
Scala
apache-2.0
716
/* * Copyright 2017-2022 Viktor Lövgren * * SPDX-License-Identifier: MIT */ package ciris.refined import cats.effect.IO import cats.effect.unsafe.implicits.global import cats.implicits._ import ciris._ import eu.timepit.refined.types.numeric.PosInt import org.scalatest.funsuite.AnyFunSuite final class RefinedSpec extends AnyFunSuite { test("refTypeConfigDecoder.success") { assert { val actual = default("1").as[PosInt].attempt[IO].unsafeRunSync().map(_.value) val expected = Right(1) actual == expected } } test("refTypeConfigDecoder.error") { assert { default("0").as[PosInt].attempt[IO].unsafeRunSync().isLeft } } }
vlovgr/ciris
modules/refined/src/test/scala/ciris/refined/RefinedSpec.scala
Scala
mit
678
package com.shalloui import com.shalloui.tblite.mapper.{Codec, _} import shapeless.{HNil, Typeable, _} import scala.language.implicitConversions /** * Created by a.reisberg on 8/31/2016. */ package object tblite { case object Empty case class QueryDocInfo(rev: String, _conflicts: List[String]) type Empty = Empty.type type Last = Empty :: HNil val Last: Last = Empty :: HNil implicit val ignoreCodec: Codec[Empty] = Codec.noHint[Empty.type] implicit val docInfoCodec = Codec.noHintAux[QueryDocInfo, JHashMap] implicit val stringCodec = Codec.aux[String, String] implicit def castableOps[T](t: T): CastableOps[T] = new CastableOps(t) final class CastableOps[T](val t: T) extends AnyVal { def to[U](implicit cast: Typeable[U]): Option[U] = cast.cast(t) def is[U](implicit cast: Typeable[U]): Boolean = cast.cast(t).isDefined } }
a-reisberg/typebase-lite
tblcore/src/main/scala/com/shalloui/tblite/package.scala
Scala
apache-2.0
877
/* Copyright 2013 Stephen K Samuel Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.sksamuel.scrimage.filter import com.sksamuel.scrimage.BufferedOpFilter /** @author Stephen Samuel */ class NoiseFilter(amount: Int, density: Double) extends BufferedOpFilter { val op = new thirdparty.jhlabs.image.NoiseFilter() op.setDensity(density.toFloat) op.setAmount(amount) } object NoiseFilter { def apply(): NoiseFilter = apply(25, 1) def apply(amount: Int, density: Double): NoiseFilter = new NoiseFilter(amount, density) }
carlosFattor/scrimage
scrimage-filters/src/main/scala/com/sksamuel/scrimage/filter/NoiseFilter.scala
Scala
apache-2.0
1,051
/* * Copyright 2021 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package utils import play.api.mvc.Call object CallMap { private val callMap: Map[String, Call] = Map( "pensionsTaken" -> controllers.routes.IP2016Controller.pensionsTaken(), "pensionsTakenBefore" -> controllers.routes.IP2016Controller.pensionsTakenBefore(), "pensionsTakenBetween" -> controllers.routes.IP2016Controller.pensionsTakenBetween(), "overseasPensions" -> controllers.routes.IP2016Controller.overseasPensions(), "currentPensions" -> controllers.routes.IP2016Controller.currentPensions(), "pensionDebits" -> controllers.routes.IP2016Controller.pensionDebits(), "psoDetails" -> controllers.routes.IP2016Controller.psoDetails, "removePsoDetails" -> controllers.routes.IP2016Controller.removePsoDetails ) def get(name: String): Option[Call] = callMap.get(name) }
hmrc/pensions-lifetime-allowance-frontend
app/utils/CallMap.scala
Scala
apache-2.0
1,455
package barneshut

import java.awt._
import java.awt.event._
import javax.swing._
import javax.swing.event._
import scala.collection.parallel.{TaskSupport, defaultTaskSupport}

// Mutable model holding the state of the Barnes-Hut N-body simulation:
// the viewport, the bodies, the current quadtree, timing statistics,
// and the parallel task support used by the simulation steps.
class SimulationModel {

  var screen = new Boundaries

  var bodies: Seq[Body] = Nil

  var quad: Quad = Empty(screen.centerX, screen.centerY, Float.MaxValue)

  var shouldRenderQuad = false

  var timeStats = new TimeStatistics

  var taskSupport: TaskSupport = defaultTaskSupport

  // Configures the parallelism level and initializes bodies for the given pattern.
  // Only "two-galaxies" is supported; any other pattern aborts.
  def initialize(parallelismLevel: Int, pattern: String, totalBodies: Int) {
    taskSupport = new collection.parallel.ForkJoinTaskSupport(
      new scala.concurrent.forkjoin.ForkJoinPool(parallelismLevel))

    pattern match {
      case "two-galaxies" => init2Galaxies(totalBodies)
      case _ => sys.error(s"no such initial pattern: $pattern")
    }
  }

  // Populates `bodies` with two rotating galaxies (1/8 and 7/8 of the bodies)
  // and sets fixed screen boundaries covering both.
  def init2Galaxies(totalBodies: Int) {
    val bodyArray = new Array[Body](totalBodies)
    // fixed seed -> the generated layout is deterministic across runs
    val random = new scala.util.Random(213L)

    def galaxy(from: Int, num: Int, maxradius: Float, cx: Float, cy: Float, sx: Float, sy: Float) {
      val totalM = 1.5f * num
      val blackHoleM = 1.0f * num
      val cubmaxradius = maxradius * maxradius * maxradius
      for (i <- from until (from + num)) {
        val b = if (i == from) {
          // the first body of each galaxy is its heavy central "black hole"
          new Body(blackHoleM, cx, cy, sx, sy)
        } else {
          // place a star at a random angle/radius around the center
          val angle = random.nextFloat * 2 * math.Pi
          val radius = 25 + maxradius * random.nextFloat
          val starx = cx + radius * math.sin(angle).toFloat
          val stary = cy + radius * math.cos(angle).toFloat
          // tangential speed from the combined pull of the black hole and
          // the surrounding mass (presumably a circular-orbit approximation —
          // NOTE(review): confirm against the Body force model)
          val speed = math.sqrt(
            gee * blackHoleM / radius +
            gee * totalM * radius * radius / cubmaxradius)
          val starspeedx = sx + (speed * math.sin(angle + math.Pi / 2)).toFloat
          val starspeedy = sy + (speed * math.cos(angle + math.Pi / 2)).toFloat
          val starmass = 1.0f + 1.0f * random.nextFloat
          new Body(starmass, starx, stary, starspeedx, starspeedy)
        }
        bodyArray(i) = b
      }
    }

    galaxy(0, bodyArray.length / 8, 300.0f, 0.0f, 0.0f, 0.0f, 0.0f)
    galaxy(bodyArray.length / 8, bodyArray.length / 8 * 7, 350.0f, -1800.0f, -1200.0f, 0.0f, 0.0f)

    bodies = bodyArray.toSeq

    // compute center and boundaries
    screen = new Boundaries
    screen.minX = -2200.0f
    screen.minY = -1600.0f
    screen.maxX = 350.0f
    screen.maxY = 350.0f
  }
}
mitochon/hexercise
src/mooc/parprog/week4barneshut/src/main/scala/barneshut/SimulationModel.scala
Scala
mit
2,363
package notebook.server

import java.io._
import java.nio.charset.{StandardCharsets, Charset}
import java.net.URLDecoder
import java.text.SimpleDateFormat
import java.util.Date

import notebook.NBSerializer
import notebook.NBSerializer._
import org.apache.commons.io.FileUtils
import play.api.Logger
import play.api.libs.json._

import utils.Const.UTF_8

/** Manages notebook files (create/copy/load/save/rename/delete) under a
 *  single root directory, guarding against path traversal outside of it.
 *
 *  @param name        logical name of this manager
 *  @param notebookDir root directory containing the .snb notebook files
 */
class NotebookManager(val name: String, val notebookDir: File) {

  Logger.info("Notebook directory is: " + notebookDir.getCanonicalPath)

  // File extension for serialized notebooks.
  val extension = ".snb"

  /** Extracts the notebook name from a path: last non-empty segment with the
   *  extension stripped. Assumes `path` ends with `extension`.
   */
  def getName(path: String) = path.split("/").filter(!_.isEmpty).last.dropRight(extension.length)

  /** Resolves a (URL-encoded) relative path to a File inside `notebookDir`.
   *  @throws IllegalArgumentException if the resolved path escapes the root
   */
  def notebookFile(path: String): File = {
    val basePath = notebookDir.getCanonicalPath
    val decodedPath = URLDecoder.decode(path, UTF_8)
    val nbFile = new File(basePath, decodedPath)
    // This check is probably not strictly necessary due to URL encoding of name
    // (should escape any path traversal components), but let's be safe
    require(nbFile.getCanonicalPath.startsWith(basePath), "Unable to access notebook outside of notebooks path.")
    nbFile
  }

  /** Returns the first path of the form base<N>.snb (N = 1, 2, ...) that does
   *  not already exist on disk. The lazy Stream stops at the first free slot.
   */
  def incrementFileName(base: String) = {
    Logger.info("Incremented Notebook at " + base)
    val newPath: String = Stream.from(1).map(base + _ + extension).dropWhile { fn =>
      val snb = notebookFile(fn)
      val r = snb.exists()
      Logger.info(s"SNB ${snb.getAbsolutePath} exists: $r")
      r
    }.head
    Logger.info("Incremented Notebook is " + newPath)
    newPath
  }

  /** Creates a new, empty notebook and returns its path.
   *
   *  If `name` is given, the file is <path>/<name>.snb; otherwise an
   *  "Untitled<N>" name is generated via incrementFileName. The custom*
   *  options are stored in the notebook's metadata.
   *  @throws NotebookExistsException if a named notebook already exists (via save)
   */
  def newNotebook(
    path: String = "/",
    customLocalRepo: Option[String] = None,
    customRepos: Option[List[String]] = None,
    customDeps: Option[List[String]] = None,
    customImports: Option[List[String]] = None,
    customArgs: Option[List[String]] = None,
    customSparkConf: Option[JsObject] = None,
    name: Option[String] = None) = {
    val sep = if (path.last == '/') "" else "/"
    val fpath = name.map(path + sep + _ + extension).getOrElse(incrementFileName(path + sep + "Untitled"))
    val nb = Notebook(
      Some(new Metadata(getName(fpath),
        customLocalRepo = customLocalRepo,
        customRepos = customRepos,
        customDeps = customDeps,
        customImports = customImports,
        customArgs = customArgs,
        customSparkConf = customSparkConf)),
      Some(Nil), None, None, None
    )
    // overwrite = false: creating must not clobber an existing notebook.
    save(fpath, nb, overwrite = false)
    fpath
  }

  /** Copies the notebook at `nbPath` to a new auto-numbered path and returns
   *  that path. Falls back to creating a brand-new notebook when the source
   *  cannot be loaded. The copy's metadata name is updated to the new name.
   */
  def copyNotebook(nbPath: String) = {
    val nbData = getNotebook(nbPath)
    nbData.map { nb =>
      // nb is (lastModified, name, rawData, path) — see getNotebook.
      val newPath = incrementFileName(nb._4.dropRight(extension.length))
      val newName = getName(newPath)
      val oldNB = NBSerializer.read(nb._3)
      save(newPath, Notebook(oldNB.metadata.map(_.copy(name = newName)), oldNB.cells, oldNB.worksheets, oldNB.autosaved, None), false)
      newPath
    } getOrElse newNotebook()
  }

  /** Loads a notebook and returns Some((lastModifiedString, name, rawContent, path)),
   *  or None if it does not exist.
   */
  def getNotebook(path: String) = {
    Logger.info(s"getNotebook at path $path")
    for (notebook <- load(path)) yield {
      val data = FileUtils.readFileToString(notebookFile(path), StandardCharsets.UTF_8)
      val df = new SimpleDateFormat("dd-MM-yyyy HH:mm:ss z'('Z')'")
      val last_mtime = df.format(new Date(notebookFile(path).lastModified()))
      (last_mtime, notebook.name, data, path)
    }
  }

  /** Deletes the notebook file at `path` if it exists; no-op otherwise. */
  def deleteNotebook(path: String) = {
    Logger.info(s"deleteNotebook at path $path")
    val file = notebookFile(path)
    if (file.exists()) {
      file.delete()
    }
  }

  /** Moves the notebook from `path` to `newpath`, updating the metadata name
   *  when it changed, and returns (newName, newPath).
   *
   *  NOTE(review): the file is first renameTo'd and then rewritten with the
   *  serialized (possibly renamed) notebook; this write uses the platform
   *  default charset, unlike save() which forces UTF-8 — confirm intended.
   */
  def rename(path: String, newpath: String) = {
    Logger.info(s"rename from path $path to $newpath")
    val newname = getName(newpath)
    val oldfile = notebookFile(path)
    Logger.debug(s"rename from path $path to $newpath: old file is ${oldfile.getAbsolutePath}")
    load(path).foreach { notebook =>
      val nb = if (notebook.name != newname) {
        val meta = notebook.metadata.map(_.copy(name = newname)).orElse(Some(new Metadata(newname)))
        notebook.copy(metadata = meta)
      } else {
        notebook
      }
      val newfile = notebookFile(newpath)
      Logger.debug(s"rename from path $path to $newpath: new file is ${newfile.getAbsolutePath}")
      oldfile.renameTo(newfile)
      FileUtils.writeStringToFile(newfile, NBSerializer.write(nb))
    }
    (newname, newpath)
  }

  /** Serializes `notebook` to `path` (UTF-8) and returns (metadataName, path).
   *  @throws NotebookExistsException when the file exists and overwrite is false
   */
  def save(path: String, notebook: Notebook, overwrite: Boolean) = {
    Logger.info(s"save at path $path")
    val file = notebookFile(path)
    if (!overwrite && file.exists()) {
      throw new NotebookExistsException("Notebook " + path + " already exists.")
    }
    FileUtils.writeStringToFile(file, NBSerializer.write(notebook), Charset.forName("UTF-8"))
    // Re-load to return the name as actually persisted.
    val nb = load(path)
    (nb.get.metadata.get.name, path)
  }

  /** Deserializes the notebook at `path`, or None when the file is absent. */
  def load(path: String): Option[Notebook] = {
    Logger.info(s"Loading notebook at path $path")
    val file = notebookFile(path)
    if (file.exists()) Some(NBSerializer.read(FileUtils.readFileToString(file))) else None
  }

}

/** Thrown when saving would overwrite an existing notebook without permission. */
class NotebookExistsException(message: String) extends IOException(message)
cheleb/spark-notebook
app/notebook/server/NotebookManager.scala
Scala
apache-2.0
4,991
package com.transport.domain.protocol

import org.opentripplanner.routing.core.RoutingRequest
import org.opentripplanner.routing.services.GraphService
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.concurrent.ScalaFutures

import scala.concurrent.Await
import scala.concurrent.duration._

/** Verifies that a TransportRequest delegates execution to its RequestType. */
class TransportRequestSpec extends FlatSpec with Matchers with ScalaFutures {

  "TransportRequest" should "execute the corresponding request" in {
    // The graph service is irrelevant here: the stub request type ignores it.
    implicit val graphService: GraphService = null

    // Stub request type that short-circuits routing and returns a marker value.
    object DummyRequestType extends RequestType {
      override def execute(request: RoutingRequest)(implicit graphService: GraphService): Any =
        "success"
    }

    val underTest = TransportRequest(null, DummyRequestType)
    val outcome = Await.result(underTest.execute, 1.second)

    outcome shouldBe "success"
  }
}
ksarath/transport-using-opentripplanner
src/test/scala/com/transport/domain/protocol/TransportRequestSpec.scala
Scala
apache-2.0
840
import leon.collection._
import leon.lang._

/** Higher-order list operations written for the Leon verifier.
 *
 *  Each function carries an `ensuring` postcondition that Leon proves; the
 *  bodies and contracts are verification artifacts — do not restructure them.
 */
object ListOps1 {

  /**
   * Simple List operations
   */

  /** Splits `l` into (elements satisfying f, elements not satisfying f),
   *  preserving the relative order within each half.
   */
  def partition[A] (f: A => Boolean, l: List[A]): (List[A], List[A]) = {
    l match {
      case Cons(h, t) =>
        // Route the head into a singleton on the matching side...
        val (h1, h2) = if (f(h)) (Cons(h, Nil()), Nil[A]()) else (Nil[A](), Cons(h, Nil()))
        // ...then recurse on the tail and concatenate per side.
        val (t1, t2) = partition(f, t)
        (h1 ++ t1, h2 ++ t2)
      case Nil() => (Nil[A](), Nil[A]())
    }
  } ensuring { x => x match {
    case (a, b) => a.forall(f) && b.forall(x => !f(x)) && a.size + b.size == l.size && a.content ++ b.content == l.content
  } }

  /** Maps f over `l`, keeping only the Some results (filter + map in one pass). */
  def collect[A, B] (f: A => Option[B], l: List[A]): List[B] = {
    l match {
      case Cons(h, t) =>
        f(h) match {
          case Some(b) => Cons(b, collect(f, t))
          case None() => collect(f, t)
        }
      case Nil() => Nil[B]()
    }
  } ensuring { res => res.size <= l.size }

  /** Returns the first Some produced by f over `l`, or None. */
  def collectFirst[A, B] (f: A => Option[B], l: List[A]): Option[B] = {
    l match {
      case Cons(h, t) => f(h).orElse(collectFirst(f, t))
      case Nil() => None[B]()
    }
  } ensuring { res => !l.isEmpty || res.isEmpty }

  /** Counts the elements of `l` satisfying f. */
  def count[A] (f: A => Boolean, l: List[A]): Int = {
    l match {
      case Cons(h, t) => (if (f(h)) 1 else 0) + count(f, t)
      case Nil() => 0
    }
  } ensuring { res => !(res > 0) || !l.isEmpty }

  /** Drops the longest prefix of `l` whose elements all satisfy f. */
  def dropWhile[A] (f: A => Boolean, l: List[A]): List[A] = {
    l match {
      case Cons(h, t) if f(h) => dropWhile(f, t)
      case Cons(h, t) if !f(h) => l
      case Nil() => Nil[A]()
    }
  } ensuring { res =>
    if (res.size < l.size) {
      f(l.head)
    } else {
      l.isEmpty || !f(l.head)
    }
  }

  /** True iff every element of `l` satisfies f; proved dual to `exists`. */
  def forall[A] (f: A => Boolean, l: List[A]): Boolean = {
    l match {
      case Cons(h, t) if f(h) => forall(f, t)
      case Cons(_, t) => false
      case Nil() => true
    }
  } ensuring { res => res == !exists[A]({ x => !f(x) }, l) }

  /** True iff some element of `l` satisfies f. */
  def exists[A] (f: A => Boolean, l: List[A]): Boolean = {
    l match {
      case Cons(h, t) if f(h) => true
      case Cons(_, t) => exists(f, t)
      case Nil() => false
    }
  } ensuring { res => res == res }

  /**
   * Map with universal quantifier in post as a witness argument
   */
  def mapWitness[A, B] (f: A => B, l: List[A], w: A): List[B] = {
    l match {
      case Cons(h, t) => f(h) :: mapWitness(f, t, w)
      case Nil() => Nil[B]()
    }
  } ensuring { res => if (l.content contains w) res.content contains f(w) else true }
}
ericpony/scala-examples
testcases/verification/higher-order/valid/ListOps1.scala
Scala
mit
2,625
package testhelpers

import scala.concurrent.{ExecutionContext, ExecutionContextExecutor}

/** Test mixin providing an implicit execution context.
 *
 *  Mix into a suite to satisfy `Future`-based APIs without wiring an
 *  executor explicitly; the JVM-wide global pool is used.
 */
trait DefaultExecutionContext {

  /** Implicit executor for Future combinators; backed by the global pool. */
  protected implicit val ec: ExecutionContextExecutor =
    ExecutionContext.global
}
Dasiu/play-framework-test-project
test/testhelpers/DefaultExecutionContext.scala
Scala
mit
205
package top.myetl.lucenerdd.store

import java.io.IOException
import java.nio.file.FileAlreadyExistsException

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FSDataOutputStream, FileSystem, Path}
import org.apache.hadoop.ipc.RemoteException
import org.apache.lucene.store._
import org.apache.spark.Logging
import top.myetl.lucenerdd.util.FsUtils

/**
  * Created by pengda on 17/1/10.
  *
  * Lucene LockFactory that takes locks by atomically creating a marker file
  * on HDFS (fs.create with overwrite = false). Only usable with HdfsDirectory.
  */
class HdfsLockFactory extends LockFactory with Logging{

  /** Obtains a lock named `lockName` inside the directory's HDFS path.
    *
    * Retries every 5 seconds while the NameNode is in SafeMode; any other
    * failure (including the lock file already existing) aborts with
    * LockObtainFailedException.
    *
    * @throws UnsupportedOperationException if `dir` is not an HdfsDirectory
    * @throws LockObtainFailedException     if the lock file cannot be created
    */
  override def obtainLock(dir: Directory, lockName: String): Lock = {
    if( !(dir.isInstanceOf[HdfsDirectory])){
      throw new UnsupportedOperationException("HdfsLockFactory can only be used with HdfsDirectory subclasses, got dir"+dir)
    }
    val hdfsDir = dir.asInstanceOf[HdfsDirectory]
    val config = hdfsDir.config
    val lockPath = hdfsDir.path
    val lockFile = new Path(lockPath, lockName)
    // NOTE(review): `lockDir` is computed but never used.
    val lockDir = lockPath.toUri.getPath
    val fs: FileSystem = FileSystem.get(lockPath.toUri, config)
    // NOTE(review): `file` (the created lock stream) is never closed here.
    var file: FSDataOutputStream = null
    var loop = true
    while (loop){
      try{
        if(!fs.exists(lockPath)){
          val success =fs.mkdirs(lockPath)
          if(!success) throw new RuntimeException("Could not create directory:"+lockPath)
        }else{
          // just to check for safe mode
          fs.mkdirs(lockPath)
        }
        // Atomic create with overwrite=false: failure means someone else holds the lock.
        file = fs.create(lockFile, false)
        loop = false
      }catch {
        case e: FileAlreadyExistsException => throw new LockObtainFailedException("Cannot obtain lock file:"+lockFile, e)
        case e: RemoteException => {
          if(e.getClassName.equals("org.apache.hadoop.hdfs.server.namenode.SafeModeException")){
            // NameNode not yet writable: back off and retry the loop.
            logWarning("The NameNode is in SafeMode - wait 5 seconds and try again")
            try{
              Thread.sleep(5000)
            }catch {
              case e2: InterruptedException => Thread.interrupted()
            }
          }else{
            throw new LockObtainFailedException("Cannot obtain lock file: " + lockFile, e);
          }
        }
        case e: IOException => throw new LockObtainFailedException("Cannot obtain lock file:"+lockFile, e)
      }finally {
        // NOTE(review): this finally runs on EVERY loop iteration, so on a
        // SafeMode retry the next iteration reuses `fs` after FsUtils.close(fs)
        // has been called on it. Looks like a bug unless FsUtils.close is a
        // no-op / the FileSystem is cached — confirm FsUtils semantics.
        FsUtils.close(fs)
      }
    }
    new HdfsLock(config, lockFile)
  }
}

/** Lock handle for a lock file created by HdfsLockFactory; close() deletes the file. */
class HdfsLock(val conf: Configuration, val lockFile: Path) extends Lock{

  // Guards against double-release; flipped once by close().
  @volatile
  var closed: Boolean = false
  private val lock = new Object()

  override def ensureValid(): Unit = {}

  /** Releases the lock by deleting the lock file (idempotent).
    * @throws LockReleaseFailedException if the file still exists after deletion
    */
  override def close(): Unit = {
    if(!closed){
      closed = true
      // NOTE(review): `dir` is computed but never used.
      val dir = lockFile.toUri.getPath
      // NOTE(review): `fs` is a def, so exists/delete/close below each invoke
      // FsUtils.get again and may operate on distinct FileSystem instances —
      // confirm FsUtils.get returns a cached instance, otherwise make this a val.
      def fs: FileSystem = FsUtils.get(lockFile, conf)
      try{
        if(fs.exists(lockFile)){
          try{
            fs.delete(lockFile, false)
          }catch {
            case ed: Exception => println("ed exception "+ed.getMessage)
          }
          if(fs.exists(lockFile)){
            throw new LockReleaseFailedException("failed to delete: " + lockFile)
          }
        }
      }catch {
        case e: Exception => println(" e exception "+e.getMessage)
      }finally {
        FsUtils.close(fs)
      }
    }
  }

  override def toString: String = {
    "HdfsLock(lockFile="+lockFile+")"
  }
}

/** Convenience constructor. */
object HdfsLockFactory{
  def apply(): HdfsLockFactory = new HdfsLockFactory()
}
myetl/sparkLu
src/main/scala/top/myetl/lucenerdd/store/HdfsLockFactory.scala
Scala
apache-2.0
3,256
package dbtile

import org.scalatest.FunSuite
import org.scalatest.BeforeAndAfter

import java.io.File

/** Tests for SqlQueryLookup: raw template retrieval and {{token}} substitution.
 *
 *  Fix: the template directory was a hard-coded absolute path on one
 *  developer's machine (/Users/marcin/...), so the suite failed everywhere
 *  else. It now resolves the same directory relative to the project root,
 *  which is where sbt runs tests from.
 */
class SqlQueryLookupTest extends FunSuite {

  // Template directory resolved relative to the working directory (project root under sbt).
  val sqlLookup = new SqlQueryLookup(new File("dbtile/src/test/resources/sql/"))

  test ("should return raw contents of SQL template") {
    val sql = sqlLookup.get("default").get
    // Template must come back verbatim, with {{tokens}} untouched.
    assert (sql == "SELECT MAX(ROUND(latitude, {{round}})) AS latitude, MAX(ROUND(longitude, {{round}})) AS longitude FROM {{table_name}} GROUP BY CONCAT(ROUND(latitude, {{round}}), ROUND(longitude, {{round}}))")
  }

  test ("should replace tokens define in tokens map") {
    val tokens = Map(
      "round" -> "2",
      "table_name" -> "my_table"
    )
    val sql = sqlLookup.getWithReplace("default", tokens).get
    // Every occurrence of each token must be substituted.
    assert (sql == "SELECT MAX(ROUND(latitude, 2)) AS latitude, MAX(ROUND(longitude, 2)) AS longitude FROM my_table GROUP BY CONCAT(ROUND(latitude, 2), ROUND(longitude, 2))")
  }

  test ("should replace all tokens in SQL template") (pending)
}
martez81/dbtile
dbtile/src/test/scala/dbtile/SqlQueryLookupTest.scala
Scala
mit
1,079
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.table.planner.plan.rules.physical.stream

import org.apache.flink.table.api.TableException
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.plan.nodes.FlinkRelNode
import org.apache.flink.table.planner.plan.nodes.logical.{FlinkLogicalJoin, FlinkLogicalRel, FlinkLogicalSnapshot}
import org.apache.flink.table.planner.plan.nodes.physical.stream.StreamPhysicalJoin
import org.apache.flink.table.planner.plan.utils.{IntervalJoinUtil, TemporalJoinUtil}
import org.apache.flink.table.planner.plan.utils.WindowJoinUtil.containsWindowStartEqualityAndEndEquality

import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall, RelTraitSet}
import org.apache.calcite.rel.RelNode

import scala.collection.JavaConversions._

/**
 * Rule that converts [[FlinkLogicalJoin]] without window bounds in join condition
 * to [[StreamPhysicalJoin]].
 */
class StreamPhysicalJoinRule
  extends StreamPhysicalJoinRuleBase("StreamPhysicalJoinRule") {

  /** Matches only "regular" streaming joins: rejects temporal-table joins,
   *  interval (window-bounded) joins and window joins, which are handled by
   *  other rules. Returns true when this rule should fire.
   *
   *  @throws TableException for unsupported shapes (snapshot on the left,
   *          rowtime attribute in the join output)
   */
  override def matches(call: RelOptRuleCall): Boolean = {
    val join: FlinkLogicalJoin = call.rel(0)
    if (!join.getJoinType.projectsRight) {
      // SEMI/ANTI join always converts to StreamExecJoin now
      return true
    }
    val left: FlinkLogicalRel = call.rel(1).asInstanceOf[FlinkLogicalRel]
    val right: FlinkLogicalRel = call.rel(2).asInstanceOf[FlinkLogicalRel]
    val joinRowType = join.getRowType

    // FOR SYSTEM_TIME AS OF is only supported on the right input.
    if (left.isInstanceOf[FlinkLogicalSnapshot]) {
      throw new TableException(
        "Temporal table join only support apply FOR SYSTEM_TIME AS OF on the right table.")
    }

    // this rule shouldn't match temporal table join
    if (right.isInstanceOf[FlinkLogicalSnapshot] ||
        TemporalJoinUtil.containsTemporalJoinCondition(join.getCondition)) {
      return false
    }

    // Window-bounded conditions are interval joins — handled elsewhere.
    val (windowBounds, remainingPreds) = extractWindowBounds(join)
    if (windowBounds.isDefined) {
      return false
    }

    // Window start/end equality conditions belong to the window-join rule.
    if (containsWindowStartEqualityAndEndEquality(join)) {
      return false
    }

    // remaining predicate must not access time attributes
    val remainingPredsAccessTime = remainingPreds.isDefined &&
      IntervalJoinUtil.accessesTimeAttribute(remainingPreds.get, joinRowType)

    // A regular join cannot preserve rowtime: reject outputs carrying one.
    val rowTimeAttrInOutput = joinRowType.getFieldList
      .exists(f => FlinkTypeFactory.isRowtimeIndicatorType(f.getType))
    if (rowTimeAttrInOutput) {
      throw new TableException(
        "Rowtime attributes must not be in the input rows of a regular join. " +
          "As a workaround you can cast the time attributes of input tables to TIMESTAMP before.")
    }

    // joins require an equality condition
    // or a conjunctive predicate with at least one equality condition
    // and disable outer joins with non-equality predicates(see FLINK-5520)
    // And do not accept a FlinkLogicalTemporalTableSourceScan as right input
    !remainingPredsAccessTime
  }

  /** Builds the physical StreamPhysicalJoin node from the converted inputs. */
  override protected def transform(
      join: FlinkLogicalJoin,
      leftInput: FlinkRelNode,
      leftConversion: RelNode => RelNode,
      rightInput: FlinkRelNode,
      rightConversion: RelNode => RelNode,
      providedTraitSet: RelTraitSet): FlinkRelNode = {
    new StreamPhysicalJoin(
      join.getCluster,
      providedTraitSet,
      leftConversion(leftInput),
      rightConversion(rightInput),
      join.getCondition,
      join.getJoinType)
  }
}

object StreamPhysicalJoinRule {
  // Singleton instance registered with the planner.
  val INSTANCE: RelOptRule = new StreamPhysicalJoinRule
}
clarkyzl/flink
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/stream/StreamPhysicalJoinRule.scala
Scala
apache-2.0
4,272
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.mllib.evaluation

import java.{lang => jl}

import scala.collection.JavaConverters._
import scala.reflect.ClassTag

import org.apache.spark.annotation.Since
import org.apache.spark.api.java.{JavaRDD, JavaSparkContext}
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD

/**
 * Evaluator for ranking algorithms.
 *
 * Java users should use `RankingMetrics$.of` to create a [[RankingMetrics]] instance.
 *
 * @param predictionAndLabels an RDD of (predicted ranking, ground truth set) pairs.
 */
@Since("1.2.0")
class RankingMetrics[T: ClassTag](predictionAndLabels: RDD[(Array[T], Array[T])])
  extends Logging with Serializable {

  /**
   * Compute the average precision of all the queries, truncated at ranking position k.
   *
   * If for a query, the ranking algorithm returns n (n is less than k) results, the precision
   * value will be computed as #(relevant items retrieved) / k. This formula also applies when
   * the size of the ground truth set is less than k.
   *
   * If a query has an empty ground truth set, zero will be used as precision together with
   * a log warning.
   *
   * See the following paper for detail:
   *
   * IR evaluation methods for retrieving highly relevant documents. K. Jarvelin and J. Kekalainen
   *
   * @param k the position to compute the truncated precision, must be positive
   * @return the average precision at the first k ranking positions
   */
  @Since("1.2.0")
  def precisionAt(k: Int): Double = {
    require(k > 0, "ranking position k should be positive")
    predictionAndLabels.map { case (pred, lab) =>
      // Precision@k always divides by k, even when fewer than k items were returned.
      countRelevantItemRatio(pred, lab, k, k)
    }.mean()
  }

  /**
   * Returns the mean average precision (MAP) of all the queries.
   * If a query has an empty ground truth set, the average precision will be zero and a log
   * warning is generated.
   */
  lazy val meanAveragePrecision: Double = {
    predictionAndLabels.map { case (pred, lab) =>
      val labSet = lab.toSet
      // Untruncated MAP: consider the full prediction list (or the label set, whichever is larger).
      val k = math.max(pred.length, labSet.size)
      averagePrecision(pred, labSet, k)
    }.mean()
  }

  /**
   * Returns the mean average precision (MAP) at ranking position k of all the queries.
   * If a query has an empty ground truth set, the average precision will be zero and a log
   * warning is generated.
   * @param k the position to compute the truncated precision, must be positive
   * @return the mean average precision at first k ranking positions
   */
  @Since("3.0.0")
  def meanAveragePrecisionAt(k: Int): Double = {
    require(k > 0, "ranking position k should be positive")
    predictionAndLabels.map { case (pred, lab) =>
      averagePrecision(pred, lab.toSet, k)
    }.mean()
  }

  /**
   * Computes the average precision at first k ranking positions of all the queries.
   * If a query has an empty ground truth set, the value will be zero and a log
   * warning is generated.
   *
   * @param pred predicted ranking
   * @param lab ground truth
   * @param k use the top k predicted ranking, must be positive
   * @return average precision at first k ranking positions
   */
  private def averagePrecision(pred: Array[T], lab: Set[T], k: Int): Double = {
    if (lab.nonEmpty) {
      var i = 0
      var cnt = 0          // number of relevant items seen so far
      var precSum = 0.0    // running sum of precision@(i+1) at each hit
      val n = math.min(k, pred.length)
      while (i < n) {
        if (lab.contains(pred(i))) {
          cnt += 1
          precSum += cnt.toDouble / (i + 1)
        }
        i += 1
      }
      // Normalize by the best achievable number of hits within the cutoff.
      precSum / math.min(lab.size, k)
    } else {
      logWarning("Empty ground truth set, check input data")
      0.0
    }
  }

  /**
   * Compute the average NDCG value of all the queries, truncated at ranking position k.
   * The discounted cumulative gain at position k is computed as:
   * sum,,i=1,,^k^ (2^{relevance of ''i''th item}^ - 1) / log(i + 1),
   * and the NDCG is obtained by dividing the DCG value on the ground truth set. In the current
   * implementation, the relevance value is binary.
   * If a query has an empty ground truth set, zero will be used as ndcg together with
   * a log warning.
   *
   * See the following paper for detail:
   *
   * IR evaluation methods for retrieving highly relevant documents. K. Jarvelin and J. Kekalainen
   *
   * @param k the position to compute the truncated ndcg, must be positive
   * @return the average ndcg at the first k ranking positions
   */
  @Since("1.2.0")
  def ndcgAt(k: Int): Double = {
    require(k > 0, "ranking position k should be positive")
    predictionAndLabels.map { case (pred, lab) =>
      val labSet = lab.toSet
      if (labSet.nonEmpty) {
        val labSetSize = labSet.size
        val n = math.min(math.max(pred.length, labSetSize), k)
        var maxDcg = 0.0
        var dcg = 0.0
        var i = 0
        while (i < n) {
          // Base of the log doesn't matter for calculating NDCG,
          // if the relevance value is binary.
          val gain = 1.0 / math.log(i + 2)
          if (i < pred.length && labSet.contains(pred(i))) {
            dcg += gain
          }
          // Ideal ranking has all |labSet| relevant items first — accumulate its DCG.
          if (i < labSetSize) {
            maxDcg += gain
          }
          i += 1
        }
        dcg / maxDcg
      } else {
        logWarning("Empty ground truth set, check input data")
        0.0
      }
    }.mean()
  }

  /**
   * Compute the average recall of all the queries, truncated at ranking position k.
   *
   * If for a query, the ranking algorithm returns n results, the recall value will be
   * computed as #(relevant items retrieved) / #(ground truth set). This formula
   * also applies when the size of the ground truth set is less than k.
   *
   * If a query has an empty ground truth set, zero will be used as recall together with
   * a log warning.
   *
   * See the following paper for detail:
   *
   * IR evaluation methods for retrieving highly relevant documents. K. Jarvelin and J. Kekalainen
   *
   * @param k the position to compute the truncated recall, must be positive
   * @return the average recall at the first k ranking positions
   */
  @Since("3.0.0")
  def recallAt(k: Int): Double = {
    require(k > 0, "ranking position k should be positive")
    predictionAndLabels.map { case (pred, lab) =>
      // Recall@k divides by the ground-truth size, not by k.
      countRelevantItemRatio(pred, lab, k, lab.toSet.size)
    }.mean()
  }

  /**
   * Returns the relevant item ratio computed as #(relevant items retrieved) / denominator.
   * If a query has an empty ground truth set, the value will be zero and a log
   * warning is generated.
   *
   * @param pred predicted ranking
   * @param lab ground truth
   * @param k use the top k predicted ranking, must be positive
   * @param denominator the denominator of ratio
   * @return relevant item ratio at the first k ranking positions
   */
  private def countRelevantItemRatio(pred: Array[T],
                                     lab: Array[T],
                                     k: Int,
                                     denominator: Int): Double = {
    val labSet = lab.toSet
    if (labSet.nonEmpty) {
      val n = math.min(pred.length, k)
      var i = 0
      var cnt = 0  // hits among the top-n predictions
      while (i < n) {
        if (labSet.contains(pred(i))) {
          cnt += 1
        }
        i += 1
      }
      cnt.toDouble / denominator
    } else {
      logWarning("Empty ground truth set, check input data")
      0.0
    }
  }
}

object RankingMetrics {

  /**
   * Creates a [[RankingMetrics]] instance (for Java users).
   * @param predictionAndLabels a JavaRDD of (predicted ranking, ground truth set) pairs
   */
  @Since("1.4.0")
  def of[E, T <: jl.Iterable[E]](predictionAndLabels: JavaRDD[(T, T)]): RankingMetrics[E] = {
    // fakeClassTag: erased tag is sufficient since we only build arrays of AnyRef here.
    implicit val tag = JavaSparkContext.fakeClassTag[E]
    val rdd = predictionAndLabels.rdd.map { case (predictions, labels) =>
      (predictions.asScala.toArray, labels.asScala.toArray)
    }
    new RankingMetrics(rdd)
  }
}
LantaoJin/spark
mllib/src/main/scala/org/apache/spark/mllib/evaluation/RankingMetrics.scala
Scala
apache-2.0
8,652
/*
 * Copyright 2016 The BigDL Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intel.analytics.bigdl.nn.ops

import com.intel.analytics.bigdl.nn.abstractnn.Activity
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric

import scala.reflect.ClassTag

/** Operation producing a tensor of normally distributed values.
 *
 *  The input is a 1-D shape tensor; the output is resized to that shape and
 *  filled via `randn(mean, stddev)`.
 *
 *  @param mean   mean of the distribution (interpreted as Double)
 *  @param stddev standard deviation of the distribution (interpreted as Double)
 *  @param seed   NOTE(review): accepted but never used — randn draws from the
 *                default RNG; confirm whether the seed should be applied.
 */
class TruncatedNormal[T: ClassTag, DataType: ClassTag](
  mean: DataType = 0.0,
  stddev: DataType = 1.0,
  seed: Int = 0
)
  (implicit ev: TensorNumeric[T]) extends Operation[Tensor[Int], Tensor[DataType], T] {

  /** Resizes `output` to the requested shape and fills it with N(mean, stddev) samples.
   *
   *  @param input 1-D tensor whose entries are the output dimensions
   *  @throws IllegalArgumentException if `input` is not one-dimensional
   */
  def updateOutput(input: Tensor[Int]): Tensor[DataType] = {
    require(input.nDimension() == 1, "the shape should be a one-dimensional tensor.")

    // `input` is already statically typed Tensor[Int]; the original
    // `input.asInstanceOf[Tensor[Int]]` cast was redundant and has been removed.
    val shape = input.storage().toArray
    output.resize(shape).randn(
      mean.asInstanceOf[Double], stddev.asInstanceOf[Double])
    output
  }
}

object TruncatedNormal {
  /** Factory wrapping the operation for use as a generic module. */
  def apply[T: ClassTag, DataType: ClassTag](
    mean: Double = 0.0,
    stddev: Double = 1.0,
    seed: Int = 0)
    (implicit ev: TensorNumeric[T]): Operation[Activity, Activity, T] =
    ModuleToOperation[T](
      new TruncatedNormal(mean, stddev, seed))
}
jenniew/BigDL
spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/ops/TruncatedNormal.scala
Scala
apache-2.0
1,680
package mojave

import scala.reflect.runtime.universe

/** Reflection helpers for reading and (immutably) updating case-class fields.
 *
 *  Fixes:
 *  - `hasField` previously tested `getMethod(name) != null`, but
 *    `java.lang.Class.getMethod` never returns null — it throws
 *    NoSuchMethodException — so the method could never return false.
 *    It now probes `getMethods` and genuinely returns false for a missing field.
 *  - Removed the unused `private class Empty`.
 */
protected[mojave] object CaseClassFieldAccessor {
  import reflect._
  import scala.reflect.runtime._
  import scala.reflect.runtime.universe._

  // Shared runtime mirror; lazy so class loading happens on first use.
  private lazy val mirror = universe.runtimeMirror(getClass.getClassLoader)

  /** True when `obj` exposes a zero-argument public accessor named `paramName`. */
  def hasField(obj: Any, paramName: String): Boolean =
    obj.getClass.getMethods.exists(m => m.getName == paramName && m.getParameterCount == 0)

  /** Reads field `paramName` by invoking its accessor.
   *  @throws NoSuchMethodException if no such accessor exists
   */
  def getField(obj: Any, paramName: String): Any = {
    obj.getClass.getMethod(paramName).invoke(obj)
  }

  /** Returns a copy of case-class instance `obj` with field `paramName` set to
   *  `paramValue`, by reflectively invoking the generated `copy` method.
   *
   *  All other field values are read from `obj`. The accessor values are
   *  reversed to match the declared constructor-parameter order expected by
   *  `copy` (Scala reflection lists members in reverse declaration order).
   *
   *  @throws NoSuchMethodException if `obj` has no accessor named `paramName`
   *          (i.e. it is not a case class with that member)
   */
  def setField[R : ClassTag](obj: R, paramName: String, paramValue: Any): R = {
    val instanceMirror = mirror.reflect(obj)
    val decl = instanceMirror.symbol.asType.toType
    var found = false
    val parametersForCopyMethod = decl.members.filter(_.asTerm.isAccessor).map { method: Symbol =>
      if (method.asTerm.name.toString == paramName) {
        found = true
        paramValue
      } else {
        instanceMirror.reflectField(method.asTerm).get
      }
    }.toArray.reverse
    if (!found) {
      throw new NoSuchMethodException(s"Class ${obj.getClass.getName} is not a case class with member $paramName")
    }
    val copyMethod = decl.decl(TermName("copy")).asMethod
    val copyMethodInstance = instanceMirror.reflectMethod(copyMethod)
    copyMethodInstance(parametersForCopyMethod: _*).asInstanceOf[R]
  }
}
raimohanska/mojave
src/main/scala/mojave/CaseClassFieldAccessor.scala
Scala
mit
1,333
package org.scalaide.ui.internal.editor.decorators.semantichighlighting

import scala.collection.immutable

import org.eclipse.core.runtime.IProgressMonitor
import org.eclipse.core.runtime.jobs.Job
import org.eclipse.jdt.core.dom.CompilationUnit
import org.eclipse.jdt.internal.ui.javaeditor.JavaSourceViewer
import org.eclipse.jdt.internal.ui.text.java.IJavaReconcilingListener
import org.eclipse.jface.text.IRegion
import org.eclipse.jface.text.ITextPresentationListener
import org.eclipse.jface.text.TextPresentation
import org.eclipse.jface.util.IPropertyChangeListener
import org.eclipse.jface.util.PropertyChangeEvent
import org.eclipse.swt.custom.StyleRange
import org.scalaide.core.internal.decorators.semantichighlighting.PositionsTracker
import org.scalaide.core.internal.decorators.semantichighlighting.classifier.SymbolTypes
import org.scalaide.logging.HasLogger
import org.scalaide.ui.internal.editor.ScalaCompilationUnitEditor
import org.scalaide.ui.syntax.ScalaSyntaxClasses
import org.scalaide.util.eclipse.EditorUtils

/** This class is responsible for:
 *
 *  - Triggering the semantic highlighting job as soon as the [[org.eclipse.jdt.internal.ui.text.JavaReconciler]]
 *    has finished reconciling the opened compilation unit.
 *
 *  - Updating the editor's text presentation with the up-to-date semantic highlighting styles.
 *
 *  @note All accesses to this class are confined to the UI Thread.
 */
private class TextPresentationEditorHighlighter(editor: ScalaCompilationUnitEditor, preferences: Preferences, addReconcilingListener: IJavaReconcilingListener => Unit, removeReconcilingListener: IJavaReconcilingListener => Unit) extends TextPresentationHighlighter {

  import TextPresentationEditorHighlighter._

  // Both listeners are created in `initialize` and torn down in `dispose`.
  // `@volatile` because they are read/written across the initialize/dispose lifecycle.
  @volatile private var highlightingOnReconciliation: IJavaReconcilingListener = _
  @volatile private var textPresentationChangeListener: ApplyHighlightingTextPresentationChanges = _

  /** Wires up the two listeners: one reschedules the highlighting job after each
   *  reconciliation, the other applies the computed styles to the text presentation
   *  and reacts to preference changes.
   */
  override def initialize(semanticHighlightingJob: Job, positionsTracker: PositionsTracker): Unit = {
    highlightingOnReconciliation = new PerformSemanticHighlightingOnReconcilation(semanticHighlightingJob)
    textPresentationChangeListener = new ApplyHighlightingTextPresentationChanges(semanticHighlightingJob, positionsTracker, preferences)
    Option(preferences.store) foreach (_.addPropertyChangeListener(textPresentationChangeListener))
    addReconcilingListener(highlightingOnReconciliation)
    // it's important to prepend the listener or semantic highlighting coloring will hide the style applied for hyperlinking when the
    // user hovers on a semantically highlighted binding.
    Option(sourceViewer) foreach (_.prependTextPresentationListener(textPresentationChangeListener))
  }

  /** Unregisters everything `initialize` registered (mirror image, same three hooks). */
  override def dispose(): Unit = {
    Option(preferences.store) foreach (_.removePropertyChangeListener(textPresentationChangeListener))
    removeReconcilingListener(highlightingOnReconciliation)
    Option(sourceViewer) foreach (_.removeTextPresentationListener(textPresentationChangeListener))
  }

  override def sourceViewer: JavaSourceViewer = editor.sourceViewer

  /** Repairs the styles of the damaged region only; falls back to invalidating the
   *  whole editor's presentation when no repair description can be computed.
   */
  override def updateTextPresentation(damage: IRegion): Unit = {
    val textPresentation = createRepairDescription(damage)
    textPresentation match {
      case None => sourceViewer.invalidateTextPresentation() // invalidate the whole editor's text presentation
      case Some(tp) => sourceViewer.changeTextPresentation(tp, /*controlRedraw=*/ false)
    }
  }

  /** Asks the editor's presentation reconciler to describe how to repair `damage`.
   *  Returns None when the viewer has no document (e.g. editor being closed).
   */
  private def createRepairDescription(damage: IRegion): Option[TextPresentation] = EditorUtils.withDocument(sourceViewer) { document =>
    val configuration = editor.createJavaSourceViewerConfiguration()
    val presentationReconciler = configuration.getPresentationReconciler(sourceViewer)
    presentationReconciler.createRepairDescription(damage, document)
  }
}

object TextPresentationEditorHighlighter {

  /** Factory kept as the only public entry point; the class itself is private. */
  def apply(editor: ScalaCompilationUnitEditor, preferences: Preferences, addReconcilingListener: IJavaReconcilingListener => Unit, removeReconcilingListener: IJavaReconcilingListener => Unit): TextPresentationHighlighter =
    new TextPresentationEditorHighlighter(editor, preferences, addReconcilingListener, removeReconcilingListener)

  /** Reschedules the semantic highlighting job each time the JDT reconciler finishes. */
  private class PerformSemanticHighlightingOnReconcilation(semanticHighlightingJob: Job) extends IJavaReconcilingListener {
    override def aboutToBeReconciled(): Unit = ()
    override def reconciled(ast: CompilationUnit, forced: Boolean, progressMonitor: IProgressMonitor): Unit = {
      /* There is no need to call `semanticHighlightingJob.cancel()` here because the document has a listener that
       * already cancels the ongoing semantic highlighting job whenever the document is about to be changed. And `this`
       * reconciling listener always gets executed '''after''' the aforementioned listener (check
       * [[org.scalaide.ui.internal.editors.decorators.semantichighlighting.Presenter$DocumentContentListener]] for more details).
       *
       * Furthermore, a new semantic highlighting job run is only scheduled if the ongoing reconciliation has not been
       * cancelled. If it was cancelled, this usually means that the editor was closed, or the document was changed.
       * If the editor was closed, there is clearly no need for reconciling. While, if the document changed, then the
       * compilation unit will soon be reconciled again.
       */
      if (!progressMonitor.isCanceled()) semanticHighlightingJob.schedule()
    }
  }

  /** This class is responsible for applying the semantic highlighting styles in the editor.
   *
   *  @note Mind that the implementation needs to be blazing fast because `applyTextPresentation` is called at '''every'''
   *        keystroke (and, often, more than once). If it takes more than a few milliseconds to execute, users will perceive
   *        the slow-down when typing.
   *
   *  @param positionsTracker Holds the semantic positions that need to be colored in the editor.
   *  @param preferences      The user's preferences.
   */
  private class ApplyHighlightingTextPresentationChanges(reconciler: Job, positionsTracker: PositionsTracker, preferences: Preferences) extends IPropertyChangeListener with ITextPresentationListener with HasLogger {

    // Cache mapping each symbol type to its current highlighting style; rebuilt
    // entry-by-entry in `invalidateSymTypes` when the related preference changes.
    private var semanticCategory2style: immutable.Map[SymbolTypes.SymbolType, HighlightingStyle] = {
      (for (symType <- SymbolTypes.values) yield (symType -> HighlightingStyle(preferences, symType)))(collection.breakOut)
    }

    /** Invalidates the cached styles affected by a preference change and reschedules highlighting. */
    override def propertyChange(event: PropertyChangeEvent): Unit = {
      if (event.getProperty().startsWith(ScalaSyntaxClasses.IDENTIFIER_IN_INTERPOLATED_STRING.baseName + ".")) {
        // The interpolated-string setting can affect identifiers of any kind, so collect
        // the kinds actually present in interpolated strings and invalidate all of them.
        val syms: Set[SymbolTypes.SymbolType] = positionsTracker.identifiersInInterpolatedStrings.map(_.kind)(collection.breakOut)
        invalidateSymTypes(syms.toSeq: _*)
      } else {
        // Otherwise map the changed preference key back to the single symbol type it styles.
        for {
          semanticCategory <- ScalaSyntaxClasses.scalaSemanticCategory.children
          if event.getProperty().startsWith(semanticCategory.baseName)
          symType <- SymbolTypes.values.find(HighlightingStyle.symbolTypeToSyntaxClass(_) == semanticCategory)
          if symType.isInstanceOf[SymbolTypes.SymbolType]
        } invalidateSymTypes(symType)
      }
    }

    /** Refreshes the cached style of each given symbol type, drops its tracked
     *  positions, and schedules a new highlighting run.
     */
    private def invalidateSymTypes(symTypes: SymbolTypes.SymbolType*): Unit = {
      for (symType <- symTypes) {
        semanticCategory2style += symType -> HighlightingStyle(preferences, symType)
        positionsTracker.deletesPositionsOfType(symType)
      }
      reconciler.schedule()
    }

    /** Hot path (runs on every keystroke): restyles only the positions inside the damaged region. */
    override def applyTextPresentation(textPresentation: TextPresentation): Unit = {
      val damagedRegion = textPresentation.getExtent() // Portion of the editor whose styles needs to be recomputed.
      val positions = positionsTracker.positionsInRegion(damagedRegion)
      val styles: Array[StyleRange] = {
        for {
          position <- positions
          style = semanticCategory2style(position.kind)
          if (style.enabled || position.shouldStyle) && !position.isDeleted()
        } yield style.style(position)
      }
      textPresentation.replaceStyleRanges(styles)
    }
  }
}
scala-ide/scala-ide
org.scala-ide.sdt.core/src/org/scalaide/ui/internal/editor/decorators/semantichighlighting/TextPresentationEditorHighlighter.scala
Scala
bsd-3-clause
8,143