code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package models
import play.Logger
import play.api.libs.json.Json
import reactivemongo.bson._
// necessaire (implict ... Json.format)
import play.modules.reactivemongo.json.BSONFormats.BSONObjectIDFormat
/**
* User: Louis TOURNAYRE
*/
/** Immutable record describing a site.
  *
  * @param id       MongoDB object id; None until the document has been persisted
  * @param name     site name (required)
  * @param activity activity label for the site (required)
  * @param adress   street address, if known (spelling kept as-is: it is also the BSON field name)
  * @param zipcode  postal code, if known
  * @param city     city, if known
  * @param country  country, if known
  * @param loc      geographic location (required; see [[Location]])
  */
case class Site(id: Option[BSONObjectID], name: String, activity: String, adress: Option[String], zipcode: Option[String],
  city: Option[String], country: Option[String], loc: Location)
/** JSON and BSON (de)serializers for [[Site]]. */
object Site {
  // Play-JSON format derived by macro; relies on the implicit
  // BSONObjectIDFormat imported above to handle the `id` field.
  implicit val siteFormat = Json.format[Site]

  /** Serializes a [[Site]] into a BSON document. */
  implicit object SiteBSONWriter extends BSONDocumentWriter[Site] {
    def write(site: Site): BSONDocument =
      BSONDocument(
        // A fresh ObjectID is generated when the site has none yet.
        // NOTE(review): the generated id is not written back into `site`,
        // so the caller never observes which _id was assigned — confirm intended.
        "_id" -> site.id.getOrElse(BSONObjectID.generate),
        "name" -> site.name,
        "activity" -> site.activity,
        "adress" -> site.adress,
        "zipcode" -> site.zipcode,
        "city" -> site.city,
        "country" -> site.country,
        "loc" -> site.loc
      )
  }

  /** Deserializes a [[Site]] from a BSON document.
    *
    * `name`, `activity` and `loc` are treated as required: `.get` throws
    * `NoSuchElementException` if the field is absent or of the wrong BSON type.
    */
  implicit object SiteBSONReader extends BSONDocumentReader[Site] {
    def read(doc: BSONDocument): Site =
      Site(
        doc.getAs[BSONObjectID]("_id"),
        doc.getAs[String]("name").get,
        doc.getAs[String]("activity").get,
        doc.getAs[String]("adress"),
        doc.getAs[String]("zipcode"),
        doc.getAs[String]("city"),
        doc.getAs[String]("country"),
        doc.getAs[Location]("loc").get
      )
  }
}
| louidji/eventual | app/models/Site.scala | Scala | gpl-3.0 | 1,376 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.batch.sql
import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.table.api.scala._
import org.apache.flink.table.runtime.utils.{CommonTestData, TableProgramsCollectionTestBase}
import org.apache.flink.table.runtime.utils.TableProgramsTestBase.TableConfigMode
import org.apache.flink.test.util.TestBaseUtils
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import scala.collection.JavaConverters._
/** Batch-SQL integration tests for CSV and nested table sources.
  *
  * Runs once per [[TableConfigMode]] supplied by the Parameterized runner;
  * each test registers a table source, runs a SQL query and compares the
  * collected rows as text (order-insensitive via compareResultAsText).
  */
@RunWith(classOf[Parameterized])
class TableSourceITCase(
    configMode: TableConfigMode)
  extends TableProgramsCollectionTestBase(configMode) {

  /** All rows and columns of the CSV source are readable through SQL. */
  @Test
  def testCsvTableSource(): Unit = {
    val csvTable = CommonTestData.getCsvTableSource
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env, config)
    tEnv.registerTableSource("csvTable", csvTable)
    // `first` and `last` are reserved-ish identifiers, hence the backticks.
    val results = tEnv.sqlQuery(
      "SELECT id, `first`, `last`, score FROM csvTable").collect()
    val expected = Seq(
      "1,Mike,Smith,12.3",
      "2,Bob,Taylor,45.6",
      "3,Sam,Miller,7.89",
      "4,Peter,Smith,0.12",
      "5,Liz,Williams,34.5",
      "6,Sally,Miller,6.78",
      "7,Alice,Smith,90.1",
      "8,Kelly,Williams,2.34").mkString("\n")
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  /** Empty CSV columns surface as SQL NULLs (rendered as "null"). */
  @Test
  def testCsvTableSourceWithEmptyColumn(): Unit = {
    val csvTable = CommonTestData.getCsvTableSourceWithEmptyColumn
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = BatchTableEnvironment.create(env, config)
    tEnv.registerTableSource("csvTable", csvTable)
    val results = tEnv.sqlQuery(
      "SELECT id, `first`, `last`, score FROM csvTable").collect()
    val expected = Seq(
      "1,Mike,Smith,12.3",
      "2,Bob,Taylor,null",
      "null,Leonard,null,null").mkString("\n")
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }

  /** Nested fields (address.street / address.city) can be projected and filtered. */
  @Test
  def testNested(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tableEnv = BatchTableEnvironment.create(env, config)
    val nestedTable = CommonTestData.getNestedTableSource
    tableEnv.registerTableSource("NestedPersons", nestedTable)
    val result = tableEnv.sqlQuery("SELECT NestedPersons.firstName, NestedPersons.lastName," +
      "NestedPersons.address.street, NestedPersons.address.city AS city " +
      "FROM NestedPersons " +
      "WHERE NestedPersons.address.city LIKE 'Dublin'").collect()
    val expected = "Bob,Taylor,Pearse Street,Dublin"
    TestBaseUtils.compareResultAsText(result.asJava, expected)
  }
}
| gyfora/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/runtime/batch/sql/TableSourceITCase.scala | Scala | apache-2.0 | 3,434 |
/*
* Copyright (c) 2013 Scott Abernethy.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package gate
import org.squeryl.PrimitiveTypeMode._
import model.Mythos._
import model._
import state._
import akka.actor.{Actor, ActorRef}
import play.api.Logger
// TODO merge with Cloner
/** Asks a [[Presenter]] to run the external presenter process for `job`. */
case class StartPresenting(job: Presence)
/** Reply sent to the original requester once the attempt finished. */
case class FinishedPresenting(job: Presence, success: Boolean)
/** Aborts the in-flight presenting attempt, if any. */
case object CancelPresenting
/** One-shot actor that runs an external "presenter" command for a single
  * [[Presence]], records the outcome in the database, notifies interested
  * parties and then stops itself.
  *
  * At most one job is handled at a time: a StartPresenting arriving while
  * `cur` is set is silently ignored by the receive guard.
  */
class Presenter(val processs: Processs, val watcher: ActorRef, val artifactServer: ActorRef) extends Actor {
  // Job currently being presented, if any.
  var cur: Option[Presence] = None
  // Actor to send FinishedPresenting to; dead letters until a job is accepted.
  var requester: ActorRef = context.system.deadLetters
  // Handle used to kill the running external process on cancel; no-op by default.
  var destroyHandle: Destroyable = new NonDestroyable
  def receive = {
    case StartPresenting(job) if (cur.isEmpty) => {
      requester = sender
      start(job)
    }
    // CancelPresenting and the legacy 'Cancel symbol are equivalent.
    case CancelPresenting => {
      cancel
    }
    case 'Cancel => {
      cancel
    }
    // Exit of the external process (also synthesized on future failure below).
    case exit: Exit => {
      cur.foreach(attempted(_, exit))
    }
  }
  /** Marks `presence` as presenting, persists it, and launches the external
    * command. Missing artifact/local path fails the attempt immediately.
    */
  def start(presence: Presence) {
    Logger.debug(this + " start " + presence)
    presence.state = PresenceState.presenting
    presence.attempted = T.now
    transaction { presences.insertOrUpdate(presence) }
    cur = Some(presence)
    artifactServer ! ArtifactTouched(ArtifactPresenting, presence.artifactId)
    transaction {
      // todo fix with better comprehension
      for {
        src <- presence.artifact.flatMap(_.localPath)
        dest = presence.artifactId.toString
      } yield ("presenter" :: escapeString(src) :: escapeString(dest) :: Nil)
    } match {
      case Some(command) => {
        val (future, destroyable) = processs.start(command)
        destroyHandle = destroyable
        import util.Context.defaultOperations
        // Route both outcomes back through this actor's mailbox as Exit
        // messages so all state changes happen on the actor thread.
        future.onFailure {
          case e: Exception => self ! Exit(-1, e.getMessage :: Nil, -1)
        }
        future.onSuccess {
          case exit: Exit => self ! exit
        }
      }
      case None => {
        failedAttempt(presence)
      }
    }
  }
  /** Destroys the running process (if any) and records a failed attempt. */
  def cancel {
    destroyHandle.destroy
    destroyHandle = new NonDestroyable
    cur.foreach(failedAttempt _)
  }
  /** Persists the attempt result, notifies watcher/artifactServer/requester,
    * and stops this actor (the Presenter is single-use).
    */
  def attempted(p: Presence, result: Exit) {
    Logger.debug("Presenter result " + result)
    val success = result.exitValue == 0
    p.state = if (success) PresenceState.present else PresenceState.called
    p.attempts = p.attempts + 1
    p.duration = result.duration
    transaction { presences.update(p) }
    cur = None
    if (success) {
      watcher ! 'Wake
    } else {
      watcher ! PresenceFailed(p)
    }
    artifactServer ! ArtifactTouched(if (success) ArtifactPresented else ArtifactPresentFailed, p.artifactId)
    requester ! FinishedPresenting(p, success)
    context.stop(self)
  }
  /** Records a failed attempt with a synthetic non-zero exit. */
  def failedAttempt(p: Presence) {
    attempted(p, Exit(-1, Nil, -1))
  }
  def escapeString(in: String): String = {
    in // no escaping necessary here
  }
}
| scott-abernethy/opener-of-the-way | app/gate/Presenter.scala | Scala | gpl-3.0 | 3,467 |
package de.tu_berlin.impro3.spark.spatio_temporal_dynamics.parsers
import java.util.Date
import de.tu_berlin.impro3.spark.spatio_temporal_dynamics._
import model.Tweet
import org.junit.runner.RunWith
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import org.scalatest.prop.PropertyChecks
/** Property-based round-trip tests for the tweet parsers: each parser must
  * reconstruct an arbitrary [[Tweet]] from the textual representation built
  * by the corresponding to* helper below.
  */
@RunWith(classOf[JUnitRunner])
class ParserSpec extends PropSpec with PropertyChecks with Matchers {
  val jsonParser = new JsonParser
  val jaxParser = new JaxParser
  val csvParser = new CsvParser
  val tabParser = new TabularParser
  /** Renders a tweet in the Twitter JSON shape expected by Json/Jax parsers. */
  def toJson(tweet: Tweet) = {
    // dateFormat is a ThreadLocal (hence `.get`), shared with the parser under test.
    val date = jsonParser.dateFormat.get.format(new Date(tweet.time))
    val tags = tweet.tags.map { tag => s"""{"text":"$tag"}"""}.mkString(",")
    val gps = tweet.gps match {
      // JSON coordinates are (lon, lat) — reversed relative to the model's (lat, lon).
      case Some((lat, lon)) => s"""{"coordinates":[$lon, $lat]}"""
      case None => "null"
    }
    s"""{"text":"${tweet.text}",
       | "user":{"id_str":"${tweet.user}"},
       | "created_at":"$date",
       | "entities":{"hashtags":[$tags]},
       | "coordinates":$gps}""".stripMargin
  }
  /** Renders a tweet as a CSV line; tweets without GPS get (0.0, 0.0). */
  def toCsv(tweet: Tweet) = {
    val date = csvParser.dateFormat.get.format(new Date(tweet.time))
    val (lat, lon) = tweet.gps match {
      case Some(gps) => gps
      case None => (0.0, 0.0)
    }
    val seq = Seq(tweet.user, date, tweet.text, 0, lon, lat)
    seq.mkString(csvParser.separator.toString)
  }
  /** Renders a tweet in the tabular format; tweets without GPS get (0.0, 0.0). */
  def toTabular(tweet: Tweet) = {
    val user = "USER_" + tweet.user
    val date = tabParser.dateFormat.get.format(new Date(tweet.time))
    val (lat, lon) = tweet.gps match {
      case Some(gps) => gps
      case None => (0.0, 0.0)
    }
    val seq = Seq(user, date, tweet.gps, lat, lon, tweet.text)
    seq.mkString(tabParser.separator.toString)
  }
  property("A JsonParser should parse arbitrary Tweets from JSON format") {
    forAll { tweet: Tweet =>
      val option = jsonParser.parse(toJson(tweet))
      option should be ('defined)
      option.get shouldMatchTweet tweet
    }
  }
  property("A JaxParser should parse arbitrary Tweets from JSON format") {
    forAll { tweet: Tweet =>
      val option = jaxParser.parse(toJson(tweet))
      option should be ('defined)
      option.get shouldMatchTweet tweet
    }
  }
  property("A CsvParser should parse arbitrary Tweets from CSV format") {
    forAll { tweet: Tweet =>
      // CSV cannot express a missing position, so only GPS-tagged tweets round-trip.
      whenever(tweet.gps.isDefined) {
        val option = csvParser.parse(toCsv(tweet))
        option should be ('defined)
        option.get shouldMatchTweet tweet
      }
    }
  }
  property("A TabularParser should parse arbitrary Tweets from tab. format") {
    forAll { tweet: Tweet =>
      whenever(tweet.gps.isDefined) {
        val option = tabParser.parse(toTabular(tweet))
        option should be ('defined)
        option.get shouldMatchTweet tweet
      }
    }
  }
}
| joroKr21/spatio-temporal-dynamics | impro3-ws14-spark/src/test/scala/de/tu_berlin/impro3/spark/spatio_temporal_dynamics/parsers/ParserSpec.scala | Scala | apache-2.0 | 2,820 |
package br.gov.lexml.swing.componentes
import br.gov.lexml.swing.componentes.models.ListChoiceActionApprover
/** A [[ListChoiceActionApprover]] that unconditionally approves every
  * addition, removal and move request.
  */
class DefaultListChoiceActionApprover[T] extends ListChoiceActionApprover[T] {
  override def additionApproved(e: T): Boolean = true
  override def removalApproved(e: T, p: Int): Boolean = true
  override def moveApproved(e: T, p: Int): Boolean = true
}
package core
import akka.actor.Actor
import com.datastax.driver.core.{BoundStatement, Cluster, Row}
import domain.Tweet
import core.TweetReaderActor.{CountAll, FindAll}
import com.datastax.driver.core.querybuilder.QueryBuilder
/** Message protocol for [[TweetReaderActor]]. */
object TweetReaderActor {
  /** Requests up to `maximum` tweets; replies with a Vector[Tweet]. */
  case class FindAll(maximum: Int = 100)
  /** Requests the total tweet count; replies with a Long. */
  case object CountAll
}
/** Actor that reads tweets from Cassandra and pipes the async results back
  * to the requesting sender (no blocking in the actor itself).
  */
class TweetReaderActor(cluster: Cluster) extends Actor {
  val session = cluster.connect(Keyspaces.akkaCassandra)
  // Prepared once per actor; count(*) over the whole tweets table.
  val countAll = new BoundStatement(session.prepare("select count(*) from tweets;"))
  import scala.collection.JavaConversions._
  import cassandra.resultset._
  import context.dispatcher
  import akka.pattern.pipe
  /** Maps a Cassandra row onto the Tweet domain object. */
  def buildTweet(r: Row): Tweet = {
    val id = r.getString("key")
    val user = r.getString("user_user")
    val text = r.getString("text")
    val createdAt = r.getDate("createdat")
    Tweet(id, user, text, createdAt)
  }
  def receive: Receive = {
    case FindAll(maximum) =>
      // Async select; the future's Vector[Tweet] is piped to the sender.
      val query = QueryBuilder.select().all().from(Keyspaces.akkaCassandra, "tweets").limit(maximum)
      session.executeAsync(query) map(_.all().map(buildTweet).toVector) pipeTo sender
    case CountAll =>
      // Replies with the Long count from the single-row result.
      session.executeAsync(countAll) map(_.one.getLong(0)) pipeTo sender
  }
}
| eigengo/activator-akka-cassandra | src/main/scala/core/tweetread.scala | Scala | apache-2.0 | 1,226 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.integration
import kafka.utils.{ZKGroupTopicDirs, Logging}
import kafka.consumer.{ConsumerTimeoutException, ConsumerConfig, ConsumerConnector, Consumer}
import kafka.server._
import kafka.utils.TestUtils
import kafka.serializer._
import kafka.producer.{Producer, KeyedMessage}
import org.junit.Test
import org.apache.log4j.{Level, Logger}
import org.scalatest.junit.JUnit3Suite
import junit.framework.Assert._
/** Integration tests for the consumer's `auto.offset.reset` behaviour:
  * with "smallest" an out-of-range offset resets to the earliest message,
  * with "largest" it resets to the latest (so nothing is consumed).
  */
class AutoOffsetResetTest extends JUnit3Suite with KafkaServerTestHarness with Logging {
  val configs = List(KafkaConfig.fromProps(TestUtils.createBrokerConfig(0)))
  val topic = "test_topic"
  val group = "default_group"
  val testConsumer = "consumer"
  val NumMessages = 10
  // Offsets deliberately outside the valid range to force a reset.
  val LargeOffset = 10000
  val SmallOffset = -1
  val requestHandlerLogger = Logger.getLogger(classOf[kafka.server.KafkaRequestHandler])
  override def setUp() {
    super.setUp()
    // temporarily set request handler logger to a higher level
    requestHandlerLogger.setLevel(Level.FATAL)
  }
  override def tearDown() {
    // restore set request handler logger to a higher level
    requestHandlerLogger.setLevel(Level.ERROR)
    super.tearDown
  }
  @Test
  def testResetToEarliestWhenOffsetTooHigh() =
    assertEquals(NumMessages, resetAndConsume(NumMessages, "smallest", LargeOffset))
  @Test
  def testResetToEarliestWhenOffsetTooLow() =
    assertEquals(NumMessages, resetAndConsume(NumMessages, "smallest", SmallOffset))
  @Test
  def testResetToLatestWhenOffsetTooHigh() =
    assertEquals(0, resetAndConsume(NumMessages, "largest", LargeOffset))
  @Test
  def testResetToLatestWhenOffsetTooLow() =
    assertEquals(0, resetAndConsume(NumMessages, "largest", SmallOffset))
  /* Produce the given number of messages, create a consumer with the given offset policy,
   * then reset the offset to the given value and consume until we get no new messages.
   * Returns the count of messages received.
   */
  def resetAndConsume(numMessages: Int, resetTo: String, offset: Long): Int = {
    TestUtils.createTopic(zkClient, topic, 1, 1, servers)
    val producer: Producer[String, Array[Byte]] = TestUtils.createProducer(
      TestUtils.getBrokerListStrFromConfigs(configs),
      keyEncoder = classOf[StringEncoder].getName)
    for(i <- 0 until numMessages)
      producer.send(new KeyedMessage[String, Array[Byte]](topic, topic, "test".getBytes))
    // update offset in zookeeper for consumer to jump "forward" in time
    val dirs = new ZKGroupTopicDirs(group, topic)
    val consumerProps = TestUtils.createConsumerProperties(zkConnect, group, testConsumer)
    consumerProps.put("auto.offset.reset", resetTo)
    // Short timeout so the consume loop terminates once no more messages arrive.
    consumerProps.put("consumer.timeout.ms", "2000")
    consumerProps.put("fetch.wait.max.ms", "0")
    val consumerConfig = new ConsumerConfig(consumerProps)
    TestUtils.updateConsumerOffset(consumerConfig, dirs.consumerOffsetDir + "/" + "0", offset)
    info("Updated consumer offset to " + offset)
    val consumerConnector: ConsumerConnector = Consumer.create(consumerConfig)
    val messageStream = consumerConnector.createMessageStreams(Map(topic -> 1))(topic).head
    var received = 0
    val iter = messageStream.iterator
    try {
      for (i <- 0 until numMessages) {
        iter.next // will throw a timeout exception if the message isn't there
        received += 1
      }
    } catch {
      // Expected termination path when fewer than numMessages are available.
      case e: ConsumerTimeoutException =>
        info("consumer timed out after receiving " + received + " messages.")
    } finally {
      producer.close()
      consumerConnector.shutdown
    }
    received
  }
}
| WillCh/cs286A | dataMover/kafka/core/src/test/scala/unit/kafka/integration/AutoOffsetResetTest.scala | Scala | bsd-2-clause | 4,385 |
package com.tribbloids.spookystuff.utils.lifespan
import com.tribbloids.spookystuff.utils.{CommonUtils, IDMixin}
import org.apache.spark.TaskContext
import scala.util.Try
/**
* Java Deserialization only runs constructor of superclass
*/
//CAUTION: keep the empty constructor in subclasses!
// Without it Kryo deserializer will bypass the hook registration steps in init() when deserializing
//CAUTION: keep the empty constructor in subclasses!
// Without it Kryo deserializer will bypass the hook registration steps in init() when deserializing
/** A scope that cleanup of [[Cleanable]] resources is tied to: for each of its
  * batch IDs a hook is registered (once) that sweeps the corresponding batch
  * when the scope ends. Identity (via IDMixin) is defined by the batch IDs.
  */
abstract class Lifespan extends IDMixin with Serializable {
  {
    init()
  }
  /**
   * should be triggerd on both creation and deserialization
   */
  protected def init(): Unit = {
    // Force evaluation of the lazy context and IDs before registering hooks.
    ctx
    batchIDs
    //always generate on construction
    batchIDs.foreach { batchID =>
      // Only the first Lifespan to see a batchID installs its sweep hook.
      if (!Cleanable.uncleaned.contains(batchID)) {
        registerHook { () =>
          Cleanable.cleanSweep(batchID)
        }
      }
    }
  }
  // Java deserialization hook: re-runs init() because transient lazy vals
  // and hook registrations are lost across the wire.
  def readObject(in: java.io.ObjectInputStream): Unit = {
    in.defaultReadObject()
    init() //redundant?
  }
  // Factory rather than a captured context, so the context is rebuilt
  // (not serialized) on the receiving side.
  val ctxFactory: () => LifespanContext
  @transient lazy val ctx: LifespanContext = ctxFactory()
  /** IDs of the cleanup batches this lifespan manages. */
  def getBatchIDs: Seq[Any]
  @transient final lazy val batchIDs = getBatchIDs
  // Equality/hash for IDMixin is based solely on the batch IDs.
  final protected def _id: Seq[Any] = batchIDs
  /** Registers `fn` to run when this lifespan's scope ends. */
  def registerHook(
      fn: () => Unit
  ): Unit
  def nameOpt: Option[String]
  override def toString: String = {
    // batchIDs may throw if the context cannot be built (e.g. outside a task).
    val idStr = Try(batchIDs.mkString("/")).getOrElse("[Error]")
    (nameOpt.toSeq ++ Seq(idStr)).mkString(":")
  }
}
/** Concrete lifespans: Task (ends with the Spark task), JVM (ends at JVM
  * shutdown), and TaskOrJVM (whichever of the two applies).
  */
object Lifespan {
  abstract class LifespanType extends Serializable with Product {
    // default companion class constructor
    def apply(nameOpt: Option[String] = None, ctxFactory: () => LifespanContext = () => LifespanContext()): Lifespan
  }
  case object Task extends LifespanType {
    case class ID(id: Long) extends AnyVal {
      override def toString: String = s"Task-$id"
    }
  }
  /** Lifespan bound to the current Spark task attempt; sweeps on task completion. */
  case class Task(
      override val nameOpt: Option[String] = None,
      ctxFactory: () => LifespanContext = () => LifespanContext()
  ) extends Lifespan {
    // Empty constructor required for Kryo — see CAUTION on Lifespan.
    def this() = this(None)
    import Task._
    def task: TaskContext = ctx.task
    // override def tpe: LifespanType = Task
    override def getBatchIDs: Seq[ID] = Seq(ID(task.taskAttemptId()))
    override def registerHook(fn: () => Unit): Unit = {
      task.addTaskCompletionListener[Unit] { _ =>
        fn()
      }
    }
  }
  case object JVM extends LifespanType {
    // Cap on distinct shutdown-hook batches; thread ids are folded modulo this.
    val MAX_NUMBER_OF_SHUTDOWN_HOOKS: Int = CommonUtils.numLocalCores
    case class ID(id: Int) extends AnyVal {
      override def toString: String = s"JVM-$id"
    }
  }
  /** Lifespan bound to the JVM; sweeps in a shutdown hook. */
  case class JVM(
      override val nameOpt: Option[String] = None,
      ctxFactory: () => LifespanContext = () => LifespanContext()
  ) extends Lifespan {
    def this() = this(None)
    import JVM._
    override def getBatchIDs = Seq(ID((ctx.thread.getId % JVM.MAX_NUMBER_OF_SHUTDOWN_HOOKS).toInt))
    override def registerHook(fn: () => Unit): Unit =
      try {
        sys.addShutdownHook {
          fn()
        }
      } catch {
        // Hooks cannot be added while the JVM is already shutting down; ignore.
        case e: IllegalStateException if e.getMessage.contains("Shutdown") =>
      }
  }
  /** Combines several lifespan types; delegates that fail to construct
    * (e.g. Task outside a Spark task) are silently dropped.
    */
  trait Compound extends Lifespan {
    def delegates: List[LifespanType]
    @transient lazy val delegateInstances: List[Lifespan] = {
      delegates.flatMap { v =>
        Try {
          v.apply(nameOpt, ctxFactory)
        }.toOption
      }
    }
    override def getBatchIDs: Seq[Any] = {
      delegateInstances.flatMap(_.batchIDs)
    }
    // Delegates register their own hooks during construction, so the
    // compound itself registers nothing.
    override def registerHook(fn: () => Unit): Unit = {}
  }
  /** Task-scoped when inside a task, JVM-scoped otherwise (or both). */
  case class TaskOrJVM(
      nameOpt: Option[String] = None,
      ctxFactory: () => LifespanContext = () => LifespanContext()
  ) extends Compound {
    def this() = this(None)
    override lazy val delegates: List[LifespanType] = List(Task, JVM)
  }
}
| tribbloid/spookystuff | mldsl/src/main/scala/com/tribbloids/spookystuff/utils/lifespan/Lifespan.scala | Scala | apache-2.0 | 3,727 |
package metal
package generic
import scala.reflect.ClassTag
import spire.util.Opt
/** A pointer-based map from keys K to pairs of values (V1, V2), stored in
  * parallel arrays. Element access goes through VPtr handles rather than
  * materialized tuples.
  */
abstract class Map2[K, V1, V2]
    extends Defaults
    with Enumerable
    with Searchable[K]
    with Values1[V1]
    with Values2[V2]
    with NElements3[K, V1, V2] { lhs =>
  implicit def ctK: ClassTag[K]
  implicit def K: MetalTag[K]
  implicit def ctV1: ClassTag[V1]
  implicit def V1: MetalTag[V1]
  implicit def ctV2: ClassTag[V2]
  implicit def V2: MetalTag[V2]
  type Generic = generic.Map2[K, V1, V2]
  type Mutable <: mutable.Map2[K, V1, V2]
  type Immutable <: immutable.Map2[K, V1, V2]
  type Scala <: scala.collection.immutable.Map[K, (V1, V2)]
  override def stringPrefix = "Map2"
  /** Safe downcast of `any` to a Map2 with the same element types.
    * The type parameters in the pattern are erased at runtime; the ClassTag
    * equality checks are what actually validate K/V1/V2 compatibility.
    */
  final def ptrCastT(any: Any): Opt[generic.Map2[K, V1, V2]] = any match {
    case rhs: generic.Map2[K, V1, V2] if lhs.ctK == rhs.ctK && lhs.ctV1 == rhs.ctV1 && lhs.ctV2 == rhs.ctV2 => Opt(rhs)
    case _ => Opt.empty[generic.Map2[K, V1, V2]]
  }
  // Backing array + index pairs locating the key and both values of a pointer.
  private[metal] def keyArray(ptr: VPtr[lhs.type]): Array[K]
  private[metal] def keyIndex(ptr: VPtr[lhs.type]): Int
  private[metal] def value1Array(ptr: VPtr[lhs.type]): Array[V1]
  private[metal] def value1Index(ptr: VPtr[lhs.type]): Int
  private[metal] def value2Array(ptr: VPtr[lhs.type]): Array[V2]
  private[metal] def value2Index(ptr: VPtr[lhs.type]): Int
  /** Hash of the (key, v1, v2) triple at `ptr`, mixing the three element hashes. */
  def ptrHash(ptr: VPtr[this.type]): Int = {
    val kh = K.hashElement(keyArray(ptr), keyIndex(ptr))
    val v1h = V1.hashElement(value1Array(ptr), value1Index(ptr))
    val v2h = V2.hashElement(value2Array(ptr), value2Index(ptr))
    kh ^ (v1h * 41) ^ (v2h * 41 * 41)
  }
  /** Renders the element at `ptr` as "key -> (v1, v2)". */
  def ptrToString(ptr: VPtr[this.type]): String = {
    val ks = K.toStringElement(keyArray(ptr), keyIndex(ptr))
    val v1s = V1.toStringElement(value1Array(ptr), value1Index(ptr))
    val v2s = V2.toStringElement(value2Array(ptr), value2Index(ptr))
    s"$ks -> ($v1s, $v2s)"
  }
  /** True when `that` maps this pointer's key to equal (v1, v2) values. */
  final def ptrEquals(thisPtr: VPtr[this.type], that: generic.Map2[K, V1, V2]): Boolean =
    that.ptrFindFromArray(keyArray(thisPtr), keyIndex(thisPtr)) match {
      case IsVPtr(thatPtr) =>
        val thisA1 = value1Array(thisPtr)
        val thisI1 = value1Index(thisPtr)
        val thisA2 = value2Array(thisPtr)
        val thisI2 = value2Index(thisPtr)
        val thatA1 = that.value1Array(thatPtr)
        val thatI1 = that.value1Index(thatPtr)
        val thatA2 = that.value2Array(thatPtr)
        val thatI2 = that.value2Index(thatPtr)
        V1.equalsElement(thisA1, thisI1, thatA1, thatI1) &&
        V2.equalsElement(thisA2, thisI2, thatA2, thatI2)
      case _ => false // key absent in `that`
    }
}
| denisrosset/ptrcoll | library/src/main/scala/metal/generic/Map2.scala | Scala | mit | 2,559 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.bigquery
import com.spotify.scio.bigquery.client.BigQuery
import org.scalacheck.Gen
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpec
/** Validates BigQuery client construction: empty/null project ids are
  * rejected, any non-empty alphanumeric project id is accepted.
  */
class BigQueryClientTest extends AnyFlatSpec with Matchers with ScalaCheckDrivenPropertyChecks {
  "BigQueryClient" should "throw an exception when an empty or null ProjectId is provided" in {
    an[IllegalArgumentException] should be thrownBy BigQuery("")
    an[IllegalArgumentException] should be thrownBy BigQuery(null)
  }

  it should "work with non-empty ProjectId" in {
    // Any non-empty alphanumeric string must be accepted.
    forAll(Gen.alphaNumStr.suchThat(_.nonEmpty)) { projectId =>
      BigQuery(projectId)
    }
  }
}
| spotify/scio | scio-google-cloud-platform/src/test/scala/com/spotify/scio/bigquery/BigQueryClientTest.scala | Scala | apache-2.0 | 1,382 |
/*
* Copyright 2015 Tsukasa Kitachi
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sbtjooq.codegen
import java.io.FileNotFoundException
import sbt._
import scala.xml.{Node, NodeSeq}
/** Internal helpers for the codegen plugin: config-transformer type aliases
  * and JavaVersion enrichments for detecting the effective Java runtime.
  */
package object internal {
  // Rewrites a jOOQ codegen configuration XML tree.
  type ConfigTransformer = Node => Node
  // Expands a ${variable} name to replacement XML, or None if unknown.
  type VariableExpander = String => Option[NodeSeq]
  /** Companion-style helpers for resolving the JavaVersion of a JDK install. */
  implicit class JavaVersionCompanionOps(companion: JavaVersion.type) {
    /** Version of the given java home, or of the running JVM when None.
      * @throws RuntimeException (via sys.error) if the release file cannot be parsed
      */
    def get(javaHome: Option[File]): JavaVersion =
      javaHome.fold(systemDefault)(parse(_).fold(sys.error, identity))
    /** Version of the JVM this process runs on. */
    def systemDefault: JavaVersion =
      companion(sys.props("java.version"))
    /** Reads JAVA_VERSION="..." from `<javaHome>/release`; Left is an error message. */
    def parse(javaHome: File): Either[String, JavaVersion] = {
      val releaseFile = javaHome / "release"
      val versionLine = """JAVA_VERSION="(.+)"""".r
      try {
        IO.readLines(releaseFile)
          .collectFirst {
            case versionLine(v) => companion(v)
          }
          .toRight(s"No JAVA_VERSION line in $releaseFile")
      } catch {
        case e: FileNotFoundException => Left(e.getMessage)
      }
    }
  }
  /** Feature checks keyed on the major Java version. */
  implicit class JavaVersionOps(javaVersion: JavaVersion) {
    // "1.8" style versions report 8; "11" style report 11.
    // NOTE(review): a JavaVersion with no numeric components would throw
    // MatchError here — assumed not to occur in practice; confirm.
    def major: Long = javaVersion.numbers match {
      case Vector(1L, x, _*) => x
      case Vector(x, _*) => x
    }
    // Module system (JPMS) exists from Java 9 onwards.
    def isJigsawEnabled: Boolean = major >= 9
    // JAXB and friends were shipped with the JDK up to Java 10.
    def isJavaEEModulesBundled: Boolean = major <= 10
  }
}
| kxbmap/sbt-jooq | codegen/src/main/scala/sbtjooq/codegen/internal/package.scala | Scala | apache-2.0 | 1,842 |
package scala
package tools.nsc
package interpreter
/**
* Subclass to access some hidden things I need and also some custom behavior.
*/
/**
 * Subclass to access some hidden things I need and also some custom behavior.
 */
class HackIMain(settings: Settings, out: JPrintWriter) extends IMain(settings, out) {
  // Exposes IMain's protected prevRequestList to callers.
  def previousRequests = prevRequestList
  // Anchor class resolution on the settings' loader instead of IMain's default.
  override protected def parentClassLoader = settings.getClass.getClassLoader()
}
| minyk/spark-notebook | modules/spark/src/main/scala_2.11/spark-pre1.5/HackIMain.scala | Scala | apache-2.0 | 349 |
package gitbucket.core.controller
import gitbucket.core.api._
import gitbucket.core.issues.html
import gitbucket.core.model.Issue
import gitbucket.core.service.IssuesService._
import gitbucket.core.service._
import gitbucket.core.util.ControlUtil._
import gitbucket.core.util.Implicits._
import gitbucket.core.util._
import gitbucket.core.view
import gitbucket.core.view.Markdown
import jp.sf.amateras.scalatra.forms._
import org.scalatra.Ok
/** Concrete controller wiring IssuesControllerBase to its service implementations. */
class IssuesController extends IssuesControllerBase
  with IssuesService with RepositoryService with AccountService with LabelsService with MilestonesService with ActivityService
  with ReadableUsersAuthenticator with ReferrerAuthenticator with CollaboratorsAuthenticator with PullRequestService with WebHookIssueCommentService
trait IssuesControllerBase extends ControllerBase {
self: IssuesService with RepositoryService with AccountService with LabelsService with MilestonesService with ActivityService
with ReadableUsersAuthenticator with ReferrerAuthenticator with CollaboratorsAuthenticator with PullRequestService with WebHookIssueCommentService =>
// Form payloads bound from HTTP parameters.
/** New-issue submission; assignee/milestone/labels only honoured for writers. */
case class IssueCreateForm(title: String, content: Option[String],
    assignedUserName: Option[String], milestoneId: Option[Int], labelNames: Option[String])
/** A plain comment on an existing issue. */
case class CommentForm(issueId: Int, content: String)
/** Open/close state change, with an optional accompanying comment. */
case class IssueStateForm(issueId: Int, content: Option[String])

// Form validators/mappers (scalatra-forms).
val issueCreateForm = mapping(
  "title" -> trim(label("Title", text(required))),
  "content" -> trim(optional(text())),
  "assignedUserName" -> trim(optional(text())),
  "milestoneId" -> trim(optional(number())),
  "labelNames" -> trim(optional(text()))
)(IssueCreateForm.apply)

val issueTitleEditForm = mapping(
  "title" -> trim(label("Title", text(required)))
)(x => x)

val issueEditForm = mapping(
  "content" -> trim(optional(text()))
)(x => x)

val commentForm = mapping(
  "issueId" -> label("Issue Id", number()),
  "content" -> trim(label("Comment", text(required)))
)(CommentForm.apply)

val issueStateForm = mapping(
  "issueId" -> label("Issue Id", number()),
  "content" -> trim(optional(text()))
)(IssueStateForm.apply)
/** Issue list. Queries containing "is:pr" are redirected to the pulls page. */
get("/:owner/:repository/issues")(referrersOnly { repository =>
  val q = request.getParameter("q")
  if(Option(q).exists(_.contains("is:pr"))){
    redirect(s"/${repository.owner}/${repository.name}/pulls?q=" + StringUtil.urlEncode(q))
  } else {
    searchIssues(repository)
  }
})

/** Single-issue page with comments, labels, assignee candidates and milestones. */
get("/:owner/:repository/issues/:id")(referrersOnly { repository =>
  defining(repository.owner, repository.name, params("id")){ case (owner, name, issueId) =>
    getIssue(owner, name, issueId) map {
      html.issue(
        _,
        getComments(owner, name, issueId.toInt),
        getIssueLabels(owner, name, issueId.toInt),
        // Group accounts cannot be assignees, so the owner is only added for user accounts.
        (getCollaborators(owner, name) ::: (if(getAccountByUserName(owner).get.isGroupAccount) Nil else List(owner))).sorted,
        getMilestonesWithIssueCount(owner, name),
        getLabels(owner, name),
        hasWritePermission(owner, name, context.loginAccount),
        repository)
    } getOrElse NotFound
  }
})
/**
 * List comments on an issue (GitHub v3 compatible API).
 * https://developer.github.com/v3/issues/comments/#list-comments-on-an-issue
 */
get("/api/v3/repos/:owner/:repository/issues/:id/comments")(referrersOnly { repository =>
  (for{
    issueId <- params("id").toIntOpt
    comments = getCommentsForApi(repository.owner, repository.name, issueId.toInt)
  } yield {
    JsonFormat(comments.map{ case (issueComment, user) => ApiComment(issueComment, RepositoryName(repository), issueId, ApiUser(user)) })
  }).getOrElse(NotFound)
})

/** New-issue form. */
get("/:owner/:repository/issues/new")(readableUsersOnly { repository =>
  defining(repository.owner, repository.name){ case (owner, name) =>
    html.create(
      // Group accounts cannot be assignees, so the owner is only added for user accounts.
      (getCollaborators(owner, name) ::: (if(getAccountByUserName(owner).get.isGroupAccount) Nil else List(owner))).sorted,
      getMilestones(owner, name),
      getLabels(owner, name),
      hasWritePermission(owner, name, context.loginAccount),
      repository)
  }
})
/** Creates an issue, then (for writers) applies labels; records activity,
  * creates reference comments, fires web hooks and sends notifications,
  * finally redirecting to the new issue page.
  */
post("/:owner/:repository/issues/new", issueCreateForm)(readableUsersOnly { (form, repository) =>
  defining(repository.owner, repository.name){ case (owner, name) =>
    val writable = hasWritePermission(owner, name, context.loginAccount)
    val userName = context.loginAccount.get.userName
    // insert issue
    // Assignee and milestone are silently dropped for non-writers.
    val issueId = createIssue(owner, name, userName, form.title, form.content,
      if(writable) form.assignedUserName else None,
      if(writable) form.milestoneId else None)
    // insert labels
    if(writable){
      form.labelNames.map { value =>
        val labels = getLabels(owner, name)
        // Unknown label names are ignored.
        value.split(",").foreach { labelName =>
          labels.find(_.labelName == labelName).map { label =>
            registerIssueLabel(owner, name, issueId, label.labelId)
          }
        }
      }
    }
    // record activity
    recordCreateIssueActivity(owner, name, userName, issueId, form.title)
    getIssue(owner, name, issueId.toString).foreach { issue =>
      // extract references and create refer comment
      createReferComment(owner, name, issue, form.title + " " + form.content.getOrElse(""))
      // call web hooks
      callIssuesWebHook("opened", repository, issue, context.baseUrl, context.loginAccount.get)
      // notifications
      Notifier().toNotify(repository, issue, form.content.getOrElse("")){
        Notifier.msgIssue(s"${context.baseUrl}/${owner}/${name}/issues/${issueId}")
      }
    }
    redirect(s"/${owner}/${name}/issues/${issueId}")
  }
})
/** AJAX: edit an issue title; only the opener or a writer may edit. */
ajaxPost("/:owner/:repository/issues/edit_title/:id", issueTitleEditForm)(readableUsersOnly { (title, repository) =>
  defining(repository.owner, repository.name){ case (owner, name) =>
    getIssue(owner, name, params("id")).map { issue =>
      if(isEditable(owner, name, issue.openedUserName)){
        // update issue
        updateIssue(owner, name, issue.issueId, title, issue.content)
        // extract references and create refer comment
        createReferComment(owner, name, issue.copy(title = title), title)
        redirect(s"/${owner}/${name}/issues/_data/${issue.issueId}")
      } else Unauthorized
    } getOrElse NotFound
  }
})

/** AJAX: edit an issue body; only the opener or a writer may edit. */
ajaxPost("/:owner/:repository/issues/edit/:id", issueEditForm)(readableUsersOnly { (content, repository) =>
  defining(repository.owner, repository.name){ case (owner, name) =>
    getIssue(owner, name, params("id")).map { issue =>
      if(isEditable(owner, name, issue.openedUserName)){
        // update issue
        updateIssue(owner, name, issue.issueId, issue.title, content)
        // extract references and create refer comment
        createReferComment(owner, name, issue, content.getOrElse(""))
        redirect(s"/${owner}/${name}/issues/_data/${issue.issueId}")
      } else Unauthorized
    } getOrElse NotFound
  }
})
post("/:owner/:repository/issue_comments/new", commentForm)(readableUsersOnly { (form, repository) =>
handleComment(form.issueId, Some(form.content), repository)() map { case (issue, id) =>
redirect(s"/${repository.owner}/${repository.name}/${
if(issue.isPullRequest) "pull" else "issues"}/${form.issueId}#comment-${id}")
} getOrElse NotFound
})
/**
* https://developer.github.com/v3/issues/comments/#create-a-comment
*/
post("/api/v3/repos/:owner/:repository/issues/:id/comments")(readableUsersOnly { repository =>
(for{
issueId <- params("id").toIntOpt
body <- extractFromJsonBody[CreateAComment].map(_.body) if ! body.isEmpty
(issue, id) <- handleComment(issueId, Some(body), repository)()
issueComment <- getComment(repository.owner, repository.name, id.toString())
} yield {
JsonFormat(ApiComment(issueComment, RepositoryName(repository), issueId, ApiUser(context.loginAccount.get)))
}) getOrElse NotFound
})
post("/:owner/:repository/issue_comments/state", issueStateForm)(readableUsersOnly { (form, repository) =>
handleComment(form.issueId, form.content, repository)() map { case (issue, id) =>
redirect(s"/${repository.owner}/${repository.name}/${
if(issue.isPullRequest) "pull" else "issues"}/${form.issueId}#comment-${id}")
} getOrElse NotFound
})
ajaxPost("/:owner/:repository/issue_comments/edit/:id", commentForm)(readableUsersOnly { (form, repository) =>
defining(repository.owner, repository.name){ case (owner, name) =>
getComment(owner, name, params("id")).map { comment =>
if(isEditable(owner, name, comment.commentedUserName)){
updateComment(comment.commentId, form.content)
redirect(s"/${owner}/${name}/issue_comments/_data/${comment.commentId}")
} else Unauthorized
} getOrElse NotFound
}
})
ajaxPost("/:owner/:repository/issue_comments/delete/:id")(readableUsersOnly { repository =>
defining(repository.owner, repository.name){ case (owner, name) =>
getComment(owner, name, params("id")).map { comment =>
if(isEditable(owner, name, comment.commentedUserName)){
Ok(deleteComment(comment.commentId))
} else Unauthorized
} getOrElse NotFound
}
})
  // Returns issue data for in-place editing: the HTML edit form when dataType=html,
  // otherwise a JSON object with the raw title and the markdown-rendered content.
  ajaxGet("/:owner/:repository/issues/_data/:id")(readableUsersOnly { repository =>
    getIssue(repository.owner, repository.name, params("id")) map { x =>
      if(isEditable(x.userName, x.repositoryName, x.openedUserName)){
        params.get("dataType") collect {
          case t if t == "html" => html.editissue(
            x.content, x.issueId, x.userName, x.repositoryName)
        } getOrElse {
          contentType = formats("json")
          org.json4s.jackson.Serialization.write(
            Map("title" -> x.title,
              "content" -> Markdown.toHtml(x.content getOrElse "No description given.",
                repository, false, true, true, true, isEditable(x.userName, x.repositoryName, x.openedUserName))
            ))
        }
      } else Unauthorized
    } getOrElse NotFound
  })
  // Same as above, but for a single comment instead of the issue itself.
  // NOTE(review): the comment renderer passes one fewer Markdown flag than the issue
  // renderer above — confirm the flag lists line up with the intended toHtml overloads.
  ajaxGet("/:owner/:repository/issue_comments/_data/:id")(readableUsersOnly { repository =>
    getComment(repository.owner, repository.name, params("id")) map { x =>
      if(isEditable(x.userName, x.repositoryName, x.commentedUserName)){
        params.get("dataType") collect {
          case t if t == "html" => html.editcomment(
            x.content, x.commentId, x.userName, x.repositoryName)
        } getOrElse {
          contentType = formats("json")
          org.json4s.jackson.Serialization.write(
            Map("content" -> view.Markdown.toHtml(x.content,
              repository, false, true, true, isEditable(x.userName, x.repositoryName, x.commentedUserName))
            ))
        }
      } else Unauthorized
    } getOrElse NotFound
  })
  // Resolves a comma-separated list of label names to the repository's label rows
  // (unknown names are silently dropped) and renders the label list fragment.
  ajaxPost("/:owner/:repository/issues/new/label")(collaboratorsOnly { repository =>
    val labelNames = params("labelNames").split(",")
    val labels = getLabels(repository.owner, repository.name).filter(x => labelNames.contains(x.labelName))
    html.labellist(labels)
  })
  // Attaches a label to an issue and re-renders the issue's label list.
  ajaxPost("/:owner/:repository/issues/:id/label/new")(collaboratorsOnly { repository =>
    defining(params("id").toInt){ issueId =>
      registerIssueLabel(repository.owner, repository.name, issueId, params("labelId").toInt)
      html.labellist(getIssueLabels(repository.owner, repository.name, issueId))
    }
  })
  // Detaches a label from an issue and re-renders the issue's label list.
  ajaxPost("/:owner/:repository/issues/:id/label/delete")(collaboratorsOnly { repository =>
    defining(params("id").toInt){ issueId =>
      deleteIssueLabel(repository.owner, repository.name, issueId, params("labelId").toInt)
      html.labellist(getIssueLabels(repository.owner, repository.name, issueId))
    }
  })
  // Sets or clears the assignee (a blank "assignedUserName" parameter means unassigned).
  ajaxPost("/:owner/:repository/issues/:id/assign")(collaboratorsOnly { repository =>
    updateAssignedUserName(repository.owner, repository.name, params("id").toInt, assignedUserName("assignedUserName"))
    Ok("updated")
  })
  // Sets or clears the milestone; when one is set, responds with its refreshed
  // progress bar, otherwise with an empty OK.
  ajaxPost("/:owner/:repository/issues/:id/milestone")(collaboratorsOnly { repository =>
    updateMilestoneId(repository.owner, repository.name, params("id").toInt, milestoneId("milestoneId"))
    milestoneId("milestoneId").map { milestoneId =>
      getMilestonesWithIssueCount(repository.owner, repository.name)
        .find(_._1.milestoneId == milestoneId).map { case (_, openCount, closeCount) =>
          gitbucket.core.issues.milestones.html.progress(openCount + closeCount, closeCount)
        } getOrElse NotFound
    } getOrElse Ok()
  })
post("/:owner/:repository/issues/batchedit/state")(collaboratorsOnly { repository =>
defining(params.get("value")){ action =>
action match {
case Some("open") => executeBatch(repository) { handleComment(_, None, repository)( _ => Some("reopen")) }
case Some("close") => executeBatch(repository) { handleComment(_, None, repository)( _ => Some("close")) }
case _ => // TODO BadRequest
}
}
})
  // Batch-attaches a label to every checked issue; issues that already carry the
  // label are skipped (getIssueLabel short-circuits the registration).
  post("/:owner/:repository/issues/batchedit/label")(collaboratorsOnly { repository =>
    params("value").toIntOpt.map{ labelId =>
      executeBatch(repository) { issueId =>
        getIssueLabel(repository.owner, repository.name, issueId, labelId) getOrElse {
          registerIssueLabel(repository.owner, repository.name, issueId, labelId)
        }
      }
    } getOrElse NotFound
  })
  // Batch-assigns (or clears, when "value" is blank) the assignee of every checked issue.
  post("/:owner/:repository/issues/batchedit/assign")(collaboratorsOnly { repository =>
    defining(assignedUserName("value")){ value =>
      executeBatch(repository) {
        updateAssignedUserName(repository.owner, repository.name, _, value)
      }
    }
  })
  // Batch-sets (or clears, when "value" is not numeric) the milestone of every checked issue.
  post("/:owner/:repository/issues/batchedit/milestone")(collaboratorsOnly { repository =>
    defining(milestoneId("value")){ value =>
      executeBatch(repository) {
        updateMilestoneId(repository.owner, repository.name, _, value)
      }
    }
  })
  // Serves a file attached to an issue. Attachments are stored on disk with the
  // given id as filename prefix followed by "." and the original extension.
  get("/:owner/:repository/_attached/:file")(referrersOnly { repository =>
    (Directory.getAttachedDir(repository.owner, repository.name) match {
      case dir if(dir.exists && dir.isDirectory) =>
        dir.listFiles.find(_.getName.startsWith(params("file") + ".")).map { file =>
          RawData(FileUtil.getMimeType(file.getName), file)
        }
      case _ => None
    }) getOrElse NotFound
  })
  // Reads an optional user-name parameter; blank values are treated as "unassigned" (None).
  val assignedUserName = (key: String) => params.get(key) filter (_.trim != "")
  // Reads an optional milestone-id parameter; missing or non-numeric values become None.
  val milestoneId: String => Option[Int] = (key: String) => params.get(key).flatMap(_.toIntOpt)
  // A user may edit an issue/comment if they have write permission on the repository or
  // they are its author. NOTE(review): loginAccount.get assumes an authenticated session —
  // presumably guaranteed by the readableUsersOnly/collaboratorsOnly wrappers; confirm.
  private def isEditable(owner: String, repository: String, author: String)(implicit context: Context): Boolean =
    hasWritePermission(owner, repository, context.loginAccount) || author == context.loginAccount.get.userName
private def executeBatch(repository: RepositoryService.RepositoryInfo)(execute: Int => Unit) = {
params("checked").split(',') map(_.toInt) foreach execute
params("from") match {
case "issues" => redirect(s"/${repository.owner}/${repository.name}/issues")
case "pulls" => redirect(s"/${repository.owner}/${repository.name}/pulls")
}
}
  // For every issue id referenced in `message` (e.g. "#123"), records a "refer"
  // comment on the referenced issue pointing back to `fromIssue` — unless an
  // identical refer comment already exists or the referenced issue does not.
  private def createReferComment(owner: String, repository: String, fromIssue: Issue, message: String) = {
    StringUtil.extractIssueId(message).foreach { issueId =>
      // The refer comment's content encodes the source issue as "id:title".
      val content = fromIssue.issueId + ":" + fromIssue.title
      if(getIssue(owner, repository, issueId).isDefined){
        // Not add if refer comment already exist.
        if(!getComments(owner, repository, issueId.toInt).exists { x => x.action == "refer" && x.content == content }) {
          createComment(owner, repository, context.loginAccount.get.userName, issueId.toInt, content, "refer")
        }
      }
    }
  }
  /**
   * Applies a comment and/or a state-change action ("close"/"reopen") to an issue or
   * pull request and performs all follow-up bookkeeping: activity records, refer
   * comments, web hooks, and notifications.
   *
   * @param issueId the issue (or pull request) identifier
   * @param content the comment body, if any
   * @param getAction extracts the requested action from the request; by default it reads
   *                  the "action" parameter and drops it unless the current user may
   *                  edit the issue
   * @return Some((issue, commentId)) when the issue exists and a comment row was
   *         created, otherwise None
   * @see [[https://github.com/takezoe/gitbucket/wiki/CommentAction]]
   */
  private def handleComment(issueId: Int, content: Option[String], repository: RepositoryService.RepositoryInfo)
                           (getAction: Issue => Option[String] =
                              p1 => params.get("action").filter(_ => isEditable(p1.userName, p1.repositoryName, p1.openedUserName))) = {
    defining(repository.owner, repository.name){ case (owner, name) =>
      val userName = context.loginAccount.get.userName
      getIssue(owner, name, issueId.toString) flatMap { issue =>
        // Determine the state transition (if any) plus the matching activity recorder,
        // applying the new closed flag to the DB as a side effect of .map.
        val (action, recordActivity) =
          getAction(issue)
            .collect {
              case "close" if(!issue.closed) => true ->
                (Some("close") -> Some(if(issue.isPullRequest) recordClosePullRequestActivity _ else recordCloseIssueActivity _))
              case "reopen" if(issue.closed) => false ->
                (Some("reopen") -> Some(recordReopenIssueActivity _))
            }
            .map { case (closed, t) =>
              updateClosed(owner, name, issueId, closed)
              t
            }
            .getOrElse(None -> None)
        // Create the comment row: plain "comment", a bare action marker ("Close"/"Reopen"),
        // or an action-with-text comment ("close_comment"/"reopen_comment").
        val commentId = (content, action) match {
          case (None, None) => None
          case (None, Some(action)) => Some(createComment(owner, name, userName, issueId, action.capitalize, action))
          case (Some(content), _) => Some(createComment(owner, name, userName, issueId, content, action.map(_+ "_comment").getOrElse("comment")))
        }
        // record comment activity if comment is entered
        content foreach {
          (if(issue.isPullRequest) recordCommentPullRequestActivity _ else recordCommentIssueActivity _)
          (owner, name, userName, issueId, _)
        }
        recordActivity foreach ( _ (owner, name, userName, issueId, issue.title) )
        // extract references and create refer comment
        content.map { content =>
          createReferComment(owner, name, issue, content)
        }
        // call web hooks: a plain comment triggers the comment hook, a state change
        // triggers the issue/pull-request hook with a GitHub-style action name.
        action match {
          case None => commentId.map{ commentIdSome => callIssueCommentWebHook(repository, issue, commentIdSome, context.loginAccount.get) }
          case Some(act) => val webHookAction = act match {
            case "open" => "opened"
            case "reopen" => "reopened"
            case "close" => "closed"
            case _ => act
          }
          if(issue.isPullRequest){
            callPullRequestWebHook(webHookAction, repository, issue.issueId, context.baseUrl, context.loginAccount.get)
          } else {
            callIssuesWebHook(webHookAction, repository, issue, context.baseUrl, context.loginAccount.get)
          }
        }
        // notifications: one message for the comment (if any) and one for the state
        // change (if any). commentId.get is safe in the first branch because a
        // non-empty content always produced a comment row above.
        Notifier() match {
          case f =>
            content foreach {
              f.toNotify(repository, issue, _){
                Notifier.msgComment(s"${context.baseUrl}/${owner}/${name}/${
                  if(issue.isPullRequest) "pull" else "issues"}/${issueId}#comment-${commentId.get}")
              }
            }
            action foreach {
              f.toNotify(repository, issue, _){
                Notifier.msgStatus(s"${context.baseUrl}/${owner}/${name}/issues/${issueId}")
              }
            }
        }
        commentId.map( issue -> _ )
      }
    }
  }
  // Renders the issue list page, remembering the search condition in the session so
  // that navigating back to the list restores the previous filters.
  private def searchIssues(repository: RepositoryService.RepositoryInfo) = {
    defining(repository.owner, repository.name){ case (owner, repoName) =>
      val page = IssueSearchCondition.page(request)
      val sessionKey = Keys.Session.Issues(owner, repoName)
      // retrieve search condition: built from the query string when present (parsing
      // the free-text "q" parameter if given), otherwise restored from the session,
      // falling back to the default condition.
      val condition = session.putAndGet(sessionKey,
        if(request.hasQueryString){
          val q = request.getParameter("q")
          if(q == null || q.trim.isEmpty){
            IssueSearchCondition(request)
          } else {
            IssueSearchCondition(q, getMilestones(owner, repoName).map(x => (x.title, x.milestoneId)).toMap)
          }
        } else session.getAs[IssueSearchCondition](sessionKey).getOrElse(IssueSearchCondition())
      )
      html.list(
        "issues",
        searchIssue(condition, false, (page - 1) * IssueLimit, IssueLimit, owner -> repoName),
        page,
        // the owner is only offered as an assignee candidate for personal (non-group)
        // repositories
        if(!getAccountByUserName(owner).exists(_.isGroupAccount)){
          (getCollaborators(owner, repoName) :+ owner).sorted
        } else {
          getCollaborators(owner, repoName)
        },
        getMilestones(owner, repoName),
        getLabels(owner, repoName),
        countIssue(condition.copy(state = "open" ), false, owner -> repoName),
        countIssue(condition.copy(state = "closed"), false, owner -> repoName),
        condition,
        repository,
        hasWritePermission(owner, repoName, context.loginAccount))
    }
  }
}
| snowgooseyk/gitbucket | src/main/scala/gitbucket/core/controller/IssuesController.scala | Scala | apache-2.0 | 20,647 |
package crdts
import org.scalatest.{Matchers, FlatSpec}
import scala.collection.SortedMap
class HandOffSpec extends FlatSpec with Matchers {
  import HandOff._

  "A Node" should "join nicely" in {
    // Source node "i": tier 2, source clock 2, carrying the value 9.
    val source = Node("i", 2,
      sck = 2,
      values = SortedMap("i" -> 9))
    // Destination node "j": tier 1, destination clock 5, with its own value.
    val destination = Node("j", 1,
      dck = 5,
      values = SortedMap("j" -> 1021))

    val joined = destination.join(source)

    // Joining resets the source clock, bumps the destination clock, keeps the
    // local values untouched, and records a slot for the source node.
    joined.sck should be (0)
    joined.dck should be (6)
    joined.values should be (SortedMap("j" -> 1021))
    joined.slots should be (SortedMap("i" -> (2, 5)))
  }
}
| maylencita/counters | src/test/scala/crdts/HandOffSpec.scala | Scala | cc0-1.0 | 536 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.cache
import java.io.{File, FileInputStream, FileOutputStream}
import java.util.Properties
import com.typesafe.scalalogging.LazyLogging
/**
* Simple persistence strategy that keeps values in memory and writes them to a prop file on disk.
*/
class FilePersistence(dir: File, file: String) extends PropertiesPersistence with LazyLogging {

  // The backing directory must either be creatable now or already exist as a directory.
  require((!dir.exists() && dir.mkdirs()) || dir.isDirectory)

  private val configFile = new File(dir, file)

  logger.debug(s"Using data file '${configFile.getAbsolutePath}'")

  /** Reads the persisted properties from disk, if the file exists; no-op otherwise. */
  override protected def load(properties: Properties): Unit = this.synchronized {
    if (configFile.exists) {
      val in = new FileInputStream(configFile)
      try properties.load(in) finally in.close()
    }
  }

  /** Writes the given properties to disk, replacing the previous file contents. */
  override protected def persist(properties: Properties): Unit = this.synchronized {
    val out = new FileOutputStream(configFile)
    try properties.store(out, "GeoMesa configuration file") finally out.close()
  }
}
| aheyne/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/cache/FilePersistence.scala | Scala | apache-2.0 | 1,627 |
package com.eevolution.context.dictionary.infrastructure.repository
import com.eevolution.context.dictionary.domain.model.InfoColumnTrl
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/EmerisScala
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 20/10/17.
*/
/**
* Info Column Trl Mapping
*/
trait InfoColumnTrlMapping {
  // Quill schema mapping for the AD_InfoColumn_Trl translation table: binds each
  // InfoColumnTrl case-class field to its physical column name.
  val queryInfoColumnTrl = quote {
    querySchema[InfoColumnTrl]("AD_InfoColumn_Trl",
      _.infoColumnId-> "AD_InfoColumn_ID",
      _.tenantId -> "AD_Client_ID" ,
      _.organizationId -> "AD_Org_ID",
      _.language-> "AD_Language",
      _.isActive-> "IsActive",
      _.created-> "Created",
      _.createdBy-> "CreatedBy",
      _.updated-> "Updated",
      _.updatedBy-> "UpdatedBy",
      _.isTranslated-> "IsTranslated",
      _.name-> "Name",
      _.description-> "Description",
      _.help-> "Help",
      _.uuid-> "UUID")
  }
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/InfoColumnTrlMapping.scala | Scala | gpl-3.0 | 1,756 |
package com.twitter.finagle
import com.twitter.conversions.DurationOps._
import com.twitter.util.{Await, Return}
import org.scalacheck.Gen
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatestplus.junit.AssertionsForJUnit
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
import org.scalatest.funsuite.AnyFunSuite
class StatusTest
    extends AnyFunSuite
    with AssertionsForJUnit
    with ScalaCheckDrivenPropertyChecks
    with Eventually
    with IntegrationPatience {

  // Generators: a single arbitrary Status, and an arbitrary pair of them.
  val status1 = Gen.oneOf(Status.Open, Status.Busy, Status.Closed)
  val status2 = for (left <- status1; right <- status1) yield (left, right)

  test("Status.bestOf can terminate early") {
    // The last element calls fail(), so bestOf must short-circuit once it sees
    // Open (the best possible status) and never evaluate it.
    val res = Status.bestOf[Function0[Status]](
      List(() => Status.Busy, () => Status.Open, () => fail("element should not be evaluated")),
      _.apply
    )
    assert(res == Status.Open)
  }

  test("Status.worstOf can terminate early") {
    // Symmetric case: worstOf must short-circuit at Closed (the worst possible status).
    val res = Status.worstOf[Function0[Status]](
      List(() => Status.Busy, () => Status.Closed, () => fail("element should not be evaluated")),
      _.apply
    )
    assert(res == Status.Closed)
  }

  // This test is borderline silly.
  // worst(a, b) must agree with the Ordering's minimum whenever a and b differ.
  test("Status.worst") {
    forAll(status2) {
      case (left, right) =>
        val s = Status.worst(left, right)
        assert(Ordering[Status].equiv(left, right) || s == Ordering[Status].min(left, right))
    }
  }

  // This test is borderline silly.
  // best(a, b) must agree with the Ordering's maximum whenever a and b differ.
  test("Status.best") {
    forAll(status2) {
      case (left, right) =>
        val s = Status.best(left, right)
        assert(Ordering[Status].equiv(left, right) || s == Ordering[Status].max(left, right))
    }
  }

  test("Status.whenOpen - opens") {
    // whenOpen observes the mutable status: pending while Busy, satisfied once Open.
    @volatile var status: Status = Status.Busy
    val open = Status.whenOpen(status)
    assert(open.poll.isEmpty)
    status = Status.Open
    eventually { assert(open.poll == Some(Return.Unit)) }
    Await.result(open, 5.seconds) // no exceptions
  }

  test("Status.whenOpen - closes") {
    // Transitioning to Closed must fail the Future with ClosedException.
    @volatile var status: Status = Status.Busy
    val open = Status.whenOpen(status)
    assert(open.poll.isEmpty)
    status = Status.Closed
    eventually { assert(open.poll.isDefined) }
    intercept[Status.ClosedException] { Await.result(open, 5.seconds) }
  }

  test("Ordering spot check") {
    // ord is arranged worst-to-best, so comparing two statuses must have the same
    // sign as comparing their array indices.
    val ord = Array(Status.Closed, Status.Busy, Status.Open)
    val idx2 = for {
      left <- Gen.choose(0, ord.length - 1)
      right <- Gen.choose(0, ord.length - 1)
    } yield (left, right)
    forAll(idx2) {
      case (left, right) =>
        assert(Ordering[Status].compare(ord(left), ord(right)).signum == (left - right).signum)
    }
  }
}
| twitter/finagle | finagle-core/src/test/scala/com/twitter/finagle/StatusTest.scala | Scala | apache-2.0 | 2,669 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.elastic6
import com.datamountaineer.streamreactor.connect.elastic6.config.ElasticConfigConstants
import scala.collection.JavaConverters._
// Exercises the connector lifecycle: start with config, task-config generation,
// task class lookup, and stop.
class TestElasticsSinkConnector extends TestElasticBase {
  "Should start a Elastic Search Connector" in {
    //get config
    val config = getElasticSinkConfigProps
    //get connector
    val connector = new ElasticSinkConnector()
    //start with config
    connector.start(config)
    //check config: every generated task config carries the configured ES hosts
    val taskConfigs = connector.taskConfigs(10)
    taskConfigs.asScala.head.get(ElasticConfigConstants.URL) shouldBe ELASTIC_SEARCH_HOSTNAMES
    // asking for 10 tasks must yield exactly 10 task configurations
    taskConfigs.size() shouldBe 10
    //check connector
    connector.taskClass() shouldBe classOf[ElasticSinkTask]
    connector.stop()
  }
}
} | CodeSmell/stream-reactor | kafka-connect-elastic6/src/test/scala/com/datamountaineer/streamreactor/connect/elastic6/TestElasticsSinkConnector.scala | Scala | apache-2.0 | 1,379 |
/**
* Copyright (C) 2012 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.util
import org.apache.log4j.Level
// More Scala-friendly indented logger API
trait Logging {

  // Error with optional parameters.
  // NOTE(review): unlike info/debug below there is no isErrorEnabled guard, so the
  // by-name message/parameters are always forced here — presumably acceptable
  // because errors are rare; confirm.
  def error(message: ⇒ String, parameters: ⇒ Seq[(String, String)] = Seq())(implicit logger: IndentedLogger) =
    logger.logError("", message, flattenTuples(parameters): _*)

  // Warn with optional parameters (same note as error: no enabled-level guard).
  def warn(message: ⇒ String, parameters: ⇒ Seq[(String, String)] = Seq())(implicit logger: IndentedLogger) =
    logger.logWarning("", message, flattenTuples(parameters): _*)

  // Info with optional parameters; message/parameters are only evaluated when enabled.
  def info(message: ⇒ String, parameters: ⇒ Seq[(String, String)] = Seq())(implicit logger: IndentedLogger) =
    if (logger.isInfoEnabled)
      logger.logInfo("", message, flattenTuples(parameters): _*)

  // Debug with optional parameters; message/parameters are only evaluated when enabled.
  def debug(message: ⇒ String, parameters: ⇒ Seq[(String, String)] = Seq())(implicit logger: IndentedLogger) =
    if (logger.isDebugEnabled)
      logger.logDebug("", message, flattenTuples(parameters): _*)

  // Log at an arbitrary level.
  // NOTE(review): this is gated on isDebugEnabled regardless of the requested
  // logLevel, so e.g. WARN messages routed through here are dropped unless debug
  // logging is on — confirm this is intentional.
  def log(logLevel: Level, message: ⇒ String, parameters: ⇒ Seq[(String, String)] = Seq())(implicit logger: IndentedLogger) =
    if (logger.isDebugEnabled)
      logger.log(logLevel, "", message, flattenTuples(parameters): _*)

  // Debug block with optional parameters: logs a start marker, runs the body, and
  // always logs the end marker (even when the body throws).
  def withDebug[T](message: ⇒ String, parameters: ⇒ Seq[(String, String)] = Seq())(body: ⇒ T)(implicit logger: IndentedLogger): T =
    try {
      if (logger.isDebugEnabled)
        logger.startHandleOperation("", message, flattenTuples(parameters): _*)
      body
    } finally {
      if (logger.isDebugEnabled)
        logger.endHandleOperation()
    }

  // Run the given block only in debug mode
  def ifDebug[T](body: ⇒ T)(implicit logger: IndentedLogger): Unit =
    if (logger.isDebugEnabled)
      body

  // Whether debug logging is enabled
  def debugEnabled(implicit logger: IndentedLogger) = logger.isDebugEnabled

  // Call from a result block to set result parameters
  def debugResults(parameters: ⇒ Seq[(String, String)])(implicit logger: IndentedLogger) =
    if (logger.isDebugEnabled)
      logger.setDebugResults(flattenTuples(parameters): _*)

  // Flattens (name, value) pairs into the alternating name/value varargs sequence
  // that IndentedLogger's API expects.
  private def flattenTuples(tuples: Seq[(String, String)]) =
    tuples flatMap { case (n, v) ⇒ Seq(n, v) }
}
| wesley1001/orbeon-forms | src/main/scala/org/orbeon/oxf/util/Logging.scala | Scala | lgpl-2.1 | 2,994 |
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package scaps.nucleus.indexing
import java.util.regex.Pattern
import scaps.nucleus.Contravariant
import scaps.nucleus.Covariant
import scaps.nucleus.Invariant
import scaps.nucleus.LanguageSettings
import scaps.nucleus.TypeParam
import scaps.nucleus.TypeRef
import scaps.nucleus.Variance
import scaps.nucleus.Type
private[nucleus] object TypeNormalization {

  import scaps.nucleus.indexing.{ InternalTypes => I }

  /**
   * Eliminates `tpe`'s type parameters by substituting each reference to one of
   * them with a concrete stand-in chosen by the variance of the position it
   * occurs in: covariant positions use the parameter's lower bound (defaulting
   * to Bottom), contravariant positions its upper bound (defaulting to Top),
   * and invariant positions become Unknown.
   */
  def substituteTypeParams(tpe: Type): TypeRef = {
    def loop(tr: TypeRef): TypeRef =
      tr match {
        case t @ TypeRef(v, name, args) =>
          tpe.params.find(_.name == name)
            .map { tp =>
              v match {
                case Covariant =>
                  tp.lowerBound
                    .getOrElse(I.Bottom(v, args))
                case Contravariant =>
                  tp.upperBound
                    .getOrElse(I.Top(v, args))
                case Invariant =>
                  I.Unknown(v, args)
              }
            }
            // Not a type parameter: recurse into the type arguments.
            .getOrElse(t.copy(args = args.map(loop)))
        // NOTE(review): this case looks unreachable — the pattern above appears to
        // match any TypeRef; confirm whether TypeRef has a refutable extractor.
        case t => t.copy(args = t.args.map(loop))
      }

    loop(tpe.ref)
  }

  /**
   * Brings a type into normal form: functions are first fully curried (one
   * argument per list), then the outermost chain of unary functions is collapsed
   * back into a single uncurried function. The net effect is that nested result
   * functions are flattened, e.g. A => (B => C) becomes (A, B) => C.
   */
  def normalize(tpe: TypeRef): TypeRef = {
    // (a, b, ...) => r  ~~>  a => ((b, ...) => r), applied recursively.
    def curryFunctions(tpe: TypeRef): TypeRef =
      tpe match {
        case I.Fn(v, a :: (as @ (_ :: _)), res) =>
          I.Fn(v, List(a), curryFunctions(I.Fn(v, as, res)))
        case t => t.copy(args = t.args.map(curryFunctions))
      }

    // Accumulates every argument along the outermost covariant function chain,
    // together with the final (non-function) result type.
    def uncurryOutermostFunctionApplications(tpe: TypeRef): TypeRef = {
      def outermostArgsAndResult(tpe: TypeRef, prevArgs: List[TypeRef] = Nil): (List[TypeRef], TypeRef) =
        tpe match {
          case I.Fn(Covariant, args, res) =>
            outermostArgsAndResult(res, prevArgs ++ args)
          case t => (prevArgs, t)
        }

      val (args, res) = outermostArgsAndResult(tpe)

      // A non-function type passes through unchanged.
      if (args.isEmpty)
        res
      else
        I.Fn(tpe.variance, args, res)
    }

    val normalize =
      (curryFunctions _) andThen
        (uncurryOutermostFunctionApplications _)

    normalize(tpe)
  }

  /** Renames every occurrence of the given type parameters throughout `tpe`. */
  def renameTypeParams(paramsWithNewName: List[(TypeParam, String)], tpe: TypeRef): TypeRef =
    tpe match {
      case t @ TypeRef(v, name, args) =>
        // Rename in the arguments first, then the head name if it is a parameter.
        val renamedArgs = args.map(renameTypeParams(paramsWithNewName, _))
        paramsWithNewName.find(_._1.name == name).fold {
          t.copy(args = renamedArgs)
        } {
          case (_, newName) =>
            TypeRef(v, newName, renamedArgs)
        }
    }
}
| scala-search/scaps | nucleus/src/main/scala/scaps/nucleus/indexing/TypeNormalization.scala | Scala | mpl-2.0 | 2,665 |
package fpinscala.datastructures
sealed trait Tree[+A]
case class Leaf[A](value: A) extends Tree[A]
case class Branch[A](left: Tree[A], right: Tree[A]) extends Tree[A]

object Tree {

  /**
   * Folds over the tree: `f` maps each leaf value and `g` combines the results of
   * the two subtrees of a branch. All other operations are derived from this.
   * (Replaces the broken commented-out fold sketch, whose signature could not
   * distinguish leaf mapping from branch combination.)
   */
  def fold[A, B](t: Tree[A])(f: A => B)(g: (B, B) => B): B = t match {
    case Leaf(v)      => f(v)
    case Branch(l, r) => g(fold(l)(f)(g), fold(r)(f)(g))
  }

  /** Number of nodes (leaves and branches) in the tree. */
  def size[A](t: Tree[A]): Int =
    fold(t)(_ => 1)((l, r) => l + r + 1)

  // Shared example tree used by the exercises.
  val tt = Branch(Leaf(4), Branch(Leaf(1), Branch(Leaf(3), Leaf(6))))

  /** Largest value held in a tree of Ints. */
  def max(t: Tree[Int]): Int =
    fold(t)(identity)(_ max _)

  /** Number of nodes on the longest root-to-leaf path (a single leaf has depth 1). */
  def depth[A](t: Tree[A]): Int =
    fold(t)(_ => 1)((l, r) => 1 + (l max r))

  /** Applies `f` to every leaf value, preserving the tree structure. */
  def map[A, B](t: Tree[A])(f: A => B): Tree[B] =
    fold(t)(a => Leaf(f(a)): Tree[B])(Branch(_, _))
}
| svenski/fpinscala | exercises/src/main/scala/fpinscala/datastructures/Tree.scala | Scala | mit | 922 |
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.flaminem.flamy.utils.collection.immutable
/**
* Created by fpin on 11/23/16.
*/
// A map that also keeps its key/value pairs in insertion order: `entries` holds the
// ordered sequence, `map` the same pairs for key lookup. Uniqueness and the ++
// operation come from UniqueSeqMapLike. NOTE(review): the two collections are
// assumed to stay in sync — presumably enforced by the Like trait; not visible here.
class UniqueSeqMap[K, V](
  val entries: Seq[(K, V)] = Seq[(K, V)](),
  val map: Map[K, V] = Map[K, V]()
) extends UniqueSeqMapLike[K, V, UniqueSeqMap[K, V]]{

  // Required by UniqueSeqMapLike: rebuild this concrete type from its two parts.
  override def copy(entries: Seq[(K, V)], map: Map[K, V]): UniqueSeqMap[K, V] = {
    new UniqueSeqMap(entries, map)
  }
}

object UniqueSeqMap {

  /** Builds a UniqueSeqMap from the given key/value pairs (varargs). */
  def apply[K, V](pairs: (K, V)*): UniqueSeqMap[K, V] = {
    new UniqueSeqMap[K, V]() ++ pairs
  }

  /** Builds a UniqueSeqMap from any iterable of key/value pairs. */
  def apply[K, V](pairs: Iterable[(K, V)]): UniqueSeqMap[K, V] = {
    new UniqueSeqMap[K, V]() ++ pairs
  }
}
| flaminem/flamy | src/main/scala/com/flaminem/flamy/utils/collection/immutable/UniqueSeqMap.scala | Scala | apache-2.0 | 1,188 |
package models.export.format
import akka.stream.scaladsl.Source
import akka.util.ByteString
import java.io.{ByteArrayOutputStream,FilterOutputStream,OutputStream}
import models.export.rows.Rows
/** Provides a java.io.OutputStream-friendly streaming format.
*
* Implementors will decide upon a Context class (for instance,
* java.io.ZipOutputStream) and then write to it when prompted -- and *only*
* when prompted.
*/
trait WriteBasedFormat[Context] { self: Format =>
  import WriteBasedFormat.Step

  /** Creates the context you'll use to write things.
    *
    * For instance, the simplest context is just the OutputStream itself:
    *
    *   override def createContext(sink: OutputStream) = sink
    *   override def writeBegin(context: OutputStream) = context.write(..)
    *   ...
    *
    * But more interesting would be to write to a zipfile:
    *
    *   override def createContext(sink: OutputStream) = new ZipOutputStream(sink)
    *   override def writeBegin(context: ZipOutputStream) = context.write(...)
    *
    * And you may prefer to use a context that tracks state, too.
    */
  protected def createContext(sink: OutputStream): Context

  /** Writes the first few bytes of the output to context. */
  protected def writeBegin(context: Context): Unit

  /** Writes the header row to context. */
  protected def writeHeaders(headers: Array[String], context: Context): Unit

  /** Writes a row to context. */
  protected def writeRow(row: Array[String], context: Context): Unit

  /** Writes the end of the file to context.
    *
    * You must call any `flush` or `close` methods that are necessary to clear
    * any buffers here.
    */
  protected def writeEnd(context: Context): Unit

  /** Provides an OutputStream for implementations and returns a Source.
    *
    * Implementations should implement the writeBegin(), writeHeaders(),
    * writeRow() and writeEnd() methods, which will all call write() on some
    * OutputStream that eventually writes to the `sink` passed to
    * createContext(). This method will call them those methods at the correct
    * moments and re-route their output to the returned enumerator.
    */
  override def byteSource(rows: Rows): Source[ByteString, akka.NotUsed] = {
    // Every write from the format implementation lands in this in-memory buffer,
    // which is drained after each step to form the outgoing chunks.
    val sink = new ByteArrayOutputStream
    val context = createContext(sink)
    // The step stream drives the implementation in order:
    // Begin, Headers, one Row per data row, then End.
    val steps: Source[Step, akka.NotUsed] = Source.single(Step.Begin)
      .concat(Source.single(Step.Headers(rows.headers)))
      .concat(rows.rows.map(Step.Row))
      .concat(Source.single(Step.End))
    steps
      .map { step => stepToBytes(step, sink, context) }
      .filter(_.nonEmpty) // an empty chunk ends an HTTP Chunked transfer
  }

  /** Calls writeXXX(xxx, context) and then returns the bytes that were
    * written to context.
    */
  private def stepToBytes(step: Step, sink: ByteArrayOutputStream, context: Context): ByteString = {
    step match {
      case Step.Begin => writeBegin(context)
      case Step.Headers(headers) => writeHeaders(headers, context)
      case Step.Row(row) => writeRow(row, context)
      case Step.End => writeEnd(context)
    }
    // Drain whatever the implementation buffered during this step.
    val ret = sink.toByteArray
    sink.reset
    ByteString(ret)
  }
}

object WriteBasedFormat {
  // Internal protocol of write events driving a WriteBasedFormat implementation.
  sealed trait Step
  object Step {
    case object Begin extends Step
    case class Headers(headers: Array[String]) extends Step
    case class Row(row: Array[String]) extends Step
    case object End extends Step
  }
}
| overview/overview-server | web/app/models/export/format/WriteBasedFormat.scala | Scala | agpl-3.0 | 3,435 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kudu.tools.status
import com.beust.jcommander.Parameters
import org.locationtech.geomesa.kudu.data.KuduDataStore
import org.locationtech.geomesa.kudu.tools.KuduDataStoreCommand
import org.locationtech.geomesa.kudu.tools.KuduDataStoreCommand.KuduParams
import org.locationtech.geomesa.kudu.tools.status.KuduDescribeSchemaCommand.KuduDescribeSchemaParams
import org.locationtech.geomesa.tools.RequiredTypeNameParam
import org.locationtech.geomesa.tools.status.DescribeSchemaCommand
// Kudu-backed implementation of the generic describe-schema CLI command; the Kudu
// connection parameters come from KuduDataStoreCommand via KuduParams.
class KuduDescribeSchemaCommand extends DescribeSchemaCommand[KuduDataStore] with KuduDataStoreCommand {
  override val params = new KuduDescribeSchemaParams
}

object KuduDescribeSchemaCommand {
  // JCommander parameter bean: Kudu connection options plus the required type name.
  @Parameters(commandDescription = "Describe the attributes of a given GeoMesa feature type")
  class KuduDescribeSchemaParams extends KuduParams with RequiredTypeNameParam
}
| aheyne/geomesa | geomesa-kudu/geomesa-kudu-tools/src/main/scala/org/locationtech/geomesa/kudu/tools/status/KuduDescribeSchemaCommand.scala | Scala | apache-2.0 | 1,351 |
package metronome.chrono
/**
* The Minguo calendar system.
* <p>
* This chronology defines the rules of the Minguo calendar system.
* This calendar system is primarily used in the Republic of China, often known as Taiwan.
* Dates are aligned such that {@code 0001-01-01 (Minguo)} is {@code 1912-01-01 (ISO)}.
* <p>
* The fields are defined as follows:
* <p><ul>
* <li>era - There are two eras, the current 'Republic' (ERA_ROC) and the previous era (ERA_BEFORE_ROC).
* <li>year-of-era - The year-of-era for the current era increases uniformly from the epoch at year one.
* For the previous era the year increases from one as time goes backwards.
* The value for the current era is equal to the ISO proleptic-year minus 1911.
* <li>proleptic-year - The proleptic year is the same as the year-of-era for the
* current era. For the previous era, years have zero, then negative values.
* The value is equal to the ISO proleptic-year minus 1911.
* <li>month-of-year - The Minguo month-of-year exactly matches ISO.
* <li>day-of-month - The Minguo day-of-month exactly matches ISO.
* <li>day-of-year - The Minguo day-of-year exactly matches ISO.
* <li>leap-year - The Minguo leap-year pattern exactly matches ISO, such that the two calendars
* are never out of step.
* </ul><p>
*
* @implSpec
* This class is immutable and thread-safe.
*
* @since 1.8
*/
object MinguoChronology {
  /**
   * Singleton instance for the Minguo chronology.
   */
  final val INSTANCE: MinguoChronology = new MinguoChronology
  /**
   * Serialization version.
   */
  private final val serialVersionUID: Long = 1039765215346859963L
  /**
   * The difference in years between ISO and Minguo.
   * Minguo year 1 corresponds to ISO year 1912, so ISO year = Minguo year + 1911.
   */
  private[chrono] final val YEARS_DIFFERENCE: Int = 1911
}
/**
 * The Minguo calendar system: dates are aligned such that 0001-01-01 (Minguo)
 * is 1912-01-01 (ISO); months, days and leap years match ISO exactly.
 *
 * Review fixes applied to this Java-converted class:
 *  - the invalid empty `private def { }` constructor artifact was replaced with
 *    a private primary constructor (only the companion's INSTANCE constructs it);
 *  - `date(TemporalAccessor)` and `range(ChronoField)` had lost Java's
 *    early-`return` semantics (every branch result was discarded and the last
 *    expression always won); both are restructured so each branch's result is
 *    actually returned, and `range` no longer throws MatchError for other fields;
 *  - `YEARS_DIFFERENCE` is now imported from the companion instead of being an
 *    unresolved bare reference.
 */
final class MinguoChronology private extends Chronology {

  import MinguoChronology.YEARS_DIFFERENCE

  /** Gets the ID of the chronology, 'Minguo', usable with {@code Chronology.of(String)}. */
  def getId: String = "Minguo"

  /** Gets the LDML calendar type, 'roc' (also usable as the locale 'ca' key). */
  def getCalendarType: String = "roc"

  /**
   * Obtains a Minguo date from era, year-of-era, month-of-year and day-of-month.
   *
   * @throws DateTimeException if unable to create the date
   * @throws ClassCastException if the era is not a MinguoEra
   */
  override def date(era: Era, yearOfEra: Int, month: Int, dayOfMonth: Int): MinguoDate =
    date(prolepticYear(era, yearOfEra), month, dayOfMonth)

  /**
   * Obtains a Minguo date from proleptic-year, month-of-year and day-of-month.
   *
   * @throws DateTimeException if unable to create the date
   */
  def date(prolepticYear: Int, month: Int, dayOfMonth: Int): MinguoDate =
    new MinguoDate(LocalDate.of(prolepticYear + YEARS_DIFFERENCE, month, dayOfMonth))

  /**
   * Obtains a Minguo date from era, year-of-era and day-of-year.
   *
   * @throws DateTimeException if unable to create the date
   * @throws ClassCastException if the era is not a MinguoEra
   */
  override def dateYearDay(era: Era, yearOfEra: Int, dayOfYear: Int): MinguoDate =
    dateYearDay(prolepticYear(era, yearOfEra), dayOfYear)

  /**
   * Obtains a Minguo date from proleptic-year and day-of-year.
   *
   * @throws DateTimeException if unable to create the date
   */
  def dateYearDay(prolepticYear: Int, dayOfYear: Int): MinguoDate =
    new MinguoDate(LocalDate.ofYearDay(prolepticYear + YEARS_DIFFERENCE, dayOfYear))

  /**
   * Obtains a Minguo date from the epoch-day.
   *
   * @throws DateTimeException if unable to create the date
   */
  def dateEpochDay(epochDay: Long): MinguoDate =
    new MinguoDate(LocalDate.ofEpochDay(epochDay))

  override def dateNow: MinguoDate = dateNow(Clock.systemDefaultZone)

  override def dateNow(zone: ZoneId): MinguoDate = dateNow(Clock.system(zone))

  override def dateNow(clock: Clock): MinguoDate = date(LocalDate.now(clock))

  /**
   * Converts a temporal object to a MinguoDate.
   * Fixed: the original discarded the already-converted instance and always
   * allocated a new date via LocalDate.from.
   */
  def date(temporal: TemporalAccessor): MinguoDate = temporal match {
    case minguo: MinguoDate => minguo
    case _ => new MinguoDate(LocalDate.from(temporal))
  }

  override def localDateTime(temporal: TemporalAccessor): ChronoLocalDateTime[MinguoDate] =
    super.localDateTime(temporal).asInstanceOf[ChronoLocalDateTime[MinguoDate]]

  override def zonedDateTime(temporal: TemporalAccessor): ChronoZonedDateTime[MinguoDate] =
    super.zonedDateTime(temporal).asInstanceOf[ChronoZonedDateTime[MinguoDate]]

  override def zonedDateTime(instant: Instant, zone: ZoneId): ChronoZonedDateTime[MinguoDate] =
    super.zonedDateTime(instant, zone).asInstanceOf[ChronoZonedDateTime[MinguoDate]]

  /**
   * Checks if the specified year is a leap year.
   * Minguo leap years occur exactly in line with ISO leap years; the year is
   * not validated for range.
   */
  def isLeapYear(prolepticYear: Long): Boolean =
    IsoChronology.INSTANCE.isLeapYear(prolepticYear + YEARS_DIFFERENCE)

  /** Computes the proleptic year: ROC counts forward from 1, BEFORE_ROC counts back from 1. */
  def prolepticYear(era: Era, yearOfEra: Int): Int = {
    if (era.isInstanceOf[MinguoEra] == false) {
      throw new ClassCastException("Era must be MinguoEra")
    }
    if (era eq MinguoEra.ROC) yearOfEra else 1 - yearOfEra
  }

  def eraOf(eraValue: Int): MinguoEra = MinguoEra.of(eraValue)

  def eras: List[Era] = Arrays.asList[Era](MinguoEra.values)

  /**
   * Returns the valid range for a field, shifting the year-based ISO ranges by
   * the 1911-year offset.
   * Fixed: the original discarded each computed range and unconditionally fell
   * through to `field.range`, and threw MatchError for any other field.
   */
  def range(field: ChronoField): ValueRange = field match {
    case PROLEPTIC_MONTH =>
      val range: ValueRange = PROLEPTIC_MONTH.range
      ValueRange.of(range.getMinimum - YEARS_DIFFERENCE * 12L, range.getMaximum - YEARS_DIFFERENCE * 12L)
    case YEAR_OF_ERA =>
      val range: ValueRange = YEAR.range
      ValueRange.of(1, range.getMaximum - YEARS_DIFFERENCE, -range.getMinimum + 1 + YEARS_DIFFERENCE)
    case YEAR =>
      val range: ValueRange = YEAR.range
      ValueRange.of(range.getMinimum - YEARS_DIFFERENCE, range.getMaximum - YEARS_DIFFERENCE)
    case _ =>
      field.range
  }

  def resolveDate(fieldValues: Map[TemporalField, Long], resolverStyle: ResolverStyle): MinguoDate =
    super.resolveDate(fieldValues, resolverStyle).asInstanceOf[MinguoDate]
}
/**
* A date in the Minguo calendar system.
* <p>
* This date operates using the {@linkplain MinguoChronology Minguo calendar}.
* This calendar system is primarily used in the Republic of China, often known as Taiwan.
* Dates are aligned such that {@code 0001-01-01 (Minguo)} is {@code 1912-01-01 (ISO)}.
*
* @implSpec
* This class is immutable and thread-safe.
*
* @since 1.8
*/
object MinguoDate {

  /** Serialization version. */
  private final val serialVersionUID: Long = 1300372329181994526L

  /**
   * Obtains the current Minguo date from the system clock in the default
   * time-zone. The clock is hard-coded, so prefer the Clock overload when
   * testability matters.
   */
  def now: MinguoDate = now(Clock.systemDefaultZone)

  /**
   * Obtains the current Minguo date from the system clock in the specified
   * time-zone, avoiding any dependence on the default zone.
   */
  def now(zone: ZoneId): MinguoDate = now(Clock.system(zone))

  /**
   * Obtains the current Minguo date from the supplied clock. Passing the clock
   * explicitly allows an alternate (e.g. fixed) clock to be injected in tests.
   *
   * @throws DateTimeException if the current date cannot be obtained
   */
  def now(clock: Clock): MinguoDate = new MinguoDate(LocalDate.now(clock))

  /**
   * Obtains a MinguoDate from the proleptic-year, month-of-year and
   * day-of-month fields.
   *
   * @throws DateTimeException if any field is out of range, or the
   *         day-of-month is invalid for the month-year
   */
  def of(prolepticYear: Int, month: Int, dayOfMonth: Int): MinguoDate =
    new MinguoDate(LocalDate.of(prolepticYear + YEARS_DIFFERENCE, month, dayOfMonth))

  /**
   * Converts an arbitrary temporal object to a MinguoDate, typically via the
   * cross-calendar EPOCH_DAY field. Matches TemporalQuery, so it can be used
   * as a method reference: `MinguoDate.from`.
   *
   * @throws DateTimeException if the conversion is not possible
   */
  def from(temporal: TemporalAccessor): MinguoDate =
    MinguoChronology.INSTANCE.date(temporal)

  /** Recreates a date from its serialized (int year, byte month, byte day) form. */
  private[chrono] def readExternal(in: DataInput): MinguoDate = {
    val year: Int = in.readInt
    val month: Int = in.readByte
    val dayOfMonth: Int = in.readByte
    MinguoChronology.INSTANCE.date(year, month, dayOfMonth)
  }
}
/**
 * A date in the Minguo calendar system, backed by an ISO [[LocalDate]] offset
 * by 1911 years.
 *
 * Review fixes applied to this Java-converted class:
 *  - the invalid auxiliary constructor plus the `isoDate = null` field artifact
 *    were replaced with a proper private primary constructor parameter;
 *  - Java's switch fall-through (shared case bodies) was lost in conversion,
 *    leaving empty case bodies in `range` and `with`; the shared bodies are
 *    restored with alternation patterns;
 *  - `range`, `getLong`, `with` and `equals` discarded every branch result and
 *    always returned their final expression (so `equals` always returned
 *    false); each is restructured to return the intended branch result;
 *  - `YEARS_DIFFERENCE` is imported from the chronology companion instead of
 *    being an unresolved bare reference.
 */
final class MinguoDate private[chrono] (private val isoDate: LocalDate) extends ChronoLocalDateImpl[MinguoDate] with ChronoLocalDate {

  import MinguoChronology.YEARS_DIFFERENCE

  /** Gets the chronology of this date, the Minguo calendar system. */
  def getChronology: MinguoChronology = MinguoChronology.INSTANCE

  /** Gets the era: ROC for proleptic years >= 1, otherwise BEFORE_ROC. */
  override def getEra: MinguoEra =
    if (getProlepticYear >= 1) MinguoEra.ROC else MinguoEra.BEFORE_ROC

  /** Length of the month in days; month lengths match the ISO calendar. */
  def lengthOfMonth: Int = isoDate.lengthOfMonth

  /**
   * Returns the valid value range for the field at this date.
   * DAY_OF_MONTH / DAY_OF_YEAR / ALIGNED_WEEK_OF_MONTH delegate to the ISO
   * date; YEAR_OF_ERA depends on which era this date falls in; all other
   * supported chrono fields use the chronology-wide range.
   */
  override def range(field: TemporalField): ValueRange = field match {
    case f: ChronoField =>
      if (!isSupported(field))
        throw new UnsupportedTemporalTypeException("Unsupported field: " + field)
      f match {
        case DAY_OF_MONTH | DAY_OF_YEAR | ALIGNED_WEEK_OF_MONTH =>
          isoDate.range(field)
        case YEAR_OF_ERA =>
          val range: ValueRange = YEAR.range
          val max: Long =
            if (getProlepticYear <= 0) -range.getMinimum + 1 + YEARS_DIFFERENCE
            else range.getMaximum - YEARS_DIFFERENCE
          ValueRange.of(1, max)
        case _ =>
          getChronology.range(f)
      }
    case _ =>
      field.rangeRefinedBy(this)
  }

  /** Gets the value of the field; non-chrono fields resolve via the field itself. */
  def getLong(field: TemporalField): Long = field match {
    case f: ChronoField => f match {
      case PROLEPTIC_MONTH =>
        getProlepticMonth
      case YEAR_OF_ERA =>
        val prolepticYear: Int = getProlepticYear
        if (prolepticYear >= 1) prolepticYear else 1 - prolepticYear
      case YEAR =>
        getProlepticYear
      case ERA =>
        if (getProlepticYear >= 1) 1 else 0
      case _ =>
        isoDate.getLong(field)
    }
    case _ =>
      field.getFrom(this)
  }

  // Months since Minguo proleptic year 0, month 1.
  private def getProlepticMonth: Long = getProlepticYear * 12L + isoDate.getMonthValue - 1

  // Minguo proleptic year: ISO year minus 1911.
  private def getProlepticYear: Int = isoDate.getYear - YEARS_DIFFERENCE

  /**
   * Returns a copy of this date with the field set to a new value; a no-op
   * (returning this) when the field already has that value.
   */
  override def `with`(field: TemporalField, newValue: Long): MinguoDate = field match {
    case f: ChronoField =>
      if (getLong(f) == newValue) this
      else f match {
        case PROLEPTIC_MONTH =>
          getChronology.range(f).checkValidValue(newValue, f)
          plusMonths(newValue - getProlepticMonth)
        case YEAR_OF_ERA | YEAR | ERA =>
          val nvalue: Int = getChronology.range(f).checkValidIntValue(newValue, f)
          f match {
            case YEAR_OF_ERA =>
              `with`(isoDate.withYear(if (getProlepticYear >= 1) nvalue + YEARS_DIFFERENCE else (1 - nvalue) + YEARS_DIFFERENCE))
            case YEAR =>
              `with`(isoDate.withYear(nvalue + YEARS_DIFFERENCE))
            case _ => // ERA: flip between ROC and BEFORE_ROC, preserving year-of-era
              `with`(isoDate.withYear((1 - getProlepticYear) + YEARS_DIFFERENCE))
          }
        case _ =>
          `with`(isoDate.`with`(field, newValue))
      }
    case _ =>
      super.`with`(field, newValue)
  }

  /**
   * {@inheritDoc}
   * @throws DateTimeException { @inheritDoc}
   * @throws ArithmeticException { @inheritDoc}
   */
  override def `with`(adjuster: TemporalAdjuster): MinguoDate = super.`with`(adjuster)

  /**
   * {@inheritDoc}
   * @throws DateTimeException { @inheritDoc}
   * @throws ArithmeticException { @inheritDoc}
   */
  override def plus(amount: TemporalAmount): MinguoDate = super.plus(amount)

  /**
   * {@inheritDoc}
   * @throws DateTimeException { @inheritDoc}
   * @throws ArithmeticException { @inheritDoc}
   */
  override def minus(amount: TemporalAmount): MinguoDate = super.minus(amount)

  private[chrono] def plusYears(years: Long): MinguoDate = `with`(isoDate.plusYears(years))

  private[chrono] def plusMonths(months: Long): MinguoDate = `with`(isoDate.plusMonths(months))

  private[chrono] override def plusWeeks(weeksToAdd: Long): MinguoDate = super.plusWeeks(weeksToAdd)

  private[chrono] def plusDays(days: Long): MinguoDate = `with`(isoDate.plusDays(days))

  override def plus(amountToAdd: Long, unit: TemporalUnit): MinguoDate = super.plus(amountToAdd, unit)

  override def minus(amountToAdd: Long, unit: TemporalUnit): MinguoDate = super.minus(amountToAdd, unit)

  private[chrono] override def minusYears(yearsToSubtract: Long): MinguoDate = super.minusYears(yearsToSubtract)

  private[chrono] override def minusMonths(monthsToSubtract: Long): MinguoDate = super.minusMonths(monthsToSubtract)

  private[chrono] override def minusWeeks(weeksToSubtract: Long): MinguoDate = super.minusWeeks(weeksToSubtract)

  private[chrono] override def minusDays(daysToSubtract: Long): MinguoDate = super.minusDays(daysToSubtract)

  // Reuse this instance when the underlying ISO date is unchanged.
  private def `with`(newDate: LocalDate): MinguoDate =
    if (newDate == isoDate) this else new MinguoDate(newDate)

  final override def atTime(localTime: LocalTime): ChronoLocalDateTime[MinguoDate] =
    super.atTime(localTime).asInstanceOf[ChronoLocalDateTime[MinguoDate]]

  /** Period from this date until the given date, expressed in the Minguo chronology. */
  def until(endDate: ChronoLocalDate): ChronoPeriod = {
    val period: Period = isoDate.until(endDate)
    getChronology.period(period.getYears, period.getMonths, period.getDays)
  }

  override def toEpochDay: Long = isoDate.toEpochDay

  /** Equality is identity or equality of the underlying ISO date. */
  override def equals(obj: AnyRef): Boolean = obj match {
    case other: MinguoDate => (this eq other) || this.isoDate == other.isoDate
    case _ => false
  }

  override def hashCode: Int = getChronology.getId.hashCode ^ isoDate.hashCode

  // Serialization proxy (see Ser).
  private def writeReplace: AnyRef = new Ser(Ser.MINGUO_DATE_TYPE, this)

  // Writes the (year, month, day) form read back by MinguoDate.readExternal.
  private[chrono] def writeExternal(out: DataOutput) {
    out.writeInt(get(YEAR))
    out.writeByte(get(MONTH_OF_YEAR))
    out.writeByte(get(DAY_OF_MONTH))
  }
}
/**
* An era in the Minguo calendar system.
* <p>
* The Minguo calendar system has two eras.
* The current era, for years from 1 onwards, is known as the 'Republic of China' era.
* All previous years, zero or earlier in the proleptic count or one and greater
* in the year-of-era count, are part of the 'Before Republic of China' era.
* <p>
* <table summary="Minguo years and eras" cellpadding="2" cellspacing="3" border="0" >
* <thead>
* <tr class="tableSubHeadingColor">
* <th class="colFirst" align="left">year-of-era</th>
* <th class="colFirst" align="left">era</th>
* <th class="colFirst" align="left">proleptic-year</th>
* <th class="colLast" align="left">ISO proleptic-year</th>
* </tr>
* </thead>
* <tbody>
* <tr class="rowColor">
* <td>2</td><td>ROC</td><td>2</td><td>1913</td>
* </tr>
* <tr class="altColor">
* <td>1</td><td>ROC</td><td>1</td><td>1912</td>
* </tr>
* <tr class="rowColor">
* <td>1</td><td>BEFORE_ROC</td><td>0</td><td>1911</td>
* </tr>
* <tr class="altColor">
* <td>2</td><td>BEFORE_ROC</td><td>-1</td><td>1910</td>
* </tr>
* </tbody>
* </table>
* <p>
* <b>Do not use {@code ordinal()} to obtain the numeric representation of {@code MinguoEra}.
* Use {@code getValue()} instead.</b>
*
* @implSpec
* This is an immutable and thread-safe enum.
*
* @since 1.8
*/
object MinguoEra {
  /**
   * Obtains an instance of {@code MinguoEra} from an {@code int} value:
   * 0 is BEFORE_ROC, 1 is ROC.
   *
   * @param minguoEra the BEFORE_ROC/ROC value to represent, from 0 (BEFORE_ROC) to 1 (ROC)
   * @return the era singleton, not null
   * @throws DateTimeException if the value is invalid
   */
  def of(minguoEra: Int): MinguoEra = {
    minguoEra match {
      case 0 =>
        BEFORE_ROC
      case 1 =>
        ROC
      case _ =>
        throw new DateTimeException("Invalid era: " + minguoEra)
    }
  }

  /** Reads an era back from its serialized single-byte form (see MinguoEra.writeExternal). */
  private[chrono] def readExternal(in: DataInput): MinguoEra = {
    val eraValue: Byte = in.readByte
    MinguoEra.of(eraValue)
  }

  /**
   * The singleton instance for the era before the current one,
   * 'Before Republic of China Era', which has the numeric value 0.
   *
   * Fixed: the declaration was missing its type ascription
   * (`final val BEFORE_ROC: = null`) and did not compile.
   * NOTE(review): the null value itself is a leftover from the Java-enum
   * conversion — real singleton instances still need to be wired in; confirm
   * against the intended enum design.
   */
  final val BEFORE_ROC: MinguoEra = null
  /**
   * The singleton instance for the current era, 'Republic of China Era',
   * which has the numeric value 1.
   *
   * Fixed: missing type ascription, as for BEFORE_ROC; the null value is a
   * conversion placeholder — TODO confirm.
   */
  final val ROC: MinguoEra = null
}
final class MinguoEra extends Era {
  /**
   * Gets the numeric era {@code int} value.
   * <p>
   * The era BEFORE_ROC has the value 0, while the era ROC has the value 1.
   *
   * @return the era value, from 0 (BEFORE_ROC) to 1 (ROC)
   */
  def getValue: Int = {
    // NOTE(review): `ordinal` comes from Java-enum machinery that did not
    // survive the conversion to Scala — no `ordinal` member is visible in this
    // class or its companion; confirm how era ordering is meant to be supplied.
    ordinal
  }
  // Serialization proxy: eras serialize via Ser with a dedicated type tag.
  private def writeReplace: AnyRef = {
    new Ser(Ser.MINGUO_ERA_TYPE, this)
  }
  // Single-byte form read back by MinguoEra.readExternal.
  private[chrono] def writeExternal(out: DataOutput) {
    out.writeByte(this.getValue)
  }
}
| javierg1975/metronome | src/main/scala/metronome/chrono/Minguo.scala | Scala | gpl-2.0 | 22,271 |
// Type-level booleans: each case is usable as a singleton type
// (True.type / False.type) in the match type below.
enum Bool {
  case True
  case False
}
import Bool.*
// Match type computing logical negation at the type level:
// Not[True.type] reduces to False.type and vice versa.
type Not[B <: Bool] = B match {
  case True.type => False.type
  case False.type => True.type
}
val t: True.type = True
val f: False.type = False
// Each ascription below type-checks only if Not[...] reduces as expected,
// including through singleton types of previously negated values; this file
// is a positive compilation test (no runtime assertions).
val g: Not[False.type] = t
val t1: Not[f.type] = t // transitivity
val f1: Not[t.type] = f // transitivity
val t2: Not[f1.type] = t1 // transitivity x2
val f2: Not[t1.type] = f1 // transitivity x2
| lampepfl/dotty | tests/pos/i10511.scala | Scala | apache-2.0 | 402 |
package sample.cluster.simple
import com.typesafe.config.ConfigFactory
import akka.actor.ActorSystem
import akka.actor.Props
/**
 * Entry point that boots one or more Akka cluster nodes in a single JVM.
 * With no arguments it starts three nodes on ports 2551, 2552 and 0
 * (0 = pick a random free port); otherwise each argument is used as a port.
 */
object SimpleClusterApp {

  def main(args: Array[String]): Unit =
    if (args.isEmpty) startup(Seq("2551", "2552", "0"))
    else startup(args)

  /** Starts one actor system per port, each with a cluster-event listener. */
  def startup(ports: Seq[String]): Unit =
    for (port <- ports) {
      // Overlay the remoting port on top of the default configuration.
      val config = ConfigFactory
        .parseString("akka.remote.netty.tcp.port=" + port)
        .withFallback(ConfigFactory.load())
      val system = ActorSystem("ClusterSystem", config)
      // Subscribe an actor to cluster domain events for this node.
      system.actorOf(Props[SimpleClusterListener], name = "clusterListener")
    }
}
| linearregression/social_data_collector | src/main/scala/sample/cluster/simple/SimpleClusterApp.scala | Scala | cc0-1.0 | 777 |
package com.twitter.algebird
import scala.collection.immutable.SortedMap
object SpaceSaver {

  /**
   * Construct SpaceSaver with given capacity containing a single item.
   * This is the public api to create a new SpaceSaver.
   */
  def apply[T](capacity: Int, item: T): SpaceSaver[T] = SSOne(capacity, item)

  // Orders (item -> (count, error)) pairs by descending count, breaking ties by
  // ascending error, so the head of a sorted sequence is the most frequent /
  // most certain entry.
  private[algebird] val ordering = Ordering.by[(_, (Long, Long)), (Long, Long)]{ case (item, (count, err)) => (-count, err) }

  // Allows two sketches to be merged with Semigroup syntax (delegates to ++).
  implicit def spaceSaverSemiGroup[T]: Semigroup[SpaceSaver[T]] = new SpaceSaverSemigroup[T]
}
/**
 * Data structure used in the Space-Saving Algorithm to find the approximate
 * most frequent and top-k elements, as described in "Efficient Computation of
 * Frequent and Top-k Elements in Data Streams"
 * (www.cs.ucsb.edu/research/tech_reports/reports/2005-23.pdf), where it is
 * called StreamSummary. Note that the adaptation to hadoop and parallelization
 * were not described in the article and have not been proven to be
 * mathematically correct or preserve the guarantees or benefits of the
 * algorithm.
 */
sealed abstract class SpaceSaver[T] {
  import SpaceSaver.ordering

  /** Maximum number of counters to keep (parameter "m" in the research paper). */
  def capacity: Int

  /** Current lowest value for count. */
  def min: Long

  /**
   * Map of item to counter, where each counter consists of an observed count
   * and possible over-estimation (error).
   */
  def counters: Map[T, (Long, Long)]

  def ++(other: SpaceSaver[T]): SpaceSaver[T]

  /** Returns the frequency estimate for the item; untracked items default to (min, min). */
  def frequency(item: T): Approximate[Long] =
    counters.getOrElse(item, (min, min)) match {
      case (count, err) => Approximate(count - err, count, count, 1.0)
    }

  /**
   * Get the elements that show up more than thres times.
   * Returns sorted in descending order: (item, Approximate[Long], guaranteed).
   */
  def mostFrequent(thres: Int): Seq[(T, Approximate[Long], Boolean)] = {
    val heavy = counters.toList
      .filter { case (_, (count, _)) => count >= thres }
      .sorted(ordering)
    heavy.map { case (item, (count, err)) =>
      (item, Approximate(count - err, count, count, 1.0), thres <= count - err)
    }
  }

  /**
   * Get the top-k elements.
   * Returns sorted in descending order: (item, Approximate[Long], guaranteed).
   */
  def topK(k: Int): Seq[(T, Approximate[Long], Boolean)] = {
    require(k < capacity)
    val (top, rest) = counters.toList.sorted(ordering).splitAt(k)
    // The (k+1)-th count bounds whether each of the top k is guaranteed.
    val countKPlus1 = rest.headOption.fold(0L)(_._2._1)
    top.map { case (item, (count, err)) =>
      (item, Approximate(count - err, count, count, 1.0), countKPlus1 < count - err)
    }
  }

  /**
   * Check consistency with other SpaceSaver, useful for testing.
   * Returns boolean indicating if they are consistent.
   */
  def consistentWith(that: SpaceSaver[T]): Boolean = {
    val allItems = counters.keys ++ that.counters.keys
    allItems.forall(item => (frequency(item) - that.frequency(item)) ~ 0)
  }
}
/** A sketch that has observed exactly one occurrence of a single item. */
case class SSOne[T](capacity: Int, item: T) extends SpaceSaver[T] {
  require(capacity > 1)

  def min: Long = 0L

  def counters: Map[T, (Long, Long)] = Map(item -> (1L, 1L))

  def ++(other: SpaceSaver[T]): SpaceSaver[T] = other match {
    case single: SSOne[_] => SSMany(this).add(single)
    case many: SSMany[_]  => many.add(this)
  }
}
object SSMany {
  // Rebuild the count -> items index from a counter map.
  private def bucketsFromCounters[T](counters: Map[T, (Long, Long)]): SortedMap[Long, Set[T]] = {
    val byCount = counters.groupBy { case (_, (count, _)) => count }
    SortedMap[Long, Set[T]]() ++ byCount.mapValues(_.keySet)
  }

  // Construct with the bucket index derived from the counters.
  private def apply[T](capacity: Int, counters: Map[T, (Long, Long)]): SSMany[T] =
    SSMany(capacity, counters, bucketsFromCounters(counters))

  // Promote a single-item sketch: one exact observation, zero error.
  private[algebird] def apply[T](one: SSOne[T]): SSMany[T] =
    SSMany(one.capacity, Map(one.item -> (1L, 0L)), SortedMap(1L -> Set(one.item)))
}
// Multi-item sketch: `counters` maps item -> (count, error) and `buckets` is a
// count -> items index kept in sync with it, so the lowest-count item can be
// found in O(log n) when capacity is exceeded.
case class SSMany[T](capacity: Int, counters: Map[T, (Long, Long)], buckets: SortedMap[Long, Set[T]]) extends SpaceSaver[T] {
  // True while fewer than `capacity` distinct items are tracked, i.e. no item
  // has been evicted yet and all counts are exact.
  private val exact: Boolean = counters.size < capacity

  // Lowest tracked count; 0 while there is still spare capacity.
  val min: Long = if (counters.size < capacity) 0L else buckets.firstKey

  // item is already present and just needs to be bumped up one
  private def bump(item: T) = {
    val (count, err) = counters(item)
    val counters1 = counters + (item -> (count + 1L, err)) // increment by one
    val currBucket = buckets(count) // current bucket
    val buckets1 = {
      if (currBucket.size == 1) // delete current bucket since it will be empty
        buckets - count
      else // remove item from current bucket
        buckets + (count -> (currBucket - item))
    } + (count + 1L -> (buckets.getOrElse(count + 1L, Set()) + item))
    SSMany(capacity, counters1, buckets1)
  }

  // lose one item to meet capacity constraint: evict an arbitrary member of
  // the lowest-count bucket
  private def loseOne = {
    val firstBucket = buckets(buckets.firstKey)
    val itemToLose = firstBucket.head
    val counters1 = counters - itemToLose
    val buckets1 = if (firstBucket.size == 1)
      buckets - min
    else
      buckets + (min -> (firstBucket - itemToLose))
    SSMany(capacity, counters1, buckets1)
  }

  // introduce new item with the given count and over-estimation error
  private def introduce(item: T, count: Long, err: Long) = {
    val counters1 = counters + (item -> (count, err))
    val buckets1 = buckets + (count -> (buckets.getOrElse(count, Set()) + item))
    SSMany(capacity, counters1, buckets1)
  }

  // add a single element; a brand-new item enters at min + 1 with error min
  // (after evicting the current minimum when at capacity)
  private[algebird] def add(x: SSOne[T]): SSMany[T] = {
    require(x.capacity == capacity)
    if (counters.contains(x.item))
      bump(x.item)
    else
      (if (exact) this else this.loseOne).introduce(x.item, min + 1L, min)
  }

  // merge two stream summaries: sum counts and errors per item (absent items
  // are assumed at (min, min)), then keep only the top `capacity` entries
  private def merge(x: SSMany[T]): SSMany[T] = {
    require(x.capacity == capacity)
    val counters1 = Map() ++
      (counters.keySet ++ x.counters.keySet)
      .toList
      .map { key =>
        val (count1, err1) = counters.getOrElse(key, (min, min))
        val (count2, err2) = x.counters.getOrElse(key, (x.min, x.min))
        (key -> (count1 + count2, err1 + err2))
      }
      .sorted(SpaceSaver.ordering)
      .take(capacity)
    SSMany(capacity, counters1)
  }

  def ++(other: SpaceSaver[T]): SpaceSaver[T] = other match {
    case other: SSOne[_] => add(other)
    case other: SSMany[_] => merge(other)
  }
}
/** Semigroup that merges two SpaceSaver sketches via their ++ operator. */
class SpaceSaverSemigroup[T] extends Semigroup[SpaceSaver[T]] {
  override def plus(x: SpaceSaver[T], y: SpaceSaver[T]): SpaceSaver[T] = x ++ y
}
| avibryant/algebird | algebird-core/src/main/scala/com/twitter/algebird/SpaceSaver.scala | Scala | apache-2.0 | 6,476 |
package com.cterm2.miniflags.common
// Design Parameters
/** Geometry constants used when rendering flag blocks; values are fractions of
  * a unit block bound (presumably Minecraft block units — TODO confirm). */
object Metrics
{
	final val Space = 0.125f					// Margin from default block bound
	final val InvSpace = 1.0f - Space			// Inverted margin from default block bound
	final val BaseHeight = 0.125f				// Flag Base Height
	final val Pole = 0.5f - 1.5f / 16.0f		// Margin to pole face (half block minus half pole width)
	final val FlagThickness = 0.75f / 16.0f		// Flag Thickness
}
| Pctg-x8/miniflags | src/common/metrics.scala | Scala | lgpl-2.1 | 400 |
/**
* This file is part of the TA Buddy project.
* Copyright (c) 2012-2014 Alexey Aksenov ezh@ezh.msk.ru
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Affero General Global License version 3
* as published by the Free Software Foundation with the addition of the
* following permission added to Section 15 as permitted in Section 7(a):
* FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED
* BY Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS»,
* Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS» DISCLAIMS
* THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Global License for more details.
* You should have received a copy of the GNU Affero General Global License
* along with this program; if not, see http://www.gnu.org/licenses or write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA, 02110-1301 USA, or download the license from the following URL:
* http://www.gnu.org/licenses/agpl.html
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Global License.
*
* In accordance with Section 7(b) of the GNU Affero General Global License,
* you must retain the producer line in every report, form or document
* that is created or manipulated using TA Buddy.
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the TA Buddy software without
* disclosing the source code of your own applications.
* These activities include: offering paid services to customers,
* serving files in a web or/and network application,
* shipping TA Buddy with a closed source product.
*
* For more information, please contact Digimead Team at this
* address: ezh@ezh.msk.ru
*/
package org.digimead.tabuddy.desktop.model.definition.ui.dialog.eltemed
import org.digimead.digi.lib.log.api.XLoggable
import org.digimead.tabuddy.desktop.model.definition.Default
import org.eclipse.jface.viewers.{ CellEditor, CellLabelProvider, EditingSupport, TableViewer, TextCellEditor, ViewerCell }
import org.eclipse.swt.graphics.Point
/** Table-column support for the "group" field of an element template item:
  * label rendering with validation feedback, and (disabled) editing support. */
object ColumnGroup extends XLoggable {
  /** Renders the group name and, when validation failed, an error image and tooltip. */
  class TLabelProvider extends CellLabelProvider {
    override def update(cell: ViewerCell) = cell.getElement() match {
      case item: ElementTemplateEditor.Item ⇒
        cell.setText(item.group)
        for (err ← item.groupError)
          cell.setImage(err._2)
      case unknown ⇒
        log.fatal("Unknown item " + unknown.getClass())
    }
    override def getToolTipText(element: AnyRef): String = element match {
      case item: ElementTemplateEditor.Item ⇒
        // Tooltip carries the validation error text, or null when the group is valid.
        item.groupError.map(_._1).orNull
      case unknown ⇒
        log.fatal("Unknown item " + unknown.getClass())
        null
    }
    override def getToolTipShift(obj: Object): Point = Default.toolTipShift
    override def getToolTipDisplayDelayTime(obj: Object): Int = Default.toolTipDisplayDelayTime
    override def getToolTipTimeDisplayed(obj: Object): Int = Default.toolTipTimeDisplayed
  }
  /** Editing support for the group column; editing is currently disabled (canEdit = false). */
  class TEditingSupport(viewer: TableViewer, container: ElementTemplateEditor) extends EditingSupport(viewer) {
    override protected def getCellEditor(element: AnyRef): CellEditor = new TextCellEditor(viewer.getTable())
    override protected def canEdit(element: AnyRef): Boolean = false
    override protected def getValue(element: AnyRef): AnyRef = element match {
      case item: ElementTemplateEditor.Item ⇒
        item.id
      case unknown ⇒
        log.fatal("Unknown item " + unknown.getClass())
        ""
    }
    override protected def setValue(element: AnyRef, value: AnyRef): Unit = element match {
      case before: ElementTemplateEditor.Item ⇒
        val trimmed = value.asInstanceOf[String].trim
        // Only propagate an actual change; revalidate the updated item.
        if (before.group != trimmed) {
          val after = before.copy(group = trimmed)
          container.updateActualProperty(before, container.validateItem(after))
        }
      case unknown ⇒
        log.fatal("Unknown item " + unknown.getClass())
    }
  }
}
| digimead/digi-TABuddy-desktop | part-model-definition/src/main/scala/org/digimead/tabuddy/desktop/model/definition/ui/dialog/eltemed/ColumnGroup.scala | Scala | agpl-3.0 | 4,568 |
//https://www.hackerrank.com/challenges/swap-nodes
// Reads a binary tree from stdin (children given as 1-based indices, -1 = absent),
// then for each query depth k swaps the subtrees of every node whose depth is a
// multiple of k and prints the in-order traversal after each swap.
object SwapNodes extends App {
  sealed trait Tree {
    // In-order traversal: left subtree, node value, right subtree.
    def inOrder: List[Int] = this match {
      case Empty => Nil
      case Node(left, value, right) => left.inOrder ++ (value :: right.inOrder)
      //case Node(left, value, right) => (value :: left.preOrder) ++ right.preOrder // preOrder
      //case Node(left, value, right) => left.postOrder ++ right.postOrder ++ List(value) // postOrder
    }
  }
  case object Empty extends Tree
  case class Node(left: Tree, value: Int, right: Tree) extends Tree
  object Tree {
    // Builds the tree rooted at `value` from the child map; -1 entries become Empty leaves.
    def build(value: Int, nodes: Map[Int, (Int, Int)]):Tree = nodes(value) match {
      case (-1, -1) => Node(Empty, value, Empty)
      case (-1, right) => Node(Empty, value, build(right, nodes))
      case (left, -1) => Node(build(left, nodes), value, Empty)
      case (left, right) => Node(build(left, nodes), value, build(right, nodes))
    }
    // Returns a copy of `tree` with left/right children exchanged at every level
    // that is a multiple of `depth` (the root is level 1).
    def swap(depth: Int, tree: Tree):Tree = {
      def swap(level: Int, tree: Tree): Tree = tree match {
        case Empty => Empty
        case Node(left, value, right) if level % depth == 0 => Node(swap(level + 1, right), value, swap(level + 1, left)) // swap!
        case Node(left, value, right) => Node(swap(level + 1, left), value, swap(level + 1, right))
      }
      swap(1, tree)
    }
  }
  val lines = io.Source.stdin.getLines
  val nNodes = lines.take(1).toList(0).toInt // number of nodes
  require(1 <= nNodes && nNodes <= 1024)
  // Map node index (1-based) -> (left child index, right child index); -1 means no child.
  val nodes = lines.take(nNodes).toList.map(_.split(" ").map(_.toInt)).zipWithIndex.map{case (xs, i) => (i + 1 -> (xs(0), xs(1))) }.toMap
  //println(nodes)
  var tree = Tree.build(1, nodes) // build the tree
  //println(tree.inOrder.mkString(" "))
  val nSwaps = lines.take(1).toList(0).toInt // number of swaps
  require(1 <= nSwaps && nSwaps <= 100)
  // One query depth per line; swaps are cumulative across queries.
  val dephs = lines.take(nSwaps).toList.map(_.toInt)
  for (depth <- dephs) {
    tree = Tree.swap(depth, tree)
    println(tree.inOrder.mkString(" "))
  }
} | flopezlasanta/hackerrank | src/functional_programming/functional_structures/SwapNodes.scala | Scala | mit | 1,915 |
package io.buoyant.config
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.databind.jsontype.NamedType
/**
 * Registers a polymorphic config class as a Jackson named subtype, so JSON/YAML
 * documents can select the implementation by its `configId`.
 */
trait ConfigInitializer {
  /** The concrete configuration class to register. */
  def configClass: Class[_]
  /** Identifier used in config documents to select this type; defaults to the class name. */
  def configId: String = configClass.getName
  // Lazily constructed because configId may be overridden by subclasses.
  lazy val namedType = new NamedType(configClass, configId)
  /** Registers this config type with the given Jackson mapper. */
  def registerSubtypes(mapper: ObjectMapper): Unit =
    mapper.registerSubtypes(namedType)
}
| denverwilliams/linkerd | config/src/main/scala/io/buoyant/config/ConfigInitializer.scala | Scala | apache-2.0 | 392 |
package lore.compiler.build
import lore.compiler.core.Position
import lore.compiler.feedback.{Feedback, Reporter}
import java.nio.file.{Files, Path}
object SdkDirectory {
  case class SdkNotFound(path: Path) extends Feedback.Error(Position.unknown) {
    override def message: String = s"The SDK path `$path` does not exist or is not a directory."
  }
  case class PyramidNotFound(path: Path) extends Feedback.Error(Position.unknown) {
    override def message: String = s"The SDK at `$path` does not contain the Pyramid standard library in a sub-directory `pyramid`."
  }
  case class RuntimeNotFound(path: Path) extends Feedback.Error(Position.unknown) {
    override def message: String = s"The SDK at `$path` does not contain the runtime in a sub-directory `runtime`."
  }
  /**
   * Checks that the given SDK directory exists and contains the expected `pyramid`
   * and `runtime` sub-directories, reporting an error for each missing piece.
   */
  def verify(sdkDirectory: Path)(implicit reporter: Reporter): Unit = {
    if (Files.isDirectory(sdkDirectory)) {
      // Each required sub-directory, paired with the error to report when it is absent.
      val requiredSubdirectories: Seq[(String, Path => Feedback.Error)] =
        Seq("pyramid" -> (PyramidNotFound(_)), "runtime" -> (RuntimeNotFound(_)))
      for ((subdirectory, error) <- requiredSubdirectories) {
        if (!Files.isDirectory(sdkDirectory.resolve(subdirectory))) {
          reporter.error(error(sdkDirectory))
        }
      }
    } else {
      reporter.error(SdkNotFound(sdkDirectory))
    }
  }
}
| marcopennekamp/lore | compiler/src/lore/compiler/build/SdkDirectory.scala | Scala | mit | 1,318 |
package tool.swf
import java.io._
import com.jpexs.decompiler.flash.tags.{DefineSpriteTag, PlaceObject2Tag}
import models.db.CellPosition
import scala.collection.JavaConverters._
import scala.collection.breakOut
// Parsed map SWF: the background image bytes and the cell markers found in it.
case class MapData(bytes: Array[Byte], cells: Seq[Cell])
object MapData {
  /** Parses a map SWF file; None when the file contains no usable map image. */
  def fromFile(file: File): Option[MapData] = {
    val swf = WrappedSWF.fromFile(file)
    getImage(swf).map { image =>
      MapData(image, getCells(swf))
    }
  }
  // The map background: the first JPEG3 tag whose export name does not mark an "Enemy" image.
  private def getImage(swf: WrappedSWF): Option[Array[Byte]] = {
    val mapImage = swf.getJPEG3s.find {
      case (_, jpeg) => !jpeg.getCharacterExportFileName.contains("Enemy")
    }
    mapImage.flatMap { case (_, jpeg) => WrappedSWF.imageToBytes(jpeg) }
  }
  // All cell markers found in any sprite of the SWF.
  private def getCells(swf: WrappedSWF): Seq[Cell] =
    swf.getSprites.toSeq.flatMap { case (_, sprite) => Cell.fromTag(sprite) }
}
// A numbered cell marker and its position (in pixels) on the map image.
case class Cell(cell: Int, posX: Int, posY: Int) {
  // Attaches the owning map area/info identifiers to produce a persistable record.
  def toCellPosition(areaId: Int, infoNo: Int) = CellPosition(areaId, infoNo, cell, posX, posY)
}
object Cell {
  // Matches sprite placement names like "line7"; capture group 1 is the cell number.
  // Fix: triple-quoted strings perform no escape processing, so the previous pattern
  // """line(\\d+)""" was the regex `line(\\d+)` — a literal backslash followed by "d" —
  // and could never match a real "lineN" instance name. `\d+` is the intended digit class.
  val LineRegex = """line(\d+)""".r
  /**
   * Extracts all cell markers from a sprite: every placed object whose instance
   * name matches "lineN" yields a Cell with number N and the object's translation
   * divided by 20 (SWF matrices are in twips, 1/20th of a pixel).
   */
  def fromTag(tag: DefineSpriteTag): Seq[Cell] = {
    val subtags = tag.getSubTags.asScala
    subtags.collect {
      case obj: PlaceObject2Tag =>
        for {
          name <- Option(obj.name) // placement may be unnamed; skip those
          matcher <- LineRegex.findFirstMatchIn(name)
        } yield {
          Cell(matcher.group(1).toInt, obj.matrix.translateX / 20, obj.matrix.translateY / 20)
        }
    }.flatten
  }
}
| Moesugi/MFG | server/app/tool/swf/MapData.scala | Scala | mit | 1,494 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.approval.accountsApproval.accountsApproval
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.ct.accounts.approval.boxes.AC8091
import uk.gov.hmrc.ct.accounts.{AccountsFreeTextValidationFixture, MockFrs102AccountsRetriever}
import uk.gov.hmrc.ct.accounts.frs102.retriever.{AbridgedAccountsBoxRetriever, Frs102AccountsBoxRetriever}
import uk.gov.hmrc.ct.box.CtValidation
/** Validation tests for box AC8091 (accounts approval): the value is mandatory and must be true. */
class AC8091Spec extends WordSpec
  with MockitoSugar
  with Matchers
  with MockFrs102AccountsRetriever
  with AccountsFreeTextValidationFixture[Frs102AccountsBoxRetriever] {
  "AC8091 validate" should {
    "return errors when AC8091 is empty" in {
      val mockBoxRetriever = mock[AbridgedAccountsBoxRetriever]
      AC8091(None).validate(mockBoxRetriever) shouldBe Set(CtValidation(Some("AC8091"), "error.AC8091.required"))
    }
    "return errors when AC8091 is false" in {
      val mockBoxRetriever = mock[AbridgedAccountsBoxRetriever]
      AC8091(Some(false)).validate(mockBoxRetriever) shouldBe Set(CtValidation(Some("AC8091"), "error.AC8091.required"))
    }
    "return value when AC8091 is true" in {
      // Reading the boxed value never consults a box retriever, so the unused
      // mock retriever previously created here has been removed.
      AC8091(Some(true)).value shouldBe Some(true)
    }
  }
}
| pncampbell/ct-calculations | src/test/scala/uk/gov/hmrc/ct/accounts/approval/accountsApproval/accountsApproval/AC8091Spec.scala | Scala | apache-2.0 | 1,908 |
package org.jetbrains.plugins.scala.lang.parameterInfo.functionParameterInfo
// Parameter-info tests driven by fixture files; each testXxx() resolves its
// fixture by test-method name via doTest() in the base class.
class FunctionParameterInfoUpdateTest extends FunctionParameterInfoTestBase {
  // Fixtures for this suite live under the "update/" sub-directory of the base path.
  override def getTestDataPath: String =
    s"${super.getTestDataPath}update/"
  def testGenericUpdate() = doTest()
  def testNoUpdate() = doTest()
  def testUpdateOnly() = doTest()
} | ilinum/intellij-scala | test/org/jetbrains/plugins/scala/lang/parameterInfo/functionParameterInfo/FunctionParameterInfoUpdateTest.scala | Scala | apache-2.0 | 344 |
package org.scalacvx
import org.scalacvx.atoms.affine.AddAtom
import org.scalacvx.atoms._
import org.scalatest.{Matchers, FlatSpec}
/**
* Created by lorenzo on 9/6/15.
*/
// NOTE(review): every assertion in this suite is commented out, so all three
// tests currently pass vacuously — they exercise Variable() construction only.
// The `shouldBe an [Expression[...]]` checks were presumably disabled because
// type-parameter checks are erased at runtime; confirm before re-enabling.
class ExpressionTests extends FlatSpec with Matchers {
  "The sum of two affine expression" should "be an affine expression" in {
    val x = Variable()
    val y = Variable()
    //x shouldBe an [Expression[Affine]]
    //y shouldBe an [Expression[Affine]]
    //AddAtom(x, y) shouldBe an [Expression[Affine]]
    //x + y shouldBe an [Expression[Affine]]
  }
  "An affine expression" should "by definition, be also convex" in {
    val x = Variable()
    //x shouldBe an [Expression[Affine]]
    //x shouldBe an [Expression[Convex]]
  }
  "A convex expression" should "in the general case, not be affine" in {
    val x = Variable()
    //AbsAtom(x) shouldBe an [Expression[Convex]]
    //AbsAtom(x) should not be an [Expression[Affine]]
  }
}
| lorenzolucido/ScalaCVX | src/test/scala/org/scalacvx/ExpressionTests.scala | Scala | mit | 925 |
package controllers
import javax.inject.Inject
import controllers.conversion._
import controllers.conversion.Converter._
import org.scalarules.engine.{Context, FactEngine}
import org.scalarules.facts.Fact
import org.scalarules.service.dsl.BusinessService
import play.api.data.validation.ValidationError
import play.api.libs.json._
import play.api.mvc.{Action, Controller, Request, Result}
import services.{BusinessServicesService, DerivationsService, GlossariesService, JsonConversionMapsService}
import scala.util.{Failure, Success, Try}
// scalastyle:off public.methods.have.type
/**
 * REST entry points for running ScalaRules derivations: either a single named
 * BusinessService (/api/run/group/...) or all known top-level derivations at once
 * (runAll and variants). Incoming JSON is converted to a fact Context, the engine
 * is run, and the resulting Context is rendered back to JSON through a
 * ResponseJsObject strategy that controls how much of the result is exposed.
 */
class RestController @Inject() (businessServicesService: BusinessServicesService,
                                derivationsService: DerivationsService,
                                glossariesService: GlossariesService,
                                jsonConversionMapsService: JsonConversionMapsService) extends Controller {
  // One entry per configured BusinessService, mapping its run endpoint to its
  // information endpoint; Failure when no BusinessServices exist (a configuration error).
  val endpoints: Try[JsObject] = businessServicesService.businessServiceNames.map(
    businessServiceName => JsObject(Map(("/api/run/group/" + businessServiceName) -> Json.toJson("/api/run/group/information/" + businessServiceName)))
  ) match {
    case Nil => Failure(new IllegalStateException("No endpoints available: it seems no BusinessServices have been defined!"))
    case jsObjectList: List[JsObject] => Success(jsObjectList.reduceLeft(_ ++ _))
  }
  /**
   * @return a list of JsObjects where the first value is the endpoint and the second value is the information endpoint for all available BusinessServices
   *         or an InternalServerError(500) if no BusinessServices have been found as this suggests a configuration error.
   */
  def businessservices = Action(
    endpoints match {
      case f: Failure[JsObject] => InternalServerError(f.exception.getMessage)
      case s: Success[JsObject] => Ok(s.value)
    }
  )
  /**
   * Provides information on verplichteInvoer (required inputs), uitvoer (outputs)
   * and optioneleFacts (optional inputs with their defaults).
   * @param name: the name of the BusinessService for which information is requested (case sensitive)
   * @return Ok(200) describing the service's inputs and outputs, or BadRequest(400) when the name does not resolve
   */
  def information(name: String) = Action {
    findBusinessService(name) match {
      case f: Failure[(String, BusinessService)] => BadRequest(JsError.toJson(JsError(ValidationError(f.exception.getMessage))))
      case s: Success[(String, BusinessService)] => Ok(
        JsObject(Map(
          "Information for Business Service " + s.value._1 ->
            JsObject(Map(
              "verplichteInvoer" -> contextToJson(s.value._2.verplichteInvoerFacts.map(f => f -> ("type " + f.valueType)).toMap, jsonConversionMap),
              "optioneleInvoer met bijbehorende defaults" -> contextToJson(s.value._2.optioneleInvoerFacts, jsonConversionMap),
              "uitvoer" -> contextToJson(s.value._2.uitvoerFacts.map(f => f -> ("type " + f.valueType)).toMap, jsonConversionMap)))
        )))
    }
  }
  /**
   * Attempts to run the derivations specified by the named BusinessService with the JSON context provided.
   * Will provide clear error information on all detected issues. Otherwise will provide the provided inputs and the outputs.
   * @param name: the name of the BusinessService whose derivations are meant to be run, currently case sensitive
   * @return the provided inputs and the specified outputs, nicely sorted.
   */
  def runBusinessService(name: String) = Action(parse.json) {
    request =>
      findBusinessService(name) match {
        case f: Failure[(String, BusinessService)] => BadRequest(JsError.toJson(JsError(ValidationError(f.exception.getMessage))))
        case s: Success[(String, BusinessService)] => runBusiness(request, InputsAndOutputsResponseJsObject, s.value._2)
      }
  }
  /**
   * Attempts to run the derivations specified by the named BusinessService with the JSON context provided.
   * Will provide clear error information on all detected issues. Otherwise will provide the provided context, all intermediary results and the outputs.
   * @param name: the name of the BusinessService whose derivations are meant to be run, currently case sensitive
   * @return The inputs, intermediary results and outputs, nicely sorted.
   */
  def debugBusinessService(name: String) = Action(parse.json) {
    request =>
      findBusinessService(name) match {
        case f: Failure[(String, BusinessService)] => BadRequest(JsError.toJson(JsError(ValidationError(f.exception.getMessage))))
        case s: Success[(String, BusinessService)] => runBusiness(request, CompleteResponseJsObject, s.value._2)
      }
  }
  /**
   * Attempts to run the derivations specified by the named BusinessService with the JSON context provided.
   * Will provide clear error information on all detected issues. Otherwise will provide only the specified uitvoer.
   * @param name: the name of the BusinessService whose derivations are meant to be run, currently case sensitive
   * @return only the outputs belonging to the BusinessService
   */
  def runBusinessServiceOutputsOnly(name: String) = Action(parse.json) {
    request =>
      findBusinessService(name) match {
        case f: Failure[(String, BusinessService)] => BadRequest(JsError.toJson(JsError(ValidationError(f.exception.getMessage))))
        case s: Success[(String, BusinessService)] => runBusiness(request, OutputsOnlyResponseJsObject, s.value._2)
      }
  }
  // Resolves a BusinessService by (case sensitive) name; fails when zero or more than one match.
  private def findBusinessService(name: String): Try[(String, BusinessService)] = {
    val matchedBusinessServices = businessServicesService.businessServices.collect{ case (naam, service) if naam == name => (naam, service)}
    matchedBusinessServices match {
      case Nil => Failure(
        new IllegalArgumentException("No BusinessService matched this name, make sure you have used the proper endpoint definition!" ++ businessServicesService.businessServiceNames.toString)
      )
      case head :: tail => tail match {
        case Nil => Success(head)
        case tail: List[(String, BusinessService)] => Failure(
          new IllegalStateException("More than one BusinessService matched this name. Suspected mistake in BusinessService specifications.")
        )
      }
    }
  }
  // Converts the request body to context fragments using the service's combined glossaries;
  // reports all conversion errors at once, otherwise runs the BusinessService.
  private def runBusiness(request: Request[JsValue], jsonResponseProvider: ResponseJsObject, businessService: BusinessService): Result = {
    val (initialContextFragments: List[JsSuccess[Context]], conversionErrors: List[JsError]) = {
      convertToIndividualContext(request.body, businessService.glossaries.foldLeft(Map.empty[String, Fact[Any]])((acc, glossary) => acc ++ glossary.facts), jsonConversionMap)
    }
    if (conversionErrors != List.empty) BadRequest( processConversionErrors(conversionErrors) )
    else processConvertedContextBusinessService(initialContextFragments, jsonResponseProvider, businessService)
  }
  /**
   * Provides a REST endpoint for triggering all derivations in the target project. Any fact definitions available in the target project's glossaries
   * can be provided in the JSON request body like so:
   * {
   * "factOfTypeString": "factText",
   * "factOfTypeBedrag": 234,
   * "factOfTypeBigDecimal": 234,
   * "factOfTypePercentage": 234
   * }
   *
   * @return A JsonObject containing either:
   *         - A list of JsErrors, containing complete error information concerning failed conversions from json to context (if multiple errors occur, you receive information on all of them)
   *         - A JsObject containing one JsObject: "facts", which contains the combined information of "input" and "results"
   */
  def runAll = Action(parse.json) { request =>
    run(request, RunAllResponseJsObject)
  }
  /**
   * As #runAll except:
   *
   * @return A JsonObject containing either:
   *         - A list of JsErrors, containing complete error information concerning failed conversions from json to context (if multiple errors occur, you receive information on all of them)
   *         - A JsObject containing two JsObject: "input" and "results", which contains only the information of "results"
   */
  def runAllDebug = Action(parse.json) { request =>
    run(request, DebugAllResponseJsObject)
  }
  /**
   * As #runAll except:
   *
   * @return A JsonObject containing either:
   *         - A list of JsErrors, containing complete error information concerning failed conversions from json to context (if multiple errors occur, you receive information on all of them)
   *         - A JsObject containing one JsObject: "results", which contains only the information of "results"
   */
  def runAllResultsOnly = Action(parse.json) { request =>
    run(request, RunAllResultsOnlyResponseJsObject)
  }
  // Conversion rules used both for parsing incoming JSON facts and for rendering results.
  val jsonConversionMap: JsonConversionsProvider = jsonConversionMapsService.mergedJsonConversionMap
  // Shared implementation of the runAll* endpoints: convert against the merged
  // glossaries, run all top-level derivations, render with the given strategy.
  private def run(request: Request[JsValue], jsonResponseProvider: ResponseJsObject) = {
    val (initialContextFragments: List[JsSuccess[Context]], conversionErrors: List[JsError]) =
      convertToIndividualContext(request.body, glossariesService.mergedGlossaries, jsonConversionMap)
    if (conversionErrors != List.empty) BadRequest( processConversionErrors(conversionErrors) )
    else Ok( processConvertedContext(initialContextFragments, Nil, jsonResponseProvider) )
  }
  // Merges all individual conversion errors into one JSON error report.
  private def processConversionErrors(conversionErrors: List[JsError]): JsObject = JsError.toJson(conversionErrors.reduceLeft(_ ++ _))
  // Folds the successful fragments into one Context, runs the top-level derivations
  // and renders the result; used by the runAll* endpoints (no uitvoer restriction).
  private def processConvertedContext(initialContextFragments: List[JsSuccess[Context]], uitvoerFacts: List[Fact[Any]], jsonResponse: ResponseJsObject): JsObject = {
    val initialContext: Context = initialContextFragments.foldLeft(Map.empty[Fact[Any], Any])((acc, jsSuccess) => acc ++ jsSuccess.get)
    val resultContext: Context = RulesRunner.run(initialContext, derivationsService.topLevelDerivations)
    jsonResponse.toJson(initialContext = initialContext, uitvoerFacts = uitvoerFacts, resultContext = resultContext, jsonConversionMap)
  }
  // As processConvertedContext, but runs a single BusinessService; a failed run
  // (validation errors) is reported as BadRequest rather than thrown.
  private def processConvertedContextBusinessService(initialContextFragments: List[JsSuccess[Context]], jsonResponse: ResponseJsObject, businessService: BusinessService): Result = {
    val initialContext: Context = initialContextFragments.foldLeft(Map.empty[Fact[Any], Any])((acc, jsSuccess) => acc ++ jsSuccess.get)
    val resultContext: Try[Context] = businessService.run(initialContext, FactEngine.runNormalDerivations)
    resultContext match {
      case f: Failure[Context] => BadRequest( JsError.toJson(JsError(ValidationError("Attempt at calculation failed due to validation errors: " + f.exception.getMessage))) )
      case s: Success[Context] => Ok(
        jsonResponse.toJson(
          initialContext = initialContext,
          uitvoerFacts = businessService.uitvoerFacts,
          resultContext = s.value,
          jsonConversionMap
        )
      )
    }
  }
}
| scala-rules/rule-rest | app/controllers/RestController.scala | Scala | mit | 10,737 |
package com.sksamuel.elastic4s.indexes
import com.sksamuel.elastic4s.JsonSugar
import com.sksamuel.elastic4s.analyzers.{CustomAnalyzerDefinition, KeywordTokenizer, LowercaseTokenFilter}
import com.sksamuel.elastic4s.http.ElasticDsl
import org.elasticsearch.common.settings.Settings
import org.scalatest.{Matchers, WordSpec}
// Verifies that the Show typeclass for CreateIndexTemplateDefinition renders the
// full request (pattern, order, settings incl. analysis, and mappings) as JSON.
class CreateIndexTemplateDefinitionShowTest extends WordSpec with Matchers with JsonSugar with ElasticDsl {
  "CreateIndexTemplateDefinition" should {
    "have a show typeclass implementation" in {
      // Build a template exercising mappings, a custom analyzer, order and settings.
      val req =
        createTemplate("matchme.*").pattern("matchme.*").mappings(
          mapping("characters").fields(
            stringField("name"),
            stringField("location")
          )
        ).analysis(
          CustomAnalyzerDefinition(
            "default",
            KeywordTokenizer,
            LowercaseTokenFilter
          )
        )
          .order(1)
          .settings(Settings.builder().put("number_of_shards",4).build())
      // matchJson compares structurally, so key order in the expected JSON is not significant.
      CreateIndexTemplateShow.show(req) should matchJson("""{"template":"matchme.*","order":1,"settings":{"number_of_shards":"4","analysis":{"analyzer":{"default":{"type":"custom","tokenizer":"keyword","filter":["lowercase"]}}}},"mappings":{"characters":{"properties":{"name":{"type":"string"},"location":{"type":"string"}}}}}""")
    }
  }
}
| aroundus-inc/elastic4s | elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/indexes/CreateIndexTemplateDefinitionShowTest.scala | Scala | apache-2.0 | 1,328 |
package mesosphere.marathon
package core.readiness
import mesosphere.UnitTest
import mesosphere.marathon.core.instance.TestTaskBuilder
import mesosphere.marathon.core.task.Task
import mesosphere.marathon.core.task.state.NetworkInfo
import mesosphere.marathon.state.{ AppDefinition, PathId, PortDefinition }
// Tests that ReadinessCheckSpec.readinessCheckSpecsForTask derives the correct
// check URL (host + resolved host port) and copies all check attributes, for
// dynamic ports, required (fixed) ports, and multiple checks on one app.
class ReadinessCheckSpecTest extends UnitTest {
  "ReadinessCheckSpec" should {
    "readiness check specs for one task with dynamic ports and one readiness check" in {
      val f = new Fixture
      Given("an app with a readiness check and a randomly assigned port")
      val app = f.appWithOneReadinessCheck
      And("a task with two host port")
      val task = f.taskWithPorts
      When("calculating the ReadinessCheckSpec")
      val specs = ReadinessCheckExecutor.ReadinessCheckSpec.readinessCheckSpecsForTask(app, task)
      Then("we get one spec")
      specs should have size 1
      val spec = specs.head
      And("it has the correct url")
      spec.url should equal(s"http://${f.hostName}:80/")
      And("the rest of the fields are correct, too")
      spec should equal(
        ReadinessCheckExecutor.ReadinessCheckSpec(
          url = s"http://${f.hostName}:80/",
          taskId = task.taskId,
          checkName = app.readinessChecks.head.name,
          interval = app.readinessChecks.head.interval,
          timeout = app.readinessChecks.head.timeout,
          httpStatusCodesForReady = app.readinessChecks.head.httpStatusCodesForReady,
          preserveLastResponse = app.readinessChecks.head.preserveLastResponse
        )
      )
    }
    "readiness check specs for one task with a required port and one readiness check" in {
      val f = new Fixture
      Given("an app with a readiness check and a fixed port assignment")
      val app = f.appWithOneReadinessCheckWithRequiredPorts
      And("a task with two host ports")
      val task = f.taskWithPorts
      When("calculating the ReadinessCheckSpec")
      val specs = ReadinessCheckExecutor.ReadinessCheckSpec.readinessCheckSpecsForTask(app, task)
      Then("we get one spec")
      specs should have size 1
      val spec = specs.head
      And("it has the correct url")
      spec.url should equal(s"http://${f.hostName}:80/")
      And("the rest of the fields are correct, too")
      spec should equal(
        ReadinessCheckExecutor.ReadinessCheckSpec(
          url = s"http://${f.hostName}:80/",
          taskId = task.taskId,
          checkName = app.readinessChecks.head.name,
          interval = app.readinessChecks.head.interval,
          timeout = app.readinessChecks.head.timeout,
          httpStatusCodesForReady = app.readinessChecks.head.httpStatusCodesForReady,
          preserveLastResponse = app.readinessChecks.head.preserveLastResponse
        )
      )
    }
    "multiple readiness check specs" in {
      val f = new Fixture
      Given("an app with two readiness checks and randomly assigned ports")
      val app = f.appWithMultipleReadinessChecks
      And("a task with two host port")
      val task = f.taskWithPorts
      When("calculating the ReadinessCheckSpec")
      val specs = ReadinessCheckExecutor.ReadinessCheckSpec.readinessCheckSpecsForTask(app, task)
      Then("we get two specs in the right order")
      specs should have size 2
      val specDefaultHttp = specs.head
      val specAlternative = specs(1)
      specDefaultHttp.checkName should equal(app.readinessChecks.head.name)
      specAlternative.checkName should equal(app.readinessChecks(1).name)
      And("the default http spec has the right url")
      specDefaultHttp.url should equal(s"http://${f.hostName}:81/")
      And("the alternative https spec has the right url")
      specAlternative.url should equal(s"https://${f.hostName}:80/v1/plan")
    }
  }
  // Shared app/task builders; each test instantiates a fresh Fixture.
  class Fixture {
    val appId: PathId = PathId("/test")
    val hostName = "some.host"
    // One check against a dynamically assigned port named "http-api".
    val appWithOneReadinessCheck = AppDefinition(
      id = appId,
      readinessChecks = Seq(ReadinessCheckTestHelper.defaultHttp),
      portDefinitions = Seq(
        PortDefinition(
          port = AppDefinition.RandomPortValue,
          name = Some("http-api")
        )
      )
    )
    // One check against a fixed (required) port 80; port 81 is an unrelated extra port.
    val appWithOneReadinessCheckWithRequiredPorts = AppDefinition(
      id = appId,
      readinessChecks = Seq(ReadinessCheckTestHelper.defaultHttp),
      requirePorts = true,
      portDefinitions = Seq(
        PortDefinition(
          port = 80,
          name = Some(ReadinessCheckTestHelper.defaultHttp.portName)
        ),
        PortDefinition(
          port = 81,
          name = Some("foo")
        )
      )
    )
    // Two checks; the port definitions here carry service ports (values ignored),
    // only their names are used to match checks to the task's host ports.
    val appWithMultipleReadinessChecks = AppDefinition(
      id = appId,
      readinessChecks = Seq(
        ReadinessCheckTestHelper.defaultHttp,
        ReadinessCheckTestHelper.alternativeHttps
      ),
      portDefinitions = Seq(
        PortDefinition(
          port = 1, // service ports, ignore
          name = Some(ReadinessCheckTestHelper.alternativeHttps.portName)
        ),
        PortDefinition(
          port = 2, // service ports, ignore
          name = Some(ReadinessCheckTestHelper.defaultHttp.portName)
        )
      )
    )
    // A running task on hostName with host ports 80 and 81.
    def taskWithPorts = {
      val task: Task.LaunchedEphemeral = TestTaskBuilder.Helper.runningTaskForApp(appId)
      task.copy(status = task.status.copy(networkInfo = NetworkInfo(hostName, hostPorts = Seq(80, 81), ipAddresses = Nil)))
    }
  }
}
| Caerostris/marathon | src/test/scala/mesosphere/marathon/core/readiness/ReadinessCheckSpecTest.scala | Scala | apache-2.0 | 5,410 |
package controllers.stateless
import controllers.BaseAuthConfig
import play.api.mvc.RequestHeader
import play.api.mvc.Results._
import scala.concurrent.{Future, ExecutionContext}
import com.github.tototoshi.play2.auth.{CookieIdContainer, AsyncIdContainer}
// Stateless auth configuration: redirects for the login lifecycle and a
// cookie-based id container (the session id lives in the cookie itself,
// so no server-side session storage is required).
trait AuthConfigImpl extends BaseAuthConfig {
  // After a successful login, go to the main messages page.
  def loginSucceeded(request: RequestHeader)(implicit ctx: ExecutionContext) = Future.successful(Redirect(routes.Messages.main))
  // After logout, return to the login page.
  def logoutSucceeded(request: RequestHeader)(implicit ctx: ExecutionContext) = Future.successful(Redirect(routes.Sessions.login))
  // Unauthenticated requests are sent to the login page.
  def authenticationFailed(request: RequestHeader)(implicit ctx: ExecutionContext) = Future.successful(Redirect(routes.Sessions.login))
  // CookieIdContainer makes the authentication stateless.
  override lazy val idContainer = AsyncIdContainer(new CookieIdContainer[Id])
} | tototoshi/play2-auth | sample/app/controllers/stateless/AuthConfigImpl.scala | Scala | apache-2.0 | 785 |
package mesosphere.mesos
import mesosphere.UnitTest
import org.apache.mesos
// Tests VolumeProfileMatcher.matchesProfileName: a disk resource matches only
// when its (optional) profile equals the (optional) required profile.
class VolumeProfileMatcherTest extends UnitTest {
  "matchesProfileName" should {
    "match disk with profile if that profile is required" in {
      val disk = diskResource(profile = Some("profile"))
      VolumeProfileMatcher.matchesProfileName(Some("profile"), disk) shouldBe true
    }
    "not match disk with profile if no profile is required" in {
      val disk = diskResource(profile = Some("profile"))
      VolumeProfileMatcher.matchesProfileName(None, disk) shouldBe false
    }
    "match disk without profile when no profile is required" in {
      val disk = diskResource(profile = None)
      VolumeProfileMatcher.matchesProfileName(None, disk) shouldBe true
    }
    "not match disk with profile if a different profile is required" in {
      val disk = diskResource(profile = Some("profile"))
      VolumeProfileMatcher.matchesProfileName(Some("needed-profile"), disk) shouldBe false
    }
  }
  /** Helper to create disk resources with/without profile */
  def diskResource(profile: Option[String]): mesos.Protos.Resource = {
    // PATH disk source; when a profile is given, vendor must be set as well.
    val source = mesos.Protos.Resource.DiskInfo.Source.newBuilder()
      .setType(mesos.Protos.Resource.DiskInfo.Source.Type.PATH)
      .setPath(mesos.Protos.Resource.DiskInfo.Source.Path.newBuilder().setRoot("test"))
      .setId("pathDiskId")
    profile.foreach { p =>
      source.setProfile(p)
      source.setVendor("vendorId")
    }
    mesos.Protos.Resource.newBuilder()
      .setType(mesos.Protos.Value.Type.SCALAR)
      .setName("disk")
      .setDisk(mesos.Protos.Resource.DiskInfo.newBuilder()
        .setSource(source))
      .build()
  }
}
| gsantovena/marathon | src/test/scala/mesosphere/mesos/VolumeProfileMatcherTest.scala | Scala | apache-2.0 | 1,686 |
/**
* Copyright © 2013, Adam Retter
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package skullfuck
// Entry point: parses and runs a small hard-coded Brainfuck program and prints the result.
object BrainfuckVM extends App {
  val vm = new BrainfuckParser()
  // "+++.>+++." increments the first two cells to 3 and outputs each.
  val result = vm.apply("+++.>+++.")
  println(result)
}
| adamretter/Skullfuck | src/main/scala/skullfuck/BrainfuckVM.scala | Scala | bsd-3-clause | 1,721 |
package com.geeksville.apiproxy
import java.util.UUID
import com.geeksville.dapi.Envelope
/**
* These are low level routines called by the GCS to hook into the proxy. When
* the proxy calls in the expected sequence of operations are:
*
* loginUser
*
* setVehicleId (must be done before any data is sent from that vehicle)
*
* filterMavlink (for each packet)
*
* @author kevinh
*
*/
trait GCSHooks {
  /**
   * Provide the callbacks for the GCS. GCS must call this routine before
   * calling any other API functions.
   *
   * @param cb the callback sink for events from the proxy
   */
  def setCallback(cb: GCSCallback)
  /**
   * GCS must call this for every mavlink packet received or sent from the
   * vehicle.
   *
   * @param bytes
   *            the packet
   * @param fromInterface
   *            the interface # this data arrived on (or -1 if generated by
   *            the GCS itself)
   * @throws IOException
   */
  def filterMavlink(fromInterface: Int, bytes: Array[Byte])
  /**
   * Connect to web service.
   *
   * @param userName
   * @param password
   * @throws LoginException if login fails
   */
  def loginUser(userName: String, password: String)
  /// Ask server if the specified username is available for creation
  def isUsernameAvailable(userName: String): Boolean
  /// Create a new user account
  /// @throws LoginException if login fails
  def createUser(userName: String, password: String, email: Option[String])
  /// Send an arbitrary envelope
  def send(env: Envelope)
  /// Begin a new mission
  def startMission(keep: Boolean, uuid: UUID)
  /// End a mission
  def stopMission(keep: Boolean)
  /**
   * Associate a server vehicleId string with a particular mavlink sysId. GCS
   * must call this for every vehicle that is connected.
   *
   * @param vehicleId
   *            a UUID for this vehicle, if the server has never seen this
   *            UUID before, a new vehicle record will be created. Use the
   *            special string "gcs" for data from the GCS (not really a
   *            vehicle)
   * @param fromInterface
   *            the interface # this vehicle is connected on
   * @param mavlinkSysId
   *            the mavlink sysid for this vehicle
   * @param allowControl
   *            true if we will allow the server to control this vehicle
   * @param wantPipe true if we want to use this as an alias for some remote vehicle
   * @throws IOException
   */
  def setVehicleId(vehicleId: String, fromInterface: Int, mavlinkSysId: Int, allowControl: Boolean, wantPipe: Option[Boolean] = None)
  /**
   * Send any queued messages immediately
   */
  def flush()
  /**
   * Disconnects from web service
   */
  def close()
}
| geeksville/arduleader | common/src/main/scala/com/geeksville/apiproxy/GCSHooks.scala | Scala | gpl-3.0 | 2,668 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.schema
import org.apache.flink.table.planner.plan.stats.FlinkStatistic
import org.apache.calcite.schema.TemporalTable
import org.apache.calcite.schema.impl.AbstractTable
/**
 * Base class for flink table.
 *
 * Combines Calcite's AbstractTable with the TemporalTable contract and pins
 * the statistics type to Flink's own [[FlinkStatistic]].
 */
abstract class FlinkTable extends AbstractTable with TemporalTable {
/**
 * Restrict return type of statistic to FlinkStatistic.
 *
 * NOTE(review): the base implementation is `???`, so invoking it without a
 * subclass override throws scala.NotImplementedError at runtime.
 */
override def getStatistic: FlinkStatistic = ???
/**
 * Creates a copy of this table, changing statistic.
 *
 * @param statistic A new FlinkStatistic.
 * @return Copy of this table, substituting statistic.
 */
def copy(statistic: FlinkStatistic): FlinkTable
/**
 * Currently we do not need this, so we hard code it as default.
 */
override def getSysStartFieldName: String = "sys_start"
/**
 * Currently we do not need this, so we hard code it as default.
 */
override def getSysEndFieldName: String = "sys_end"
}
| fhueske/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/schema/FlinkTable.scala | Scala | apache-2.0 | 1,764 |
/*
* Copyright (C) 2015 DANS - Data Archiving and Networked Services (info@dans.knaw.nl)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.sword2
import java.io.File
import java.util.regex.Pattern
import org.scalatest.Inside.inside
import scala.util.{ Failure, Success }
/**
 * Tests for fetch.txt handling during deposit validation: URL syntax checks
 * (checkFetchItemUrls) and virtual bag validity (checkBagVirtualValidity).
 */
class ResolveFetchItemsSpec extends Sword2Fixture with BagStoreFixture {

  val INPUT_BASEDIR = new File("src/test/resources/input")
  val SIMPLE_SEQUENCE_A = new File(INPUT_BASEDIR, "bag-sequence/a")
  val SIMPLE_SEQUENCE_B = new File(INPUT_BASEDIR, "bag-sequence/b")
  val SIMPLE_SEQUENCE_C = new File(INPUT_BASEDIR, "bag-sequence/c")
  val REQUIRED_FILE_MISSING = new File(INPUT_BASEDIR, "bag-sequence/missing-required-file")
  val FETCH_ITEM_FILE_MISSING = new File(INPUT_BASEDIR, "bag-sequence/file-missing-in-fetch-text")
  val INCORRECT_CHECKSUM = new File(INPUT_BASEDIR, "bag-sequence/incorrect-checksum")
  val NONEXISTENT_FETCH_ITEM_PATH = new File(INPUT_BASEDIR, "bag-sequence/nonexistent-fetchtext-path")
  val FETCH_ITEM_ALREADY_IN_BAG = new File(INPUT_BASEDIR, "bag-sequence/fetch-item-already-in-bag")
  val URL_OUTSIDE_BAGSTORE_BAG = new File(INPUT_BASEDIR, "url-outside-bagstore-bag")
  val INVALID_URL_BAG = new File(INPUT_BASEDIR, "invalid-url-bag")
  val NOT_ALLOWED_URL_BAG = new File(INPUT_BASEDIR, "not-allowed-url-bag")
  val NO_DATA_BAG = new File(INPUT_BASEDIR, "empty-bag")

  // Only http(s) URLs are allowed in fetch.txt.
  val urlPattern: Pattern = Pattern.compile("^https?://.*")

  /** Asserts `result` is a Failure wrapping an InvalidDepositException whose message contains `expectedMessagePart`. */
  private def assertInvalidDeposit(result: scala.util.Try[_], expectedMessagePart: String): Unit = {
    inside(result) {
      case Failure(e) =>
        e shouldBe a[InvalidDepositException]
        e.getMessage should include(expectedMessagePart)
    }
  }

  /** Copies `inputBag` to the target dir; expects both the URL check and virtual validity to succeed. */
  private def expectValidBag(inputBag: File): Unit = {
    copyToTargetBagDir(inputBag)
    DepositHandler.checkFetchItemUrls(targetBagDir, urlPattern) shouldBe a[Success[_]]
    DepositHandler.checkBagVirtualValidity(targetBagDir) shouldBe a[Success[_]]
  }

  /** Copies `inputBag`; expects the URL check to pass but virtual validity to fail with the given message fragment. */
  private def expectInvalidBag(inputBag: File, expectedMessagePart: String): Unit = {
    copyToTargetBagDir(inputBag)
    DepositHandler.checkFetchItemUrls(targetBagDir, urlPattern) shouldBe a[Success[_]]
    assertInvalidDeposit(DepositHandler.checkBagVirtualValidity(targetBagDir), expectedMessagePart)
  }

  /** Copies `inputBag`; expects the fetch.txt URL check itself to fail with the given message fragment. */
  private def expectInvalidFetchItemUrls(inputBag: File, expectedMessagePart: String): Unit = {
    copyToTargetBagDir(inputBag)
    assertInvalidDeposit(DepositHandler.checkFetchItemUrls(targetBagDir, urlPattern), expectedMessagePart)
  }

  "resolveFetchItems" should "result in a Success with a valid bag without a fetch.txt" in {
    expectValidBag(SIMPLE_SEQUENCE_A)
  }

  it should "result in a Success with a valid bag with a fetch.txt" in {
    expectValidBag(SIMPLE_SEQUENCE_B)
  }

  it should "result in a Success with another valid bag with a fetch.txt" in {
    expectValidBag(SIMPLE_SEQUENCE_C)
  }

  it should "result in a Failure when a required file is missing" in {
    expectInvalidBag(REQUIRED_FILE_MISSING, "Bag does not have bagit.txt")
  }

  it should "result in a Failure when a file is missing in the fetch.txt" in {
    expectInvalidBag(FETCH_ITEM_FILE_MISSING, "Missing payload files not in the fetch.txt")
  }

  it should "result in a Failure when a file checksum is incorrect" in {
    expectInvalidBag(INCORRECT_CHECKSUM, "error found in validating checksums")
  }

  it should "result in a Failure when there is a nonexistent path in the fetch.txt" in {
    expectInvalidBag(NONEXISTENT_FETCH_ITEM_PATH, "was not found in the referred bag")
  }

  it should "result in a Failure when a file in the fetch.txt is already in the bag" in {
    expectInvalidBag(FETCH_ITEM_ALREADY_IN_BAG, "is already present in the bag")
  }

  // TODO: PROPERLY MOCK OUT THE HTTP CALL
  //  it should "result in a Success with a valid fetch.txt url referring outside the bagstore" in {
  //    copyToTargetBagDir(URL_OUTSIDE_BAGSTORE_BAG)
  //    DepositHandler.checkFetchItemUrls(targetBagDir, urlPattern) shouldBe a[Success[_]]
  //    DepositHandler.checkBagVirtualValidity(targetBagDir) shouldBe a[Success[_]]
  //  }

  it should "result in a Failure with a syntactically invalid url in the fetch.txt" in {
    expectInvalidFetchItemUrls(INVALID_URL_BAG, "error found in fetch.txt URLs")
  }

  it should "result in a Failure with a not allowed url in the fetch.txt" in {
    expectInvalidFetchItemUrls(NOT_ALLOWED_URL_BAG, "error found in fetch.txt URLs")
  }

  it should "result in a Failure with an empty bag" in {
    expectInvalidBag(NO_DATA_BAG, "Bag does not have any payload manifests")
  }

  // Kept inline: the local implicit must shadow the fixture's BagStoreSettings
  // for these calls, which a class-level helper would not pick up.
  it should "result in a Success when both bag-store base-dir and base-uri are not given, and there are no fetch.txt references to the bag-store" in {
    implicit val bagStoreSettings: Option[BagStoreSettings] = Option.empty
    copyToTargetBagDir(SIMPLE_SEQUENCE_A)
    DepositHandler.checkFetchItemUrls(targetBagDir, urlPattern) shouldBe a[Success[_]]
    DepositHandler.checkBagVirtualValidity(targetBagDir) shouldBe a[Success[_]]
  }
}
| DANS-KNAW/easy-deposit | src/test/scala/nl.knaw.dans.easy.sword2/ResolveFetchItemsSpec.scala | Scala | apache-2.0 | 7,232 |
package com.karasiq.shadowcloud.api
import akka.util.ByteString
import com.karasiq.common.encoding.Base64
import com.karasiq.shadowcloud.model.utils.IndexScope
import com.karasiq.shadowcloud.model.{File, Path}
import scala.language.higherKinds
/** Helpers for carrying binary payloads inside URL-safe strings (base64). */
object SCApiEncoding {
  /** Renders the given bytes as a base64 string suitable for URLs. */
  def toUrlSafe(data: ByteString): String = Base64.encode(data)

  /** Parses a string produced by [[toUrlSafe]] back into bytes. */
  def toBinary(string: String): ByteString = Base64.decode(string)
}
/**
 * Abstraction over the wire encoding used by the shadowcloud API.
 * Implementations choose the concrete codec type classes via `Encoder`/`Decoder`.
 */
trait SCApiEncoding {
// Type-class slots an implementation plugs its codecs into.
type Encoder[T]
type Decoder[T]
// Generic encoding
// Serialises any value that has an Encoder instance / parses any type with a Decoder instance.
def encode[T: Encoder](value: T): ByteString
def decode[T: Decoder](valueBytes: ByteString): T
// Static encoding: fixed helpers for the core model types (Path, File, IndexScope).
def encodePath(path: Path): ByteString
def decodePath(pathBytes: ByteString): Path
def encodeFile(file: File): ByteString
def decodeFile(fileBytes: ByteString): File
def encodeFiles(files: Seq[File]): ByteString
def decodeFiles(filesBytes: ByteString): Seq[File]
def encodeScope(scope: IndexScope): ByteString
def decodeScope(scopeBytes: ByteString): IndexScope
// Implementations also expose their implicit codec instances for import at call sites.
type ImplicitsT
val implicits: ImplicitsT
}
| Karasiq/shadowcloud | server/autowire-api/src/main/scala/com/karasiq/shadowcloud/api/SCApiEncoding.scala | Scala | apache-2.0 | 1,045 |
package io.vamp.common.config
import io.vamp.common.util.ObjectUtil
import io.vamp.common.{ Config, ConfigFilter, Namespace, NamespaceProvider }
import org.json4s.{ DefaultFormats, Formats }
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{ BeforeAndAfterEach, FlatSpec, Matchers }
/**
 * Tests for [[Config]]: typed reads, marshalling between nested maps and
 * JSON text, dotted-key flattening/unflattening, and export of the
 * dynamic/applied/system configuration layers.
 */
@RunWith(classOf[JUnitRunner])
class ConfigSpec extends FlatSpec with Matchers with BeforeAndAfterEach with NamespaceProvider {
implicit val namespace: Namespace = Namespace("default")
private implicit val formats: Formats = DefaultFormats
// Reset the dynamically loaded configuration after every test.
override def afterEach() = Config.load(Map())
"Config" should "retrieve data from configuration" in {
Config.load(Map("a.b.c" → 456, "q" → "qwerty", "f" → true))
Config.int("a.b.c")() shouldBe 456
Config.string("q")() shouldBe "qwerty"
Config.boolean("f")() shouldBe true
}
// Flat, nested, list-valued and dotted-key maps serialise to pretty-printed JSON.
it should "marshall input" in {
Config.marshall(
Map(
"a" → 456,
"q" → "qwerty",
"f" → true
)
) shouldBe
"""{
| "a":456,
| "q":"qwerty",
| "f":true
|}""".stripMargin
Config.marshall(
Map(
"a" → Map(
"q" → "qwerty",
"f" → true
)
)
) shouldBe
"""{
| "a":{
| "q":"qwerty",
| "f":true
| }
|}""".stripMargin
Config.marshall(
Map(
"a" → List(
Map(
"q" → "qwerty",
"f" → 5
)
)
)
) shouldBe
"""{
| "a":[
| {
| "q":"qwerty",
| "f":5
| }
| ]
|}""".stripMargin
Config.marshall(
Map(
"a.b" → 456,
"q.t" → "qwerty",
"f" → true
)
) shouldBe
"""{
| "a.b":456,
| "q.t":"qwerty",
| "f":true
|}""".stripMargin
Config.marshall(
Map(
"a.b" → Map(
"q.z" → "qwerty",
"f" → true
)
)
) shouldBe
"""{
| "a.b":{
| "q.z":"qwerty",
| "f":true
| }
|}""".stripMargin
Config.marshall(
Map(
"a.b" → List(
Map(
"q.x" → "qwerty",
"f" → 5
)
)
)
) shouldBe
"""{
| "a.b":[
| {
| "q.x":"qwerty",
| "f":5
| }
| ]
|}""".stripMargin
}
// JSON parses back into maps; dotted keys are expanded into nested maps.
it should "unmarshall input" in {
Config.unmarshall(
"""{
| "a":456,
| "q":"qwerty",
| "f":true
|}""".stripMargin
) shouldBe
Map(
"a" → 456,
"q" → "qwerty",
"f" → true
)
Config.unmarshall(
"""{
| "a":{
| "q":"qwerty",
| "f":true
| }
|}""".stripMargin
) shouldBe
Map(
"a" → Map(
"q" → "qwerty",
"f" → true
)
)
Config.unmarshall(
"""{
| "a":[
| {
| "q":"qwerty",
| "f":5
| }
| ]
|}""".stripMargin
) shouldBe
Map(
"a" → List(
Map(
"q" → "qwerty",
"f" → 5
)
)
)
Config.unmarshall(
"""{
| "a.b":456,
| "q.t":"qwerty",
| "f":true
|}""".stripMargin
) shouldBe
Map(
"q" →
Map(
"t" → "qwerty"
),
"a" → Map(
"b" → 456
),
"f" → true
)
Config.unmarshall(
"""{
| "a.b":{
| "q.z":"qwerty",
| "f":true
| }
|}""".stripMargin
) shouldBe
Map(
"a" →
Map(
"b" → Map(
"q" → Map(
"z" → "qwerty"
),
"f" → true
)
)
)
Config.unmarshall(
"""{
| "a.b":[
| {
| "q.x":"qwerty",
| "f":5
| }
| ]
|}""".stripMargin
) shouldBe
Map(
"a" → Map(
"b" → List(
Map(
"q" → Map(
"x" → "qwerty"
),
"f" → 5
)
)
)
)
}
// Round-trip property: unmarshall(marshall(x)) == x for representative shapes.
it should "marshall unmarshall input" in {
val input1 = Map("a" → 456, "q" → "qwerty", "f" → true)
Config.unmarshall(Config.marshall(input1)) shouldBe input1
val input2 = Map("a" → Map("q" → "qwerty", "f" → true))
Config.unmarshall(Config.marshall(input2)) shouldBe input2
val input3 = Map("a" → List(Map("q" → "qwerty", "f" → 5)))
Config.unmarshall(Config.marshall(input3)) shouldBe input3
val input4 = Map("q" → Map("t" → "qwerty"), "a" → Map("b" → 456), "f" → true)
Config.unmarshall(Config.marshall(input4)) shouldBe input4
val input5 = Map("a" → Map("b" → Map("q" → Map("z" → "qwerty"), "f" → true)))
Config.unmarshall(Config.marshall(input5)) shouldBe input5
val input6 = Map("a" → Map("b" → List(Map("q" → Map("x" → "qwerty"), "f" → 5))))
Config.unmarshall(Config.marshall(input6)) shouldBe input6
}
// Loaded values come back unchanged from the dynamic layer (nested export).
it should "load input" in {
val input1 = Map("a" → 456, "q" → "qwerty", "f" → true)
Config.load(Config.unmarshall(Config.marshall(input1)))
Config.export(Config.Type.dynamic, flatten = false) shouldBe input1
val input2 = Map("a" → Map("q" → "qwerty", "f" → true))
Config.load(Config.unmarshall(Config.marshall(input2)))
Config.export(Config.Type.dynamic, flatten = false) shouldBe input2
val input3 = Map("a" → List(Map("q" → "qwerty", "f" → 5)))
Config.load(Config.unmarshall(Config.marshall(input3)))
Config.export(Config.Type.dynamic, flatten = false) shouldBe input3
val input4 = Map("q" → Map("t" → "qwerty"), "a" → Map("b" → 456), "f" → true)
Config.load(Config.unmarshall(Config.marshall(input4)))
Config.export(Config.Type.dynamic, flatten = false) shouldBe input4
val input5 = Map("a" → Map("b" → Map("q" → Map("z" → "qwerty"), "f" → true)))
Config.load(Config.unmarshall(Config.marshall(input5)))
Config.export(Config.Type.dynamic, flatten = false) shouldBe input5
val input6 = Map("a" → Map("b" → List(Map("q" → Map("x" → "qwerty"), "f" → 5))))
Config.load(Config.unmarshall(Config.marshall(input6)))
Config.export(Config.Type.dynamic, flatten = false) shouldBe input6
}
// Flattened export collapses nested maps to dotted keys; lists stay as values.
it should "load input - flatten" in {
val input1 = Map("a" → 456, "q" → "qwerty", "f" → true)
val expected1 = Map("a" → 456, "q" → "qwerty", "f" → true)
Config.load(Config.unmarshall(Config.marshall(input1)))
Config.export(Config.Type.dynamic) shouldBe expected1
val input2 = Map("a" → Map("q" → "qwerty", "f" → true))
val expected2 = Map("a.q" → "qwerty", "a.f" → true)
Config.load(Config.unmarshall(Config.marshall(input2)))
Config.export(Config.Type.dynamic) shouldBe expected2
val input3 = Map("a" → List(Map("q" → "qwerty", "f" → 5)))
val expected3 = Map("a" → List(Map("q" → "qwerty", "f" → 5)))
Config.load(Config.unmarshall(Config.marshall(input3)))
Config.export(Config.Type.dynamic) shouldBe expected3
val input4 = Map("q" → Map("t" → "qwerty"), "a" → Map("b" → 456), "f" → true)
val expected4 = Map("q.t" → "qwerty", "a.b" → 456, "f" → true)
Config.load(Config.unmarshall(Config.marshall(input4)))
Config.export(Config.Type.dynamic) shouldBe expected4
val input5 = Map("a" → Map("b" → Map("q" → Map("z" → "qwerty"), "f" → true)))
val expected5 = Map("a.b.f" → true, "a.b.q.z" → "qwerty")
Config.load(Config.unmarshall(Config.marshall(input5)))
Config.export(Config.Type.dynamic) shouldBe expected5
val input6 = Map("a" → Map("b" → List(Map("q" → Map("x" → "qwerty"), "f" → 5))))
val expected6 = Map("a.b" → List(Map("q" → Map("x" → "qwerty"), "f" → 5)))
Config.load(Config.unmarshall(Config.marshall(input6)))
Config.export(Config.Type.dynamic) shouldBe expected6
}
// The applied layer is the merge of loaded values on top of the baseline snapshot.
it should "export applied" in {
val input1 = Map("a" → 456, "q" → "qwerty", "f" → true)
Config.load(Map())
val applied1 = Config.export(Config.Type.applied, flatten = false)
Config.load(input1)
Config.export(Config.Type.applied, flatten = false) shouldBe ObjectUtil.merge(input1, applied1)
val input2 = Map("a" → Map("q" → "qwerty", "f" → true))
Config.load(Map())
val applied2 = Config.export(Config.Type.applied, flatten = false)
Config.load(input2)
Config.export(Config.Type.applied, flatten = false) shouldBe ObjectUtil.merge(input2, applied2)
val input3 = Map("a" → List(Map("q" → "qwerty", "f" → 5)))
Config.load(Map())
val applied3 = Config.export(Config.Type.applied, flatten = false)
Config.load(input3)
Config.export(Config.Type.applied, flatten = false) shouldBe ObjectUtil.merge(input3, applied3)
val input4 = Map("q" → Map("t" → "qwerty"), "a" → Map("b" → 456), "f" → true)
Config.load(Map())
val applied4 = Config.export(Config.Type.applied, flatten = false)
Config.load(input4)
Config.export(Config.Type.applied, flatten = false) shouldBe ObjectUtil.merge(input4, applied4)
val input5 = Map("a" → Map("b" → Map("q" → Map("z" → "qwerty"), "f" → true)))
Config.load(Map())
val applied5 = Config.export(Config.Type.applied, flatten = false)
Config.load(input5)
Config.export(Config.Type.applied, flatten = false) shouldBe ObjectUtil.merge(input5, applied5)
val input6 = Map("a" → Map("b" → List(Map("q" → Map("x" → "qwerty"), "f" → 5))))
Config.load(Map())
val applied6 = Config.export(Config.Type.applied, flatten = false)
Config.load(input6)
Config.export(Config.Type.applied, flatten = false) shouldBe ObjectUtil.merge(input6, applied6)
}
// Dynamically loaded values take precedence over system values of the same key.
it should "override" in {
Config.load(Map())
val input = Config.export(Config.Type.system).collect {
case (k, v: String) ⇒ k → s">>> $v <<<"
}
Config.load(Config.unmarshall(Config.marshall(input)))
val applied = Config.export(Config.Type.applied)
input.foreach { case (k, v) ⇒ (k → applied(k)) shouldBe (k → v) }
}
// String (HOCON/YAML-style) input survives an unmarshall → marshall round trip.
it should "unmarshall marshall string input" in {
def validate(input: String) = {
val source = Config.unmarshall(input)
val export = Config.marshall(source)
Config.load(source)
Config.marshall(Config.export(Config.Type.dynamic, flatten = false)) shouldBe export
}
validate("vamp.info.message: Hi!!!")
validate(
"""vamp:
| info:
| message: Hi!!!
""".stripMargin
)
}
// A ConfigFilter drops matching keys during unmarshalling.
it should "unmarshall filter marshall string input" in {
Config.unmarshall("""
|vamp.namespace: vamp
|vamp.info.message: Hi!!!
""".stripMargin, ConfigFilter({ case (k, _) ⇒ k != "vamp.namespace" })) shouldBe Config.unmarshall("vamp.info.message: Hi!!!")
}
}
| magneticio/vamp | common/src/test/scala/io/vamp/common/config/ConfigSpec.scala | Scala | apache-2.0 | 11,268 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.monitoring.metrics
import akka.actor.{ActorSystem, CoordinatedShutdown}
import akka.event.slf4j.SLF4JLogging
import akka.http.scaladsl.Http
import akka.kafka.ConsumerSettings
import akka.stream.ActorMaterializer
import com.typesafe.config.Config
import kamon.Kamon
import kamon.prometheus.PrometheusReporter
import org.apache.kafka.common.serialization.StringDeserializer
import pureconfig._
import pureconfig.generic.auto._
import scala.concurrent.{ExecutionContext, Future}
object OpenWhiskEvents extends SLF4JLogging {

  case class MetricConfig(port: Int, enableKamon: Boolean, ignoredNamespaces: Set[String])

  /**
   * Boots the user-events service: initialises Kamon with a Prometheus
   * reporter, starts the Kafka event consumer, and binds the HTTP endpoint
   * exposing the collected metrics.
   *
   * @param config root configuration; `whisk.user-events` and the Akka Kafka
   *               consumer section are read from it
   * @return the HTTP server binding once the endpoint is up
   */
  def start(config: Config)(implicit system: ActorSystem,
                            materializer: ActorMaterializer): Future[Http.ServerBinding] = {
    implicit val ec: ExecutionContext = system.dispatcher

    // The reporter must be registered before Kamon is initialised.
    val reporter = new PrometheusReporter()
    Kamon.registerModule("prometheus", reporter)
    Kamon.init(config)

    val metricConfig = loadConfigOrThrow[MetricConfig](config, "whisk.user-events")

    val prometheusRecorder = PrometheusRecorder(reporter)
    val recorders =
      if (metricConfig.enableKamon) Seq(prometheusRecorder, KamonRecorder)
      else Seq(prometheusRecorder)
    val consumer = EventConsumer(eventConsumerSettings(defaultConsumerConfig(config)), recorders, metricConfig)

    // Stop the Kafka consumer during coordinated shutdown, before the service is unbound.
    CoordinatedShutdown(system).addTask(CoordinatedShutdown.PhaseBeforeServiceUnbind, "shutdownConsumer") { () =>
      consumer.shutdown()
    }

    val port = metricConfig.port
    val api = new PrometheusEventsApi(consumer, prometheusRecorder)
    val binding = Http().bindAndHandle(api.routes, "0.0.0.0", port)
    binding.foreach(_ => log.info(s"Started the http server on http://localhost:$port"))(system.dispatcher)
    binding
  }

  /** Kafka consumer settings with plain string key/value deserialisers. */
  def eventConsumerSettings(config: Config): ConsumerSettings[String, String] =
    ConsumerSettings(config, new StringDeserializer, new StringDeserializer)

  /** The Akka Kafka consumer section of the given configuration. */
  def defaultConsumerConfig(globalConfig: Config): Config = globalConfig.getConfig("akka.kafka.consumer")
}
| jasonpet/openwhisk | core/monitoring/user-events/src/main/scala/org/apache/openwhisk/core/monitoring/metrics/OpenWhiskEvents.scala | Scala | apache-2.0 | 2,919 |
package scala.slick.compiler
import scala.collection.immutable.HashMap
import scala.slick.SlickException
import scala.slick.util.Logging
import scala.slick.ast.{SymbolNamer, Node}
/** An immutable, stateless query compiler consisting of a series of phases */
class QueryCompiler(val phases: Vector[Phase]) extends Logging {

  /** Return a new compiler with the new phase added at the end. */
  def + (p: Phase) = new QueryCompiler(phases :+ p)

  /** Return a new compiler with the new phase added directly after another
   * phase (or a different implementation of the same phase name). */
  def addAfter(p: Phase, after: Phase) = new QueryCompiler({
    // Insert after the *last* occurrence of the anchor phase's name.
    val anchor = phases.lastIndexWhere(_.name == after.name)
    if(anchor == -1) throw new SlickException(s"Previous phase ${after.name} not found")
    else phases.patch(anchor + 1, Seq(p), 0)
  })

  /** Return a new compiler with the new phase added directly before another
   * phase (or a different implementation of the same phase name). */
  def addBefore(p: Phase, before: Phase) = new QueryCompiler({
    // Insert before the *first* occurrence of the anchor phase's name.
    val anchor = phases.indexWhere(_.name == before.name)
    if(anchor == -1) throw new SlickException(s"Following phase ${before.name} not found")
    else phases.patch(anchor, Seq(p), 0)
  })

  /** Return a new compiler without the given phase (or a different
   * implementation of the same phase name). */
  def - (p: Phase) = new QueryCompiler(phases.filterNot(_.name == p.name))

  /** Return a new compiler that replaces an existing phase by a new one with
   * the same name. The new phase must have a State that is assignable to the
   * original phase's state. */
  def replace(p: Phase) = new QueryCompiler(phases.map(o => if(o.name == p.name) p else o))

  /** Compile an AST, starting from a fresh compiler state. */
  def run(tree: Node): CompilerState = run(new CompilerState(this, tree))

  /** Run all phases over the given state, threading it through them in order. */
  def run(state: CompilerState): CompilerState = {
    if(logger.isDebugEnabled) state.symbolNamer.use { logger.debug("Source:", state.tree) }
    phases.foldLeft(state)((s, p) => runPhase(p, s))
  }

  /** Run only the phases that precede `before`, leaving the rest of the pipeline unapplied. */
  def runBefore(before: Phase, state: CompilerState): CompilerState = {
    if(logger.isDebugEnabled) state.symbolNamer.use { logger.debug("Source:", state.tree) }
    phases.iterator.takeWhile(_.name != before.name).foldLeft(state)((s, p) => runPhase(p, s))
  }

  /** Run a single phase inside the state's symbol-naming scope, logging tree changes. */
  protected[this] def runPhase(p: Phase, state: CompilerState): CompilerState = state.symbolNamer.use {
    val next = p(state)
    if(next.tree ne state.tree) logger.debug(s"After phase ${p.name}:", next.tree)
    else logger.debug(s"After phase ${p.name}: (no change)")
    next
  }
}
/** Standard phase pipelines and factory methods for [[QueryCompiler]]. */
object QueryCompiler {
// NOTE: phase order is significant — each phase assumes the tree shape
// produced by the ones before it.
val standardPhases = Vector(
// Clean up trees from the lifted embedding
Phase.inline,
Phase.assignUniqueSymbols,
// Distribute and normalize
Phase.inferTypes,
Phase.createResultSetMapping,
Phase.forceOuterBinds,
// Convert to column form
Phase.expandTables,
Phase.expandRecords,
Phase.flattenProjections,
Phase.relabelUnions,
Phase.pruneFields,
Phase.assignTypes
)
// Additional phases that lower the tree to relational (SQL-oriented) form.
val relationalPhases = Vector(
Phase.resolveZipJoins,
Phase.convertToComprehensions,
Phase.fuseComprehensions,
Phase.fixRowNumberOrdering,
Phase.hoistClientOps
)
/** The default compiler */
val standard = new QueryCompiler(standardPhases)
/** The default compiler with the additional conversion to relational trees */
val relational = new QueryCompiler(standardPhases ++ relationalPhases)
/** Build a compiler from an explicit sequence of phases. */
def apply(phases: Phase*) = new QueryCompiler(phases.toVector)
}
/** A phase of the query compiler, identified by a unique name. A phase is a
 * function from CompilerState to CompilerState and may publish typed state
 * for later phases under its name. */
trait Phase extends (CompilerState => CompilerState) with Logging {
/** The immutable state of the phase that can also be accessed by other phases. */
type State
/** The unique name of the phase */
val name: String
/** Run the phase */
def apply(state: CompilerState): CompilerState
}
/** Singleton instances of all known compiler phases, referenced by the
 * pipelines in [[QueryCompiler]]. */
object Phase {
/** The standard phases of the query compiler */
val inline = new Inline
val assignUniqueSymbols = new AssignUniqueSymbols
val inferTypes = new InferTypes
val createResultSetMapping = new CreateResultSetMapping
val forceOuterBinds = new ForceOuterBinds
val expandTables = new ExpandTables
val expandRecords = new ExpandRecords
val flattenProjections = new FlattenProjections
val relabelUnions = new RelabelUnions
val pruneFields = new PruneFields
val resolveZipJoins = new ResolveZipJoins
val assignTypes = new AssignTypes
val convertToComprehensions = new ConvertToComprehensions
val fuseComprehensions = new FuseComprehensions
val fixRowNumberOrdering = new FixRowNumberOrdering
val hoistClientOps = new HoistClientOps
/** Extra phases that are not enabled by default */
val rewriteBooleans = new RewriteBooleans
}
/** The current state of a compiler run, consisting of immutable state of
 * individual phases. Mutability is confined to the SymbolNamer. */
class CompilerState private (val compiler: QueryCompiler, val symbolNamer: SymbolNamer,
val tree: Node, state: HashMap[String, Any]) {
/** Create a fresh state for `tree` with a default symbol namer and no phase state. */
def this(compiler: QueryCompiler, tree: Node) =
this(compiler, new SymbolNamer("s", "t"), tree, new HashMap)
/** Look up the state stored by phase `p` (keyed by phase name), if any. */
def get[P <: Phase](p: P): Option[p.State] = state.get(p.name).asInstanceOf[Option[p.State]]
/** Return a copy with `t._2` stored as the state of phase `t._1`. */
def + [S, P <: Phase { type State = S }](t: (P, S)) =
new CompilerState(compiler, symbolNamer, tree, state + (t._1.name -> t._2))
/** Return a copy with a different AST, keeping all phase state. */
def withNode(n: Node) = new CompilerState(compiler, symbolNamer, n, state)
/** Return a copy whose AST is the result of applying `f` to the current tree. */
def map(f: Node => Node) = withNode(f(tree))
}
| retronym/slick | src/main/scala/scala/slick/compiler/QueryCompiler.scala | Scala | bsd-2-clause | 5,528 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.cluster.worker
import java.io.File
import java.lang.management.ManagementFactory
import java.net.URL
import java.util.concurrent.{Executors, TimeUnit}
import akka.actor.SupervisorStrategy.Stop
import akka.actor._
import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
import org.apache.gearpump.cluster.AppMasterToMaster.{GetWorkerData, WorkerData}
import org.apache.gearpump.cluster.AppMasterToWorker._
import org.apache.gearpump.cluster.ClientToMaster.{QueryHistoryMetrics, QueryWorkerConfig}
import org.apache.gearpump.cluster.MasterToClient.{HistoryMetrics, HistoryMetricsItem, WorkerConfig}
import org.apache.gearpump.cluster.MasterToWorker.{UpdateResourceSucceed, UpdateResourceFailed, WorkerRegistered}
import org.apache.gearpump.cluster.WorkerToAppMaster._
import org.apache.gearpump.cluster.WorkerToMaster.{RegisterNewWorker, RegisterWorker, ResourceUpdate}
import org.apache.gearpump.cluster.master.Master.MasterInfo
import org.apache.gearpump.cluster.scheduler.Resource
import org.apache.gearpump.cluster.worker.Worker.ExecutorWatcher
import org.apache.gearpump.cluster.{ClusterConfig, ExecutorJVMConfig}
import org.apache.gearpump.jarstore.JarStoreClient
import org.apache.gearpump.metrics.Metrics.ReportMetrics
import org.apache.gearpump.metrics.{JvmMetricsSet, Metrics, MetricsReporterService}
import org.apache.gearpump.util.ActorSystemBooter.Daemon
import org.apache.gearpump.util.Constants._
import org.apache.gearpump.util.HistoryMetricsService.HistoryMetricsConfig
import org.apache.gearpump.util.{TimeOutScheduler, _}
import org.slf4j.Logger
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success, Try}
/**
* Worker is used to track the resource on single machine, it is like
* the node manager of YARN.
*
* @param masterProxy masterProxy is used to resolve the master
*/
private[cluster] class Worker(masterProxy: ActorRef) extends Actor with TimeOutScheduler {
// Full actor-system configuration, used for metrics/worker settings lookups.
private val systemConfig: Config = context.system.settings.config
// Full actor path of this worker, reported back in WorkerData.
private val address = ActorUtil.getFullPath(context.system, self.path)
// Resource (slots) currently free on this worker.
private var resource = Resource.empty
// Resource reserved per launched executor actor.
private var allocatedResources = Map[ActorRef, Resource]()
// Slot bookkeeping per executor, reported in WorkerData.
private var executorsInfo = Map[ActorRef, ExecutorSlots]()
// Assigned by the master upon registration (see waitForMasterConfirm).
private var id: WorkerId = WorkerId.unspecified
private val createdTime = System.currentTimeMillis()
// Set when WorkerRegistered arrives; null until then.
private var masterInfo: MasterInfo = null
private var executorNameToActor = Map.empty[String, ActorRef]
private val executorProcLauncher: ExecutorProcessLauncher = getExecutorProcLauncher()
private val jarStoreClient = new JarStoreClient(systemConfig, context.system)
// Pool handed to ExecutorWatcher for process supervision work.
private val ioPool = ExecutionContext.fromExecutorService(Executors.newCachedThreadPool())
private val resourceUpdateTimeoutMs = 30000 // Milliseconds
private var totalSlots: Int = 0
val metricsEnabled = systemConfig.getBoolean(GEARPUMP_METRIC_ENABLED)
var historyMetricsService: Option[ActorRef] = None
// NOTE(review): the initial Receive is null; the actor presumably switches to
// waitForMasterConfirm via context.become before any message arrives
// (registration code is not visible in this chunk) — confirm.
override def receive: Receive = null
// Re-assigned with a worker-scoped logger once the id is known (waitForMasterConfirm).
var LOG: Logger = LogUtil.getLogger(getClass)
/** Behavior after successful registration: app-master requests, client
 * queries, metrics queries, and termination watch on the master. */
def service: Receive =
appMasterMsgHandler orElse
clientMessageHandler orElse
metricsService orElse
terminationWatch(masterInfo.master) orElse
ActorUtil.defaultMsgHandler(self)
/** Answers history-metrics queries; replies with an empty result when the
 * history service is not running so the caller is never left waiting. */
def metricsService: Receive = {
case query: QueryHistoryMetrics =>
if (historyMetricsService.isEmpty) {
// Returns empty metrics so that we don't hang the UI
sender ! HistoryMetrics(query.path, List.empty[HistoryMetricsItem])
} else {
historyMetricsService.get forward query
}
}
// Guards against double metric registration when the worker re-registers.
private var metricsInitialized = false
val getHistoryMetricsConfig = HistoryMetricsConfig(systemConfig)
/** Registers the JVM metrics set and, when metrics are enabled, spawns the
 * history-metrics actor plus the reporter actor that feeds it. */
private def initializeMetrics(): Unit = {
// Registers jvm metrics
val metricsSetName = "worker" + WorkerId.render(id)
Metrics(context.system).register(new JvmMetricsSet(metricsSetName))
historyMetricsService = if (metricsEnabled) {
val historyMetricsService = {
context.actorOf(Props(new HistoryMetricsService(metricsSetName, getHistoryMetricsConfig)))
}
val metricsReportService = context.actorOf(Props(
new MetricsReporterService(Metrics(context.system))))
historyMetricsService.tell(ReportMetrics, metricsReportService)
Some(historyMetricsService)
} else {
None
}
}
/** Behavior while waiting for the master to confirm registration. On
 * WorkerRegistered: records the assigned id and master, cancels the timeout
 * ticker, reports current resource, and switches to the `service` behavior. */
def waitForMasterConfirm(timeoutTicker: Cancellable): Receive = {
// If master get disconnected, the WorkerRegistered may be triggered multiple times.
case WorkerRegistered(id, masterInfo) =>
this.id = id
// Adds the flag check, so that we don't re-initialize the metrics when worker re-register
// itself.
if (!metricsInitialized) {
initializeMetrics()
metricsInitialized = true
}
this.masterInfo = masterInfo
timeoutTicker.cancel()
context.watch(masterInfo.master)
this.LOG = LogUtil.getLogger(getClass, worker = id)
LOG.info(s"Worker is registered. " +
s"actor path: ${ActorUtil.getFullPath(context.system, self.path)} ....")
// Report current resource to the master; on timeout only an error is logged.
sendMsgWithTimeOutCallBack(masterInfo.master, ResourceUpdate(self, id, resource),
resourceUpdateTimeoutMs, updateResourceTimeOut())
context.become(service)
}
/** Callback fired when the ResourceUpdate sent to the master times out; only logs. */
private def updateResourceTimeOut(): Unit = {
LOG.error(s"Update worker resource time out")
}
  // Handles requests coming from AppMasters (and the master): launching and
  // shutting down executors, resource-update acknowledgements, worker-data
  // queries, and dynamic executor-resource changes. All resource bookkeeping
  // for this worker (resource / allocatedResources / executorsInfo) happens
  // here and in terminationWatch.
  def appMasterMsgHandler: Receive = {
    case shutdown@ShutdownExecutor(appId, executorId, reason: String) =>
      val actorName = ActorUtil.actorNameForExecutor(appId, executorId)
      val executorToStop = executorNameToActor.get(actorName)
      if (executorToStop.isDefined) {
        LOG.info(s"Shutdown executor ${actorName}(${executorToStop.get.path.toString}) " +
          s"due to: $reason")
        executorToStop.get.forward(shutdown)
      } else {
        LOG.error(s"Cannot find executor $actorName, ignore this message")
        sender ! ShutdownExecutorFailed(s"Can not find executor $executorId for app $appId")
      }
    case launch: LaunchExecutor =>
      LOG.info(s"$launch")
      if (resource < launch.resource) {
        sender ! ExecutorLaunchRejected("There is no free resource on this machine")
      } else {
        // Spawn a watcher actor that owns the executor process/JVM, then debit
        // the requested resource from this worker and report the new free
        // amount to the master.
        val actorName = ActorUtil.actorNameForExecutor(launch.appId, launch.executorId)
        val executor = context.actorOf(Props(classOf[ExecutorWatcher], launch, masterInfo, ioPool,
          jarStoreClient, executorProcLauncher))
        executorNameToActor += actorName -> executor
        resource = resource - launch.resource
        allocatedResources = allocatedResources + (executor -> launch.resource)
        reportResourceToMaster()
        executorsInfo += executor ->
          ExecutorSlots(launch.appId, launch.executorId, launch.resource.slots)
        context.watch(executor)
      }
    case UpdateResourceFailed(reason, ex) =>
      // The master rejected our resource report: the worker state is no longer
      // trustworthy, so the worker terminates itself.
      LOG.error(reason)
      context.stop(self)
    case UpdateResourceSucceed =>
      LOG.info(s"Update resource succeed")
    case GetWorkerData(workerId) =>
      val aliveFor = System.currentTimeMillis() - createdTime
      val logDir = LogUtil.daemonLogDir(systemConfig).getAbsolutePath
      val userDir = System.getProperty("user.dir")
      sender ! WorkerData(WorkerSummary(
        id, "active",
        address,
        aliveFor,
        logDir,
        executorsInfo.values.toArray,
        totalSlots,
        resource.slots,
        userDir,
        jvmName = ManagementFactory.getRuntimeMXBean().getName(),
        resourceManagerContainerId = systemConfig.getString(
          GEARPUMP_WORKER_RESOURCE_MANAGER_CONTAINER_ID),
        historyMetricsConfig = getHistoryMetricsConfig)
      )
    case ChangeExecutorResource(appId, executorId, usedResource) =>
      // Reconcile the executor's actually-used resource with what was
      // allocated; the difference is returned to (or taken from) the worker's
      // free pool.
      for (executor <- executorActorRef(appId, executorId);
        allocatedResource <- allocatedResources.get(executor)) {
        allocatedResources += executor -> usedResource
        resource = resource + allocatedResource - usedResource
        reportResourceToMaster()
        if (usedResource == Resource(0)) {
          executorsInfo -= executor
          allocatedResources -= executor
          // stop executor if there is no resource binded to it.
          LOG.info(s"Shutdown executor $executorId because the resource used is zero")
          executor ! ShutdownExecutor(appId, executorId,
            "Shutdown executor because the resource used is zero")
        }
      }
  }
  // Pushes this worker's current free resource to the master; on missing
  // acknowledgement within resourceUpdateTimeoutMs, updateResourceTimeOut()
  // logs the failure.
  private def reportResourceToMaster(): Unit = {
    sendMsgWithTimeOutCallBack(masterInfo.master,
      ResourceUpdate(self, id, resource), resourceUpdateTimeoutMs, updateResourceTimeOut())
  }
private def executorActorRef(appId: Int, executorId: Int): Option[ActorRef] = {
val actorName = ActorUtil.actorNameForExecutor(appId, executorId)
executorNameToActor.get(actorName)
}
def clientMessageHandler: Receive = {
case QueryWorkerConfig(workerId) =>
if (this.id == workerId) {
sender ! WorkerConfig(ClusterConfig.filterOutDefaultConfig(systemConfig))
} else {
sender ! WorkerConfig(ConfigFactory.empty)
}
}
  // Periodically re-sends RegisterWorker to the master proxy; if no
  // confirmation arrives within timeOutSeconds the worker kills itself.
  // Returns the ticker so the caller can cancel it once registered.
  private def retryRegisterWorker(workerId: WorkerId, timeOutSeconds: Int): Cancellable = {
    repeatActionUtil(
      seconds = timeOutSeconds,
      action = () => {
        masterProxy ! RegisterWorker(workerId)
      },
      onTimeout = () => {
        LOG.error(s"Failed to register the worker $workerId after retrying for $timeOutSeconds " +
          s"seconds, abort and kill the worker...")
        self ! PoisonPill
      })
  }
  // Watches for Terminated notifications. If the master died, go back to the
  // registration loop looking for a new master; if one of our executor
  // watchers died, reclaim its allocated resource and report the new free
  // amount to the master.
  def terminationWatch(master: ActorRef): Receive = {
    case Terminated(actor) =>
      if (actor.compareTo(master) == 0) {
        // Parent master is down, no point to keep worker anymore. Let's make suicide to free
        // resources
        LOG.info(s"Master cannot be contacted, find a new master ...")
        context.become(waitForMasterConfirm(retryRegisterWorker(id, timeOutSeconds = 30)))
      } else if (ActorUtil.isChildActorPath(self, actor)) {
        // One executor is down,
        LOG.info(s"Executor is down ${getExecutorName(actor)}")
        val allocated = allocatedResources.get(actor)
        if (allocated.isDefined) {
          // Return the dead executor's resource to the free pool and drop its
          // bookkeeping entries before reporting to the master.
          resource = resource + allocated.get
          executorsInfo -= actor
          allocatedResources = allocatedResources - actor
          sendMsgWithTimeOutCallBack(master, ResourceUpdate(self, id, resource),
            resourceUpdateTimeoutMs, updateResourceTimeOut())
        }
      }
  }
private def getExecutorName(actorRef: ActorRef): Option[String] = {
executorNameToActor.find(_._2 == actorRef).map(_._1)
}
  // Instantiates the configured ExecutorProcessLauncher implementation via
  // reflection; the configured class must expose a single-argument (Config)
  // constructor.
  private def getExecutorProcLauncher(): ExecutorProcessLauncher = {
    val launcherClazz = Class.forName(
      systemConfig.getString(GEARPUMP_EXECUTOR_PROCESS_LAUNCHER))
    launcherClazz.getConstructor(classOf[Config]).newInstance(systemConfig)
      .asInstanceOf[ExecutorProcessLauncher]
  }
import context.dispatcher
  // On start: size the slot pool from configuration, announce ourselves to the
  // master proxy, and wait (with a 30s timeout) for registration confirmation.
  override def preStart(): Unit = {
    LOG.info(s"RegisterNewWorker")
    totalSlots = systemConfig.getInt(GEARPUMP_WORKER_SLOTS)
    this.resource = Resource(totalSlots)
    masterProxy ! RegisterNewWorker
    context.become(waitForMasterConfirm(registerTimeoutTicker(seconds = 30)))
  }
private def registerTimeoutTicker(seconds: Int): Cancellable = {
repeatActionUtil(seconds, () => Unit, () => {
LOG.error(s"Failed to register new worker to Master after waiting for $seconds seconds, " +
s"abort and kill the worker...")
self ! PoisonPill
})
}
  // Runs `action` immediately and then every 2 seconds, and schedules
  // `onTimeout` to fire once after `seconds`. The returned Cancellable cancels
  // both schedules; cancel() is true only if both underlying cancels succeed.
  private def repeatActionUtil(seconds: Int, action: () => Unit, onTimeout: () => Unit)
    : Cancellable = {
    val cancelTimeout = context.system.scheduler.schedule(Duration.Zero,
      Duration(2, TimeUnit.SECONDS))(action())
    val cancelSuicide = context.system.scheduler.scheduleOnce(seconds.seconds)(onTimeout())
    new Cancellable {
      def cancel(): Boolean = {
        val result1 = cancelTimeout.cancel()
        val result2 = cancelSuicide.cancel()
        result1 && result2
      }
      def isCancelled: Boolean = {
        cancelTimeout.isCancelled && cancelSuicide.isCancelled
      }
    }
  }
  // On stop: release the executor-launching thread pool and bring the whole
  // actor system down with the worker.
  override def postStop(): Unit = {
    LOG.info(s"Worker is going down....")
    ioPool.shutdown()
    context.system.terminate()
  }
}
private[cluster] object Worker {
case class ExecutorResult(result: Try[Int])
  /**
   * Owns the lifecycle of a single executor: resolves its configuration,
   * launches it either in-process (InJvmExecutor) or as a separate JVM, and
   * forwards its exit status back to itself as an ExecutorResult message.
   */
  class ExecutorWatcher(
      launch: LaunchExecutor,
      masterInfo: MasterInfo,
      ioPool: ExecutionContext,
      jarStoreClient: JarStoreClient,
      procLauncher: ExecutorProcessLauncher) extends Actor {
    import launch.{appId, executorId, resource}
    private val LOG: Logger = LogUtil.getLogger(getClass, app = appId, executor = executorId)
    // Effective executor configuration: the application-submission config
    // (when present) merged on top of / filtered against the worker's config.
    val executorConfig: Config = {
      val workerConfig = context.system.settings.config
      val submissionConfig = Option(launch.executorJvmConfig).flatMap { jvmConfig =>
        Option(jvmConfig.executorAkkaConfig)
      }.getOrElse(ConfigFactory.empty())
      resolveExecutorConfig(workerConfig, submissionConfig)
    }
    // For some config, worker has priority, for others, user Application submission config
    // have priorities.
    private def resolveExecutorConfig(workerConfig: Config, submissionConfig: Config): Config = {
      // Worker-owned settings are stripped from the submission config so the
      // worker's values win for them after the fallback below.
      val config = submissionConfig.withoutPath(GEARPUMP_HOSTNAME)
        .withoutPath(GEARPUMP_CLUSTER_MASTERS)
        .withoutPath(GEARPUMP_HOME)
        .withoutPath(GEARPUMP_LOG_DAEMON_DIR)
        .withoutPath(GEARPUMP_LOG_APPLICATION_DIR)
        .withoutPath(GEARPUMP_CLUSTER_EXECUTOR_WORKER_SHARE_SAME_PROCESS)
        // Falls back to workerConfig
        .withFallback(workerConfig)
      // Minimum supported akka.scheduler.tick-duration on Windows is 10ms
      val duration = config.getInt(AKKA_SCHEDULER_TICK_DURATION)
      val updatedConf = if (akka.util.Helpers.isWindows && duration < 10) {
        LOG.warn(s"$AKKA_SCHEDULER_TICK_DURATION on Windows must be larger than 10ms, set to 10ms")
        config.withValue(AKKA_SCHEDULER_TICK_DURATION, ConfigValueFactory.fromAnyRef(10))
      } else {
        config
      }
      // Excludes reference.conf, and JVM properties..
      ClusterConfig.filterOutDefaultConfig(updatedConf)
    }
    implicit val executorService = ioPool
    // Either an in-JVM executor actor or a separately-launched process,
    // selected by GEARPUMP_CLUSTER_EXECUTOR_WORKER_SHARE_SAME_PROCESS.
    private val executorHandler = {
      val ctx = launch.executorJvmConfig
      if (executorConfig.getBoolean(GEARPUMP_CLUSTER_EXECUTOR_WORKER_SHARE_SAME_PROCESS)) {
        new ExecutorHandler {
          val exitPromise = Promise[Int]()
          val app = context.actorOf(Props(new InJvmExecutor(launch, exitPromise)))
          override def destroy(): Unit = {
            context.stop(app)
          }
          override def exitValue: Future[Int] = {
            exitPromise.future
          }
        }
      } else {
        createProcess(ctx)
      }
    }
    // Asynchronously prepares jar/config files and JVM options, then launches
    // the executor as a child process via procLauncher. The returned handler
    // can destroy the process and exposes its eventual exit value.
    private def createProcess(ctx: ExecutorJVMConfig): ExecutorHandler = {
      val process = Future {
        val jarPath = ctx.jar.map { appJar =>
          val tempFile = File.createTempFile(appJar.name, ".jar")
          jarStoreClient.copyToLocalFile(tempFile, appJar.filePath)
          val file = new URL("file:" + tempFile)
          file.getFile
        }
        // Persist the resolved executor config to a temp file that the child
        // JVM picks up via GEARPUMP_CUSTOM_CONFIG_FILE.
        val configFile = {
          val configFile = File.createTempFile("gearpump", ".conf")
          ClusterConfig.saveConfig(executorConfig, configFile)
          val file = new URL("file:" + configFile)
          file.getFile
        }
        val classPath = filterOutDaemonLib(Util.getCurrentClassPath) ++
          ctx.classPath.map(path => expandEnviroment(path)) ++
          jarPath.map(Array(_)).getOrElse(Array.empty[String])
        val appLogDir = executorConfig.getString(GEARPUMP_LOG_APPLICATION_DIR)
        val logArgs = List(
          s"-D${GEARPUMP_APPLICATION_ID}=${launch.appId}",
          s"-D${GEARPUMP_EXECUTOR_ID}=${launch.executorId}",
          s"-D${GEARPUMP_MASTER_STARTTIME}=${getFormatedTime(masterInfo.startTime)}",
          s"-D${GEARPUMP_LOG_APPLICATION_DIR}=${appLogDir}")
        val configArgs = List(s"-D${GEARPUMP_CUSTOM_CONFIG_FILE}=$configFile")
        val username = List(s"-D${GEARPUMP_USERNAME}=${ctx.username}")
        // Remote debug executor process
        val remoteDebugFlag = executorConfig.getBoolean(GEARPUMP_REMOTE_DEBUG_EXECUTOR_JVM)
        val remoteDebugConfig = if (remoteDebugFlag) {
          val availablePort = Util.findFreePort().get
          List(
            "-Xdebug",
            s"-Xrunjdwp:server=y,transport=dt_socket,address=${availablePort},suspend=n",
            s"-D${GEARPUMP_REMOTE_DEBUG_PORT}=$availablePort"
          )
        } else {
          List.empty[String]
        }
        val verboseGCFlag = executorConfig.getBoolean(GEARPUMP_VERBOSE_GC)
        val verboseGCConfig = if (verboseGCFlag) {
          List(
            s"-Xloggc:${appLogDir}/gc-app${launch.appId}-executor-${launch.executorId}.log",
            "-verbose:gc",
            "-XX:+PrintGCDetails",
            "-XX:+PrintGCDateStamps",
            "-XX:+PrintTenuringDistribution",
            "-XX:+PrintGCApplicationConcurrentTime",
            "-XX:+PrintGCApplicationStoppedTime"
          )
        } else {
          List.empty[String]
        }
        val ipv4 = List(s"-D${PREFER_IPV4}=true")
        val gearpumpHome =
          List(s"-D${Constants.GEARPUMP_HOME}=${System.getProperty(Constants.GEARPUMP_HOME)}")
        val options = ctx.jvmArguments ++ username ++
          logArgs ++ remoteDebugConfig ++ verboseGCConfig ++ ipv4 ++ gearpumpHome ++ configArgs
        val process = procLauncher.createProcess(appId, executorId, resource, executorConfig,
          options, classPath, ctx.mainClass, ctx.arguments)
        ProcessInfo(process, jarPath, configFile)
      }
      new ExecutorHandler {
        // Idempotence guard: destroy() may be called more than once (e.g. from
        // ShutdownExecutor and again from postStop).
        var destroyed = false
        override def destroy(): Unit = {
          LOG.info(s"Destroy executor process ${ctx.mainClass}")
          if (!destroyed) {
            destroyed = true
            process.foreach { info =>
              info.process.destroy()
              info.jarPath.foreach(new File(_).delete())
              new File(info.configFile).delete()
            }
          }
        }
        override def exitValue: Future[Int] = {
          process.flatMap { info =>
            // Blocks (on ioPool) until the child process terminates.
            val exit = info.process.exitValue()
            if (exit == 0) {
              Future.successful(0)
            } else {
              Future.failed[Int](new Exception(s"Executor exit with failure, exit value: $exit, " +
                s"error summary: ${info.process.logger.error}"))
            }
          }
        }
      }
    }
    // Substitutes the <gearpump.home> placeholder in a classpath element.
    // (Method name keeps the historical "Enviroment" spelling; it is private
    // and referenced above.)
    private def expandEnviroment(path: String): String = {
      // TODO: extend this to support more environment.
      path.replace(s"<${GEARPUMP_HOME}>", executorConfig.getString(GEARPUMP_HOME))
    }
    // When the executor finishes (however it finishes), clean up launcher
    // state and deliver the outcome to ourselves as an ExecutorResult.
    override def preStart(): Unit = {
      executorHandler.exitValue.onComplete { value =>
        procLauncher.cleanProcess(appId, executorId)
        val result = ExecutorResult(value)
        self ! result
      }
    }
    override def postStop(): Unit = {
      executorHandler.destroy()
    }
    // The folders are under ${GEARPUMP_HOME}
    val daemonPathPattern = List("lib" + File.separator + "yarn")
    override def receive: Receive = {
      case ShutdownExecutor(appId, executorId, reason: String) =>
        executorHandler.destroy()
        sender ! ShutdownExecutorSucceed(appId, executorId)
        context.stop(self)
      case ExecutorResult(executorResult) =>
        executorResult match {
          case Success(exit) => LOG.info("Executor exit normally with exit value " + exit)
          case Failure(e) => LOG.error("Executor exit with errors", e)
        }
        context.stop(self)
    }
    // Formats a millisecond timestamp as yyyy-MM-dd-HH-mm in the default time
    // zone. A fresh SimpleDateFormat is created per call, so no thread-safety
    // issue despite SimpleDateFormat being non-thread-safe.
    private def getFormatedTime(timestamp: Long): String = {
      val datePattern = "yyyy-MM-dd-HH-mm"
      val format = new java.text.SimpleDateFormat(datePattern)
      format.format(timestamp)
    }
    // Drops daemon-only jars (e.g. lib/yarn) from the inherited classpath so
    // executors do not see them.
    private def filterOutDaemonLib(classPath: Array[String]): Array[String] = {
      classPath.filterNot(matchDaemonPattern(_))
    }
    private def matchDaemonPattern(path: String): Boolean = {
      daemonPathPattern.exists(path.contains(_))
    }
  }
  /**
   * Abstraction over a running executor, whether in-JVM or a child process:
   * it can be destroyed, and its exit code is observable as a Future that
   * fails when the executor terminated abnormally.
   */
  trait ExecutorHandler {
    def destroy(): Unit
    def exitValue: Future[Int]
  }
case class ProcessInfo(process: RichProcess, jarPath: Option[String], configFile: String)
  /**
   * Starts the executor in the same JVM as worker.
   *
   * The `exit` promise is completed with 0 on a clean stop, or failed with
   * the causing exception when a child crashes (see supervisorStrategy).
   */
  class InJvmExecutor(launch: LaunchExecutor, exit: Promise[Int])
    extends Daemon(launch.executorJvmConfig.arguments(0), launch.executorJvmConfig.arguments(1)) {
    private val exitCode = 0
    // Any child failure fails the exit promise and stops the child; broad
    // Throwable matching is intentional here (supervision, not control flow).
    override val supervisorStrategy =
      OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1.minute) {
        case ex: Throwable =>
          LOG.error(s"system $name stopped ", ex)
          exit.failure(ex)
          Stop
      }
    override def postStop(): Unit = {
      // Only complete with success if no failure was recorded before.
      if (!exit.isCompleted) {
        exit.success(exitCode)
      }
    }
  }
} | manuzhang/incubator-gearpump | core/src/main/scala/org/apache/gearpump/cluster/worker/Worker.scala | Scala | apache-2.0 | 21,904 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.ui
import java.util
import java.util.{Locale, Properties}
import javax.servlet.http.HttpServletRequest
import org.apache.spark.SparkConf
import scala.xml.Node
import org.mockito.Mockito.{RETURNS_SMART_NULLS, mock, when}
import org.scalatest.BeforeAndAfter
import org.apache.spark.scheduler.{JobFailed, SparkListenerJobEnd, SparkListenerJobStart}
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.execution.{SQLExecution, SparkPlanInfo}
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.status.ElementTrackingStore
import org.apache.spark.util.kvstore.InMemoryStore
/**
 * UI tests for the SQL AllExecutionsPage: feeds synthetic execution/job events
 * into a SQLAppStatusListener backed by an in-memory store and renders the
 * page against mocked tab/request objects.
 */
class AllExecutionsPageSuite extends SharedSparkSession with BeforeAndAfter {
  import testImplicits._
  // Columnar-plugin test configuration layered on top of the shared session
  // conf; commented-out entries are intentionally disabled.
  override def sparkConf: SparkConf =
    super.sparkConf
      .setAppName("test")
      .set("spark.sql.parquet.columnarReaderBatchSize", "4096")
      .set("spark.sql.sources.useV1SourceList", "avro")
      .set("spark.sql.extensions", "com.intel.oap.ColumnarPlugin")
      .set("spark.sql.execution.arrow.maxRecordsPerBatch", "4096")
      //.set("spark.shuffle.manager", "org.apache.spark.shuffle.sort.ColumnarShuffleManager")
      .set("spark.memory.offHeap.enabled", "true")
      .set("spark.memory.offHeap.size", "50m")
      .set("spark.sql.join.preferSortMergeJoin", "false")
      .set("spark.sql.columnar.codegen.hashAggregate", "false")
      .set("spark.oap.sql.columnar.wholestagecodegen", "false")
      .set("spark.sql.columnar.window", "false")
      .set("spark.unsafe.exceptionOnMemoryLeak", "false")
      //.set("spark.sql.columnar.tmp_dir", "/codegen/nativesql/")
      .set("spark.sql.columnar.sort.broadcastJoin", "true")
      .set("spark.oap.sql.columnar.preferColumnar", "true")
  // Store created per test by createStatusStore; closed after each test.
  var kvstore: ElementTrackingStore = _
  after {
    if (kvstore != null) {
      kvstore.close()
      kvstore = null
    }
  }
  // NOTE(review): disabled upstream test, kept for reference.
  ignore("SPARK-27019: correctly display SQL page when event reordering happens") {
    val statusStore = createStatusStore
    val tab = mock(classOf[SQLTab], RETURNS_SMART_NULLS)
    when(tab.sqlStore).thenReturn(statusStore)
    val request = mock(classOf[HttpServletRequest])
    when(tab.appName).thenReturn("testing")
    when(tab.headerTabs).thenReturn(Seq.empty)
    val html = renderSQLPage(request, tab, statusStore).toString().toLowerCase(Locale.ROOT)
    assert(html.contains("failed queries"))
    assert(!html.contains("1970/01/01"))
  }
  test("sorting should be successful") {
    val statusStore = createStatusStore
    val tab = mock(classOf[SQLTab], RETURNS_SMART_NULLS)
    val request = mock(classOf[HttpServletRequest])
    when(tab.sqlStore).thenReturn(statusStore)
    when(tab.appName).thenReturn("testing")
    when(tab.headerTabs).thenReturn(Seq.empty)
    when(request.getParameter("failed.sort")).thenReturn("Duration")
    val map = new util.HashMap[String, Array[String]]()
    map.put("failed.sort", Array("duration"))
    when(request.getParameterMap()).thenReturn(map)
    val html = renderSQLPage(request, tab, statusStore).toString().toLowerCase(Locale.ROOT)
    assert(!html.contains("illegalargumentexception"))
    assert(html.contains("duration"))
  }
  // Builds a live SQLAppStatusStore over an in-memory kvstore; the store is
  // remembered in `kvstore` so the after-hook can close it.
  private def createStatusStore: SQLAppStatusStore = {
    val conf = sparkContext.conf
    kvstore = new ElementTrackingStore(new InMemoryStore, conf)
    val listener = new SQLAppStatusListener(conf, kvstore, live = true)
    new SQLAppStatusStore(kvstore, Some(listener))
  }
  private def createTestDataFrame: DataFrame = {
    Seq(
      (1, 1),
      (2, 2)
    ).toDF().filter("_1 > 1")
  }
  /**
   * Render a stage page started with the given conf and return the HTML.
   * This also runs a dummy execution page to populate the page with useful content.
   */
  private def renderSQLPage(
      request: HttpServletRequest,
      tab: SQLTab,
      statusStore: SQLAppStatusStore): Seq[Node] = {
    val listener = statusStore.listener.get
    val page = new AllExecutionsPage(tab)
    // Replay two executions, each ending with a failed job, so the page has
    // both completed and failed content to render.
    Seq(0, 1).foreach { executionId =>
      val df = createTestDataFrame
      listener.onOtherEvent(SparkListenerSQLExecutionStart(
        executionId,
        "test",
        "test",
        df.queryExecution.toString,
        SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
        System.currentTimeMillis()))
      listener.onOtherEvent(SparkListenerSQLExecutionEnd(
        executionId, System.currentTimeMillis()))
      listener.onJobStart(SparkListenerJobStart(
        jobId = 0,
        time = System.currentTimeMillis(),
        stageInfos = Nil,
        createProperties(executionId)))
      listener.onJobEnd(SparkListenerJobEnd(
        jobId = 0,
        time = System.currentTimeMillis(),
        JobFailed(new RuntimeException("Oops"))))
    }
    page.render(request)
  }
  // Job properties that tie a job to a SQL execution id.
  private def createProperties(executionId: Long): Properties = {
    val properties = new Properties()
    properties.setProperty(SQLExecution.EXECUTION_ID_KEY, executionId.toString)
    properties
  }
}
| Intel-bigdata/OAP | oap-native-sql/core/src/test/scala/org/apache/spark/sql/execution/ui/AllExecutionsPageSuite.scala | Scala | apache-2.0 | 5,799 |
/*
* Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.wegtam.tensei.agent.transformers
import akka.testkit.TestActorRef
import akka.util.ByteString
import com.wegtam.tensei.adt.TransformerOptions
import com.wegtam.tensei.agent.ActorSpec
import com.wegtam.tensei.agent.transformers.BaseTransformer.{
PrepareForTransformation,
ReadyToTransform,
StartTransformation,
TransformerResponse
}
// Behavioral tests for the Split transformer: splitting an input ByteString by
// a `pattern`, optionally truncating with `limit` or cherry-picking parts with
// `selected` (out-of-range selections yield empty strings).
// NOTE(review): "Transfomers" below is a typo, but it is a runtime test label
// (string literal), so it is deliberately left unchanged here.
class SplitTest extends ActorSpec {
  describe("Transfomers") {
    describe("Split") {
      describe("when given an empty list") {
        it("should return an empty string") {
          val actor = TestActorRef(Split.props)
          actor ! PrepareForTransformation
          expectMsg(ReadyToTransform)
          actor ! StartTransformation(List(),
                                      new TransformerOptions(classOf[String], classOf[String]))
          val response = TransformerResponse(List(ByteString("")), classOf[String])
          val d = expectMsgType[TransformerResponse]
          d.data shouldEqual response.data
        }
        describe("with an empty pattern") {
          it("should return an empty string") {
            val actor = TestActorRef(Split.props)
            actor ! PrepareForTransformation
            expectMsg(ReadyToTransform)
            val params = List(("pattern", ""))
            actor ! StartTransformation(List(),
                                        new TransformerOptions(classOf[String],
                                                               classOf[String],
                                                               params))
            val response = TransformerResponse(List(ByteString("")), classOf[String])
            expectMsg(response)
          }
        }
        describe("with a limit") {
          it("should return an empty string") {
            val actor = TestActorRef(Split.props)
            actor ! PrepareForTransformation
            expectMsg(ReadyToTransform)
            val params = List(("limit", "2"))
            actor ! StartTransformation(List(),
                                        new TransformerOptions(classOf[String],
                                                               classOf[String],
                                                               params))
            val response = TransformerResponse(List(ByteString("")), classOf[String])
            expectMsg(response)
          }
        }
        describe("with selected entries") {
          it("should return an empty string") {
            val actor = TestActorRef(Split.props)
            actor ! PrepareForTransformation
            expectMsg(ReadyToTransform)
            val params = List(("selected", "1,2"))
            actor ! StartTransformation(List(),
                                        new TransformerOptions(classOf[String],
                                                               classOf[String],
                                                               params))
            val response = TransformerResponse(List(ByteString("")), classOf[String])
            expectMsg(response)
          }
        }
        describe("with selected entries and a limit") {
          it("should return an empty string") {
            val actor = TestActorRef(Split.props)
            actor ! PrepareForTransformation
            expectMsg(ReadyToTransform)
            val params = List(("selected", "1,2"), ("limit", "2"))
            actor ! StartTransformation(List(),
                                        new TransformerOptions(classOf[String],
                                                               classOf[String],
                                                               params))
            val response = TransformerResponse(List(ByteString("")), classOf[String])
            expectMsg(response)
          }
        }
      }
      describe("when given a string") {
        it("should return the unsplitted string") {
          val actor = TestActorRef(Split.props)
          actor ! PrepareForTransformation
          expectMsg(ReadyToTransform)
          actor ! StartTransformation(
            List(ByteString("alex, mustermann, 25.11.1980, 0381-123456789")),
            new TransformerOptions(classOf[String], classOf[String])
          )
          val response =
            TransformerResponse(List(ByteString("alex, mustermann, 25.11.1980, 0381-123456789")),
                                classOf[String])
          expectMsg(response)
        }
        describe("with a pattern that is not in the string") {
          it("should return the string") {
            val actor = TestActorRef(Split.props)
            actor ! PrepareForTransformation
            expectMsg(ReadyToTransform)
            val params = List(("pattern", ";"))
            actor ! StartTransformation(
              List(ByteString("alex, mustermann, 25.11.1980, 0381-123456789")),
              new TransformerOptions(classOf[String], classOf[String], params)
            )
            val response =
              TransformerResponse(List(ByteString("alex, mustermann, 25.11.1980, 0381-123456789")),
                                  classOf[String])
            val d = expectMsgType[TransformerResponse]
            d.data shouldEqual response.data
          }
        }
        describe("with a pattern") {
          it("should return the splitted parts of the string") {
            val actor = TestActorRef(Split.props)
            actor ! PrepareForTransformation
            expectMsg(ReadyToTransform)
            val params = List(("pattern", ","))
            actor ! StartTransformation(
              List(ByteString("alex, mustermann, 25.11.1980, 0381-123456789")),
              new TransformerOptions(classOf[String], classOf[String], params)
            )
            // Note: the transformer trims surrounding whitespace of each part.
            val response = TransformerResponse(List(ByteString("alex"),
                                                    ByteString("mustermann"),
                                                    ByteString("25.11.1980"),
                                                    ByteString("0381-123456789")),
                                               classOf[String])
            val d = expectMsgType[TransformerResponse]
            d.data shouldEqual response.data
          }
        }
        describe("with a pattern and a limit") {
          it("should return the limited first splitted parts of the string") {
            val actor = TestActorRef(Split.props)
            actor ! PrepareForTransformation
            expectMsg(ReadyToTransform)
            val params = List(("pattern", ","), ("limit", "2"))
            actor ! StartTransformation(
              List(ByteString("alex, mustermann, 25.11.1980, 0381-123456789")),
              new TransformerOptions(classOf[String], classOf[String], params)
            )
            val response = TransformerResponse(List(ByteString("alex"), ByteString("mustermann")),
                                               classOf[String])
            val d = expectMsgType[TransformerResponse]
            d.data shouldEqual response.data
          }
        }
        describe("with a pattern and a selected amount of splitted parts") {
          it("should return the selected splitted parts of the string") {
            val actor = TestActorRef(Split.props)
            actor ! PrepareForTransformation
            expectMsg(ReadyToTransform)
            val params = List(("pattern", ","), ("selected", "0, 2, 3"))
            actor ! StartTransformation(
              List(ByteString("alex, mustermann, 25.11.1980, 0381-123456789")),
              new TransformerOptions(classOf[String], classOf[String], params)
            )
            val response = TransformerResponse(List(ByteString("alex"),
                                                    ByteString("25.11.1980"),
                                                    ByteString("0381-123456789")),
                                               classOf[String])
            expectMsg(response)
          }
        }
        describe(
          "with a pattern and a selected amount of splitted parts where the selected parts are incorrect"
        ) {
          it("should return the selected splitted parts of the string that are within the list") {
            val actor = TestActorRef(Split.props)
            actor ! PrepareForTransformation
            expectMsg(ReadyToTransform)
            val params = List(("pattern", ","), ("selected", "0, 2, 5"))
            actor ! StartTransformation(
              List(ByteString("alex, mustermann, 25.11.1980, 0381-123456789")),
              new TransformerOptions(classOf[String], classOf[String], params)
            )
            // Index 5 is out of range for the 4 parts, yielding an empty part.
            val response = TransformerResponse(List(ByteString("alex"),
                                                    ByteString("25.11.1980"),
                                                    ByteString("")),
                                               classOf[String])
            expectMsg(response)
          }
        }
      }
    }
  }
}
| Tensei-Data/tensei-agent | src/test/scala/com/wegtam/tensei/agent/transformers/SplitTest.scala | Scala | agpl-3.0 | 9,825 |
package core
import core.rendering.ViewType
import eu.delving.schema.SchemaVersion
import models.OrganizationConfiguration
import play.api.mvc.RequestHeader
/**
*
* @author Manuel Bernhardt <bernhardt.manuel@gmail.com>
*/
// Service interface for resolving a single record by its hub-wide identifier.
trait RecordResolverService {
  /**
   * Retrieves a record given a global hubId
   *
   * @param hubId the ID of the record
   * @param schemaVersion the (optional) version of the schema to be fetched
   * @param request the current HTTP request (implicit)
   * @param configuration the active organization configuration (implicit)
   * @return the renderable record, or None when it cannot be resolved
   */
  def getRecord(hubId: HubId, schemaVersion: Option[SchemaVersion] = None)(implicit request: RequestHeader, configuration: OrganizationConfiguration): Option[RenderableRecord]
}
/**
 * A resolved record together with everything needed to render it.
 *
 * @param recordXml the raw record payload, as XML
 * @param systemFields system fields associated with the record
 * @param schemaVersion the schema version the record is delivered in
 * @param viewType how the record should be rendered (HTML by default)
 * @param parameters extra parameters forwarded to the renderer
 * @param hasRelatedItems whether related items are available for this record
 * @param resolveRefererLink optional function turning a referer link into a
 *                           (url, label) pair — presumably for back-links;
 *                           confirm against callers
 * @param availableSchemas all schema versions this record is available in
 */
case class RenderableRecord(recordXml: String,
    systemFields: Map[String, List[String]],
    schemaVersion: SchemaVersion,
    viewType: ViewType = ViewType.HTML,
    parameters: Map[String, Seq[String]] = Map.empty,
    hasRelatedItems: Boolean = false,
    resolveRefererLink: Option[String => (String, String)] = None,
availableSchemas: List[SchemaVersion] = List.empty) | delving/culture-hub | web-core/app/core/RecordResolverService.scala | Scala | apache-2.0 | 982 |
package com.mesosphere.cosmos.error
import cats.data.Ior
import com.mesosphere.universe.common.circe.Encoders._
import com.netaporter.uri.Uri
import io.circe.JsonObject
import io.circe.syntax._
// Error raised when a repository lookup by name and/or URI finds no matching
// entry in the configured repository list.
final case class RepositoryNotPresent(nameOrUri: Ior[String, Uri]) extends CosmosError {
  // JSON payload mirrors whichever identifiers were supplied.
  override def data: Option[JsonObject] = {
    val fields = nameOrUri.fold(
      name => Map("name" -> name.asJson),
      uri => Map("uri" -> uri.asJson),
      (name, uri) => Map("name" -> name.asJson, "uri" -> uri.asJson)
    )
    Some(JsonObject.fromMap(fields))
  }
  override def message: String = {
    nameOrUri.fold(
      name => s"Repository name [$name] is not present in the list",
      uri => s"Repository URI [$uri] is not present in the list",
      (name, uri) => s"Neither repository name [$name] nor URI [$uri] are present in the list"
    )
  }
}
| takirala/cosmos | cosmos-common/src/main/scala/com/mesosphere/cosmos/error/RepositoryNotPresent.scala | Scala | apache-2.0 | 906 |
package eu.stratosphere.peel.datagen.flink
import eu.stratosphere.peel.datagen.flink.Distributions._
import eu.stratosphere.peel.datagen.util.RanHash
import org.apache.flink.api.scala._
import org.apache.flink.core.fs.FileSystem
import org.apache.flink.util.NumberSequenceIterator
/**
 * Flink job that writes N words sampled from a seeded random dictionary
 * according to a configurable discrete distribution. All randomness is
 * derived from a fixed seed, so runs are reproducible.
 */
object WordGenerator {
  // Fixed seed: identical dictionaries and word choices across runs.
  val SEED = 1010

  def main(args: Array[String]): Unit = {
    if (args.length != 6) {
      Console.err.println("Usage: <jar> numberOfWorkers coresPerWorker tuplesPerTask sizeOfDictionary distribution[params] outputPath")
      System.exit(-1)
    }
    val numberOfWorkers = args(0).toInt
    val coresPerWorker = args(1).toInt
    val tuplesPerTask = args(2).toInt
    val sizeOfDictionary = args(3).toInt
    implicit val distribution = parseDist(sizeOfDictionary, args(4))
    val outputPath = args(5)
    val dop = coresPerWorker * numberOfWorkers
    // Use Long arithmetic: dop * tuplesPerTask can exceed Int.MaxValue, and
    // NumberSequenceIterator takes Long bounds anyway.
    val N = dop.toLong * tuplesPerTask
    // generate dictionary of random words
    implicit val dictionary = new Dictionary(SEED, sizeOfDictionary).words()
    val environment = ExecutionEnvironment.getExecutionEnvironment
    environment
      // create a sequence [1 .. N] to create N words
      .fromParallelCollection(new NumberSequenceIterator(1, N))
      // set up workers
      .setParallelism(dop)
      // map every n <- [1 .. N] to a random word sampled from a word list
      .map(i => word(i))
      // write result to file
      .writeAsText(outputPath, FileSystem.WriteMode.OVERWRITE)
    environment.execute(s"WordGenerator[$N]")
  }

  // Deterministically picks the word for sequence element i: the i-th hash
  // value drives a sample from the configured distribution.
  def word(i: Long)(implicit dictionary: Array[String], distribution: DiscreteDistribution) = {
    dictionary(distribution.sample(new RanHash(SEED + i).next()))
  }

  // Accepted distribution descriptors, e.g. "Uniform", "Binomial[0.5]",
  // "Zipf[1.2]".
  object Patterns {
    val DiscreteUniform = """(Uniform)""".r
    val Binomial = """Binomial\[(1|1\.0|0\.\d+)\]""".r
    val Zipf = """Zipf\[(\d+(?:\.\d+)?)\]""".r
  }

  /**
   * Parses a distribution descriptor into a DiscreteDistribution over `card`
   * items. Previously this match was non-exhaustive and an unrecognized
   * descriptor crashed with a bare MatchError; it now fails with an
   * informative IllegalArgumentException instead.
   */
  def parseDist(card: Int, s: String): DiscreteDistribution = s match {
    case Patterns.DiscreteUniform(_) => DiscreteUniform(card)
    case Patterns.Binomial(a) => Binomial(card, a.toDouble)
    case Patterns.Zipf(a) => Zipf(card, a.toDouble)
    case other => throw new IllegalArgumentException(
      s"Unsupported distribution descriptor '$other'. " +
        "Expected one of: Uniform, Binomial[p] with p in (0, 1], Zipf[s]")
  }
}
| carabolic/peel-wordcount-bundle | peel-wordcount-flink-datagens/src/main/scala/eu/stratosphere/peel/datagen/flink/WordGenerator.scala | Scala | apache-2.0 | 2,183 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js API **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013-2015, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package scala.scalajs.js.annotation
/** IMPLEMENTATION DETAIL: Saves the fully qualified JS name of a symbol.
 *
 *  Internal marker attached by the Scala.js toolchain to record the fully
 *  qualified JavaScript name of the annotated symbol.
 *
 *  Do not use this annotation yourself.
 */
class JSFullName(fullName: String) extends scala.annotation.StaticAnnotation
| lrytz/scala-js | library/src/main/scala/scala/scalajs/js/annotation/JSFullName.scala | Scala | bsd-3-clause | 763 |
import org.automanlang.adapters.mturk.DSL._
import org.automanlang.core.logging.LogLevelDebug
import org.automanlang.core.policy.aggregation.UserDefinableSpawnPolicy
// Example AutoMan program: posts a checkbox question to MTurk and prints the
// set of options the crowd selected.
object SimpleCheckboxProgram extends App {
  // Parse credentials/sandbox flags from the command line.
  val opts = Utilities.unsafe_optparse(args, "simple_checkbox_program")
  // AutoMan adapter configured against MTurk; implicit so the DSL picks it up.
  implicit val a = mturk (
    access_key_id = opts('key),
    secret_access_key = opts('secret),
    sandbox_mode = opts('sandbox).toBoolean,
    log_verbosity = LogLevelDebug()
  )
  // Checkbox question with an overall budget of $8.00; each option pairs a
  // label with an image URL.
  def which_one(text: String) = checkbox (
    budget = 8.00,
    text = text,
    options = List[MTQuestionOption](
      "Oscar the Grouch" -> "http://tinyurl.com/qfwlx56",
      "Kermit the Frog" -> "http://tinyurl.com/nuwyz3u",
      "Spongebob Squarepants" -> "http://tinyurl.com/oj6wzx6",
      "Cookie Monster" -> "http://tinyurl.com/otb6thl",
      "The Count" -> "http://tinyurl.com/nfdbyxa"
    ),
    minimum_spawn_policy = UserDefinableSpawnPolicy(0)
  )
  automan(a) {
    val outcome = which_one("Which of these DO NOT BELONG? (check all that apply)")
    outcome.answer match {
      // NOTE(review): due to type erasure this only checks the Answer class,
      // not the Set[Symbol] type argument.
      case a:Answer[Set[Symbol]] =>
        println("Answers are: " + a.value.map(_.toString).mkString(","))
      case _ => println("Error occurred.")
    }
  }
} | dbarowy/AutoMan | apps/simple/SimpleCheckboxProgram/src/main/scala/SimpleCheckboxProgram.scala | Scala | gpl-2.0 | 1,227 |
/**
* Copyright (C) 2012 Typesafe, Inc. <http://www.typesafe.com>
*/
package org.pantsbuild.zinc.compiler
import java.io.File
import java.nio.file.attribute.BasicFileAttributes
import java.nio.file.{FileVisitResult, Files, Path, Paths, SimpleFileVisitor}
import java.util.jar.{JarEntry, JarInputStream, JarOutputStream}
import scala.annotation.tailrec
import scala.collection.mutable
object OutputUtils {

  /**
   * Collects the contents of `dir` sorted in lexicographic path order.
   *
   * @param dir directory whose contents (the directory itself, all
   *            sub-directories and all files) are collected
   * @return sorted set of all paths within `dir`
   */
  def sort(dir: File): mutable.TreeSet[Path] = {
    val sorted = new mutable.TreeSet[Path]()
    val fileSortVisitor = new SimpleFileVisitor[Path]() {
      override def preVisitDirectory(path: Path, attrs: BasicFileAttributes): FileVisitResult = {
        // NOTE(review): the original branched on `path.endsWith("/")`, but
        // `Path.endsWith` compares whole path elements and
        // `Paths.get(path.toString, "/")` normalizes back to `path`, so both
        // branches added the same value; the dead branch is removed here.
        sorted.add(path)
        FileVisitResult.CONTINUE
      }
      override def visitFile(path: Path, attrs: BasicFileAttributes): FileVisitResult = {
        sorted.add(path)
        FileVisitResult.CONTINUE
      }
    }
    Files.walkFileTree(dir.toPath, fileSortVisitor)
    sorted
  }

  /** Returns `path` relative to the `base` directory as a slash-separated string. */
  def relativize(base: String, path: Path): String =
    new File(base).toURI().relativize(new File(path.toString).toURI()).getPath()

  /**
   * Creates a JAR containing the provided paths.
   *
   * Entries are written in the iteration order of `filePaths` (sorted, since
   * it is a TreeSet) and stamped with a fixed time so the output is
   * byte-for-byte reproducible.
   *
   * BUG FIX: the original converted the sorted set to an (unordered) `Map`
   * before writing, which destroyed the sort order and made jar creation
   * non-deterministic — defeating the stated reproducibility goal.
   *
   * @param base directory entry names are made relative to
   * @param filePaths sorted set of all paths to be added to the JAR
   * @param outputJarPath absolute path of the output JAR being created
   * @param jarEntryTime time to be set for each JAR entry
   */
  def createJar(
      base: String, filePaths: mutable.TreeSet[Path], outputJarPath: Path, jarEntryTime: Long): Unit = {
    val target = new JarOutputStream(Files.newOutputStream(outputJarPath))
    try {
      filePaths.foreach { path =>
        val entry = new JarEntry(relativize(base, path))
        // Fixed timestamp for every entry so jars are byte-for-byte reproducible.
        entry.setTime(jarEntryTime)
        target.putNextEntry(entry)
        // Directories get a bare entry; regular files also get their contents.
        if (!path.toFile.isDirectory) {
          Files.copy(path, target)
        }
        target.closeEntry()
      }
    } finally {
      // Ensures the stream is closed even if writing an entry fails.
      target.close()
    }
  }

  /**
   * Jars the contents of `classesDirectory` into `outputJarPath`, with entry
   * names relative to `classesDirectory` and deterministic entry ordering.
   */
  def createClassesJar(classesDirectory: File, outputJarPath: Path, jarCreationTime: Long) = {
    // Sort the contents of the classesDirectory for deterministic jar creation.
    val sortedClasses = sort(classesDirectory)
    createJar(classesDirectory.toString, sortedClasses, outputJarPath, jarCreationTime)
  }

  /**
   * Determines whether an entry named `fileName` exists in the JAR at `jarPath`.
   *
   * BUG FIX: the original initialised the stream to null and closed it
   * unconditionally in a `finally`, so a failure to open the JAR was masked by
   * a NullPointerException from `null.close()`.
   *
   * @param jarPath absolute path to the JAR being inspected
   * @param fileName entry name whose existence is to be checked
   */
  def existsClass(jarPath: Path, fileName: String): Boolean = {
    val jis = new JarInputStream(Files.newInputStream(jarPath))
    try {
      @tailrec
      def findClass(entry: JarEntry): Boolean = entry match {
        case null => false
        case e if e.getName == fileName => true
        case _ => findClass(jis.getNextJarEntry)
      }
      findClass(jis.getNextJarEntry)
    } finally {
      jis.close()
    }
  }
}
| twitter/pants | src/scala/org/pantsbuild/zinc/compiler/OutputUtils.scala | Scala | apache-2.0 | 3,830 |
package co.ledger.wallet.web.ripple.i18n
import scala.scalajs.js
/**
*
* TranslateProvider
* ledger-wallet-ripple-chrome
*
* Created by Pierre Pollastri on 02/06/2016.
*
* The MIT License (MIT)
*
* Copyright (c) 2016 Ledger
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
/** Scala.js facade for a JavaScript translation provider.
 *
 *  NOTE(review): the member names (`useStaticFilesLoader`, `preferredLanguage`,
 *  `registerAvailableLanguageKeys`, ...) match the angular-translate
 *  `$translateProvider` API — confirm against the bundled JS dependency.
 *  Every builder method returns the provider itself, so calls can be chained.
 */
@js.native
trait TranslateProvider extends js.Any {
  def useStaticFilesLoader(options: js.Dictionary[String]): TranslateProvider = js.native
  def preferredLanguage(language: String): TranslateProvider = js.native
  def useSanitizeValueStrategy(strategy: String): TranslateProvider = js.native
  def determinePreferredLanguage(): TranslateProvider = js.native
  def fallbackLanguage(language: String): TranslateProvider = js.native
  def registerAvailableLanguageKeys(languageKeys: js.Array[String], aliases: js.Dictionary[String]): TranslateProvider = js.native
}
| LedgerHQ/ledger-wallet-ripple | src/main/scala/co/ledger/wallet/web/ripple/i18n/TranslateProvider.scala | Scala | mit | 1,912 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.jobs.scalding.taps
import java.io.Closeable
import java.util.Properties
import cascading.flow.FlowProcess
import cascading.scheme.{SinkCall, SourceCall}
import cascading.tuple._
import com.twitter.scalding.AccessMode
import com.typesafe.scalalogging.slf4j.Logging
import org.apache.accumulo.core.client.security.tokens.PasswordToken
import org.apache.accumulo.core.client.{BatchWriter, MultiTableBatchWriter, ZooKeeperInstance}
import org.apache.accumulo.core.data.{Key, Mutation, Value}
import org.apache.hadoop.io.Text
import org.locationtech.geomesa.accumulo.util.GeoMesaBatchWriterConfig
import org.locationtech.geomesa.jobs.scalding._
import scala.collection.JavaConversions._
import scala.util.{Failure, Success, Try}
/**
* Cascading Tap to read and write from accumulo in local mode
*/
case class AccumuloLocalTap(readOrWrite: AccessMode, scheme: AccumuloLocalScheme)
  extends AccLocalTap(scheme) with Logging {
  // Connection/table settings carried by the scheme.
  val options = scheme.options
  val getIdentifier: String = toString
  // Connector and table operations are created lazily, so no Zookeeper
  // connection is made until the tap is actually used.
  lazy val connector = new ZooKeeperInstance(options.instance, options.zooKeepers)
      .getConnector(options.user, new PasswordToken(options.password))
  lazy val tableOps = connector.tableOperations()
  override def openForRead(fp: FlowProcess[Properties], rr: KVRecordReader): TupleEntryIterator = {
    val input = options.asInstanceOf[AccumuloInputOptions]
    // Plain scanner for a full-table scan; batch scanner (5 query threads)
    // when explicit ranges were configured.
    val scanner = if (input.ranges.isEmpty) {
      connector.createScanner(input.table, input.authorizations)
    } else {
      val bs = connector.createBatchScanner(input.table, input.authorizations, 5)
      bs.setRanges(input.ranges.flatMap(SerializedRangeSeq.unapply))
      bs
    }
    input.iterators.foreach(scanner.addScanIterator)
    input.columns.flatMap(SerializedColumnSeq.unapply)
        .foreach(p => scanner.fetchColumn(p.getFirst, p.getSecond))
    val entries = scanner.iterator()
    // Adapt the scanner's entry iterator to the Hadoop RecordReader interface
    // expected by the cascading scheme; close() releases the scanner.
    val iterator = new KVRecordReader() with Closeable {
      var pos: Int = 0
      // Copy the next Accumulo entry into the supplied key/value holders.
      override def next(key: Key, value: Value) = if (entries.hasNext) {
        val next = entries.next()
        key.set(next.getKey)
        value.set(next.getValue.get)
        pos += 1
        true
      } else {
        false
      }
      // Progress is not tracked for local scans.
      override def getProgress = 0f
      override def getPos = pos
      override def createKey() = new Key()
      override def createValue() = new Value()
      override def close() = scanner.close()
    }
    new TupleEntrySchemeIterator(fp, scheme, iterator)
  }
  override def openForWrite(fp: FlowProcess[Properties], out: MutOutputCollector): TupleEntryCollector = {
    val collector = new AccumuloLocalCollector(fp, this)
    collector.prepare()
    collector
  }
  // Creates the backing table; logs and returns false instead of throwing.
  override def createResource(conf: Properties): Boolean =
    Try(tableOps.create(options.table)) match {
      case Success(_) => true
      case Failure(e) =>
        logger.error(s"Error creating table ${options.table}", e)
        false
    }
  // Deletes the backing table; logs and returns false instead of throwing.
  override def deleteResource(conf: Properties): Boolean =
    Try(tableOps.delete(options.table)) match {
      case Success(_) => true
      case Failure(e) =>
        logger.error(s"Error deleting table ${options.table}", e)
        false
    }
  override def resourceExists(conf: Properties): Boolean = tableOps.exists(options.table)
  // NOTE(review): returning "now" presumably makes cascading always consider
  // the resource up to date / re-readable — confirm against cascading's
  // modified-time semantics.
  override def getModifiedTime(conf: Properties): Long = System.currentTimeMillis()
  override def toString = s"AccumuloLocalTap[$readOrWrite,$options]"
}
/**
* Collector that writes directly to accumulo
*/
/**
 * Collector that writes mutations directly to Accumulo.
 *
 * One `MultiTableBatchWriter` backs a cache of per-table `BatchWriter`s;
 * mutations with no explicit target table go to the tap's configured table.
 */
class AccumuloLocalCollector(flowProcess: FlowProcess[Properties], tap: AccumuloLocalTap)
    extends TupleEntrySchemeCollector[Properties, MutOutputCollector](flowProcess, tap.getScheme)
    with MutOutputCollector {

  setOutput(this)

  // Created in prepare(); closing it flushes and closes all cached writers.
  private var writer: MultiTableBatchWriter = null
  private val writerCache = scala.collection.mutable.Map.empty[Text, BatchWriter]
  // Fallback table for mutations collected without an explicit table.
  private val defaultTable = new Text(tap.options.table)

  override def prepare(): Unit = {
    val instance = new ZooKeeperInstance(tap.options.instance, tap.options.zooKeepers)
    val connector = instance.getConnector(tap.options.user, new PasswordToken(tap.options.password))
    writer = connector.createMultiTableBatchWriter(GeoMesaBatchWriterConfig())
    sinkCall.setOutput(this)
    super.prepare()
  }

  override def close(): Unit = {
    writer.close()
    super.close()
  }

  override def collect(t: Text, m: Mutation): Unit = {
    val table = if (t == null) defaultTable else t
    // BUG FIX: look up the batch writer with `table`, not `t` — the original
    // called `t.toString`, which threw a NullPointerException exactly when the
    // defaultTable fallback was taken (t == null).
    val bw = writerCache.getOrElseUpdate(table, writer.getBatchWriter(table.toString))
    bw.addMutation(m)
  }
}
/**
* Scheme to map between key value pairs and mutations
*/
case class AccumuloLocalScheme(options: AccumuloSourceOptions)
  extends AccLocalScheme(AccumuloSource.sourceFields, AccumuloSource.sinkFields) {
  // No Hadoop configuration is required for the local-mode taps.
  override def sourceConfInit(fp: FlowProcess[Properties], tap: AccLocalTap, conf: Properties): Unit = {}
  override def sinkConfInit(fp: FlowProcess[Properties], tap: AccLocalTap, conf: Properties): Unit = {}
  // Reads the next key/value pair into the reusable holders kept in the
  // source-call context (see sourcePrepare) and emits them as a tuple.
  // Returns false when the underlying reader is exhausted.
  override def source(fp: FlowProcess[Properties], sc: SourceCall[Array[Any], KVRecordReader]): Boolean = {
    val context = sc.getContext
    val k = context(0).asInstanceOf[Key]
    val v = context(1).asInstanceOf[Value]
    val hasNext = sc.getInput.next(k, v)
    if (hasNext) {
      sc.getIncomingEntry.setTuple(new Tuple(k, v))
    }
    hasNext
  }
  // Forwards an outgoing (table, mutation) entry to the output collector.
  override def sink(fp: FlowProcess[Properties], sc: SinkCall[Array[Any], MutOutputCollector]): Unit = {
    val entry = sc.getOutgoingEntry
    val table = entry.getObject(0).asInstanceOf[Text]
    val mutation = entry.getObject(1).asInstanceOf[Mutation]
    sc.getOutput.collect(table, mutation)
  }
  // Allocates the reusable key/value holders once per source call.
  override def sourcePrepare(fp: FlowProcess[Properties], sc: SourceCall[Array[Any], KVRecordReader]): Unit =
    sc.setContext(Array(sc.getInput.createKey(), sc.getInput.createValue()))
  override def sourceCleanup(fp: FlowProcess[Properties], sc: SourceCall[Array[Any], KVRecordReader]): Unit =
    sc.setContext(null)
}
| giserh/geomesa | geomesa-jobs/src/main/scala/org/locationtech/geomesa/jobs/scalding/taps/AccumuloLocalTap.scala | Scala | apache-2.0 | 6,453 |
package gapt.proofs.lk.rules
import gapt.expr.Expr
import gapt.expr.formula.Formula
import gapt.proofs.HOLSequent
import gapt.proofs.Sequent
import gapt.proofs.context.Context
import gapt.proofs.context.facet.ProofNames
/** Initial sequent that references a proof by an expression (its name).
 *
 *  @param referencedProof expression naming the referenced proof
 *  @param referencedSequent the sequent recorded for that proof; used as the
 *                           conclusion of this link
 */
case class ProofLink( referencedProof: Expr, referencedSequent: Sequent[Formula] ) extends InitialSequent {
  // Rule name shown for this inference.
  override def name: String = "link"
  override def conclusion: HOLSequent = referencedSequent
}
object ProofLink {
  // Looks up the sequent registered for `referencedProof` in the context's
  // ProofNames facet; the `.get` throws if the proof is not registered.
  def apply( referencedProof: Expr )( implicit ctx: Context ): ProofLink =
    ProofLink( referencedProof, ctx.get[ProofNames].lookup( referencedProof ).get )
  // Resolves a proof by its string name, then delegates to the Expr overload.
  // Throws if `name` is unknown to the context.
  def apply( name: String )( implicit ctx: Context ): ProofLink =
    ProofLink( ctx.get[ProofNames].names( name )._1 )
}
package jigg.ml.keras
/*
Copyright 2013-2015 Hiroshi Noji
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licencses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitation under the License.
*/
import breeze.linalg.DenseMatrix
object Flatten extends Functor{
  override def functorName = "Flatten"
  // Collapses the input matrix into a single row (1 x (rows * cols)).
  // NOTE(review): breeze's DenseMatrix vectorises column-major, so transposing
  // first should produce row-major (Keras-style) ordering — confirm.
  override final def convert(data: DenseMatrix[Float]): DenseMatrix[Float] = data.t.toDenseVector.toDenseMatrix
  // Convenience alias for `convert`.
  def apply(x: DenseMatrix[Float]): DenseMatrix[Float] = this.convert(x)
}
| tomeken-yoshinaga/jigg | src/main/scala/jigg/ml/keras/Flatten.scala | Scala | apache-2.0 | 901 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.util.{Locale, Properties}
import scala.collection.JavaConverters._
import com.fasterxml.jackson.databind.ObjectMapper
import com.univocity.parsers.csv.CsvParser
import org.apache.spark.Partition
import org.apache.spark.annotation.InterfaceStability
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.json.{CreateJacksonParser, JacksonParser, JSONOptions}
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.execution.datasources.{DataSource, FailureSafeParser}
import org.apache.spark.sql.execution.datasources.csv._
import org.apache.spark.sql.execution.datasources.jdbc._
import org.apache.spark.sql.execution.datasources.json.TextInputJsonDataSource
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Utils
import org.apache.spark.sql.sources.v2.{BatchReadSupportProvider, DataSourceOptions, DataSourceV2}
import org.apache.spark.sql.types.{StringType, StructType}
import org.apache.spark.unsafe.types.UTF8String
/**
* Interface used to load a [[Dataset]] from external storage systems (e.g. file systems,
* key-value stores, etc). Use `SparkSession.read` to access this.
*
* @since 1.4.0
*/
@InterfaceStability.Stable
class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
/**
* Specifies the input data source format.
*
* @since 1.4.0
*/
def format(source: String): DataFrameReader = {
this.source = source
this
}
/**
* Specifies the input schema. Some data sources (e.g. JSON) can infer the input schema
* automatically from data. By specifying the schema here, the underlying data source can
* skip the schema inference step, and thus speed up data loading.
*
* @since 1.4.0
*/
def schema(schema: StructType): DataFrameReader = {
this.userSpecifiedSchema = Option(schema)
this
}
/**
* Specifies the schema by using the input DDL-formatted string. Some data sources (e.g. JSON) can
* infer the input schema automatically from data. By specifying the schema here, the underlying
* data source can skip the schema inference step, and thus speed up data loading.
*
* {{{
* spark.read.schema("a INT, b STRING, c DOUBLE").csv("test.csv")
* }}}
*
* @since 2.3.0
*/
def schema(schemaString: String): DataFrameReader = {
this.userSpecifiedSchema = Option(StructType.fromDDL(schemaString))
this
}
/**
* Adds an input option for the underlying data source.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a timezone
* to be used to parse timestamps in the JSON/CSV datasources or partition values.</li>
* </ul>
*
* @since 1.4.0
*/
def option(key: String, value: String): DataFrameReader = {
this.extraOptions += (key -> value)
this
}
/**
* Adds an input option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: Boolean): DataFrameReader = option(key, value.toString)
/**
* Adds an input option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: Long): DataFrameReader = option(key, value.toString)
/**
* Adds an input option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: Double): DataFrameReader = option(key, value.toString)
/**
* (Scala-specific) Adds input options for the underlying data source.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a timezone
* to be used to parse timestamps in the JSON/CSV datasources or partition values.</li>
* </ul>
*
* @since 1.4.0
*/
def options(options: scala.collection.Map[String, String]): DataFrameReader = {
this.extraOptions ++= options
this
}
/**
* Adds input options for the underlying data source.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a timezone
* to be used to parse timestamps in the JSON/CSV datasources or partition values.</li>
* </ul>
*
* @since 1.4.0
*/
def options(options: java.util.Map[String, String]): DataFrameReader = {
this.options(options.asScala)
this
}
/**
* Loads input in as a `DataFrame`, for data sources that don't require a path (e.g. external
* key-value stores).
*
* @since 1.4.0
*/
def load(): DataFrame = {
load(Seq.empty: _*) // force invocation of `load(...varargs...)`
}
/**
* Loads input in as a `DataFrame`, for data sources that require a path (e.g. data backed by
* a local or distributed file system).
*
* @since 1.4.0
*/
def load(path: String): DataFrame = {
// force invocation of `load(...varargs...)`
option(DataSourceOptions.PATH_KEY, path).load(Seq.empty: _*)
}
/**
* Loads input in as a `DataFrame`, for data sources that support multiple paths.
* Only works if the source is a HadoopFsRelationProvider.
*
* @since 1.6.0
*/
@scala.annotation.varargs
def load(paths: String*): DataFrame = {
if (source.toLowerCase(Locale.ROOT) == DDLUtils.HIVE_PROVIDER) {
throw new AnalysisException("Hive data source can only be used with tables, you can not " +
"read files of Hive data source directly.")
}
val cls = DataSource.lookupDataSource(source, sparkSession.sessionState.conf)
if (classOf[DataSourceV2].isAssignableFrom(cls)) {
val ds = cls.newInstance().asInstanceOf[DataSourceV2]
if (ds.isInstanceOf[BatchReadSupportProvider]) {
val sessionOptions = DataSourceV2Utils.extractSessionConfigs(
ds = ds, conf = sparkSession.sessionState.conf)
val pathsOption = {
val objectMapper = new ObjectMapper()
DataSourceOptions.PATHS_KEY -> objectMapper.writeValueAsString(paths.toArray)
}
Dataset.ofRows(sparkSession, DataSourceV2Relation.create(
ds, extraOptions.toMap ++ sessionOptions + pathsOption,
userSpecifiedSchema = userSpecifiedSchema))
} else {
loadV1Source(paths: _*)
}
} else {
loadV1Source(paths: _*)
}
}
private def loadV1Source(paths: String*) = {
// Code path for data source v1.
sparkSession.baseRelationToDataFrame(
DataSource.apply(
sparkSession,
paths = paths,
userSpecifiedSchema = userSpecifiedSchema,
className = source,
options = extraOptions.toMap).resolveRelation())
}
/**
* Construct a `DataFrame` representing the database table accessible via JDBC URL
* url named table and connection properties.
*
* @since 1.4.0
*/
def jdbc(url: String, table: String, properties: Properties): DataFrame = {
assertNoSpecifiedSchema("jdbc")
// properties should override settings in extraOptions.
this.extraOptions ++= properties.asScala
// explicit url and dbtable should override all
this.extraOptions += (JDBCOptions.JDBC_URL -> url, JDBCOptions.JDBC_TABLE_NAME -> table)
format("jdbc").load()
}
/**
* Construct a `DataFrame` representing the database table accessible via JDBC URL
* url named table. Partitions of the table will be retrieved in parallel based on the parameters
* passed to this function.
*
* Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash
* your external database systems.
*
* @param url JDBC database url of the form `jdbc:subprotocol:subname`.
* @param table Name of the table in the external database.
* @param columnName the name of a column of integral type that will be used for partitioning.
* @param lowerBound the minimum value of `columnName` used to decide partition stride.
* @param upperBound the maximum value of `columnName` used to decide partition stride.
* @param numPartitions the number of partitions. This, along with `lowerBound` (inclusive),
* `upperBound` (exclusive), form partition strides for generated WHERE
* clause expressions used to split the column `columnName` evenly. When
* the input is less than 1, the number is set to 1.
* @param connectionProperties JDBC database connection arguments, a list of arbitrary string
* tag/value. Normally at least a "user" and "password" property
* should be included. "fetchsize" can be used to control the
* number of rows per fetch and "queryTimeout" can be used to wait
* for a Statement object to execute to the given number of seconds.
* @since 1.4.0
*/
def jdbc(
url: String,
table: String,
columnName: String,
lowerBound: Long,
upperBound: Long,
numPartitions: Int,
connectionProperties: Properties): DataFrame = {
// columnName, lowerBound, upperBound and numPartitions override settings in extraOptions.
this.extraOptions ++= Map(
JDBCOptions.JDBC_PARTITION_COLUMN -> columnName,
JDBCOptions.JDBC_LOWER_BOUND -> lowerBound.toString,
JDBCOptions.JDBC_UPPER_BOUND -> upperBound.toString,
JDBCOptions.JDBC_NUM_PARTITIONS -> numPartitions.toString)
jdbc(url, table, connectionProperties)
}
/**
* Construct a `DataFrame` representing the database table accessible via JDBC URL
* url named table using connection properties. The `predicates` parameter gives a list
* expressions suitable for inclusion in WHERE clauses; each one defines one partition
* of the `DataFrame`.
*
* Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash
* your external database systems.
*
* @param url JDBC database url of the form `jdbc:subprotocol:subname`
* @param table Name of the table in the external database.
* @param predicates Condition in the where clause for each partition.
* @param connectionProperties JDBC database connection arguments, a list of arbitrary string
* tag/value. Normally at least a "user" and "password" property
* should be included. "fetchsize" can be used to control the
* number of rows per fetch.
* @since 1.4.0
*/
def jdbc(
url: String,
table: String,
predicates: Array[String],
connectionProperties: Properties): DataFrame = {
assertNoSpecifiedSchema("jdbc")
// connectionProperties should override settings in extraOptions.
val params = extraOptions.toMap ++ connectionProperties.asScala.toMap
val options = new JDBCOptions(url, table, params)
val parts: Array[Partition] = predicates.zipWithIndex.map { case (part, i) =>
JDBCPartition(part, i) : Partition
}
val relation = JDBCRelation(parts, options)(sparkSession)
sparkSession.baseRelationToDataFrame(relation)
}
/**
* Loads a JSON file and returns the results as a `DataFrame`.
*
* See the documentation on the overloaded `json()` method with varargs for more details.
*
* @since 1.4.0
*/
def json(path: String): DataFrame = {
// This method ensures that calls that explicit need single argument works, see SPARK-16009
json(Seq(path): _*)
}
/**
* Loads JSON files and returns the results as a `DataFrame`.
*
* <a href="http://jsonlines.org/">JSON Lines</a> (newline-delimited JSON) is supported by
* default. For JSON (one record per file), set the `multiLine` option to true.
*
* This function goes through the input once to determine the input schema. If you know the
* schema in advance, use the version that specifies the schema to avoid the extra scan.
*
* You can set the following JSON-specific options to deal with non-standard JSON files:
* <ul>
* <li>`primitivesAsString` (default `false`): infers all primitive values as a string type</li>
* <li>`prefersDecimal` (default `false`): infers all floating-point values as a decimal
* type. If the values do not fit in decimal, then it infers them as doubles.</li>
* <li>`allowComments` (default `false`): ignores Java/C++ style comment in JSON records</li>
* <li>`allowUnquotedFieldNames` (default `false`): allows unquoted JSON field names</li>
* <li>`allowSingleQuotes` (default `true`): allows single quotes in addition to double quotes
* </li>
* <li>`allowNumericLeadingZeros` (default `false`): allows leading zeros in numbers
* (e.g. 00012)</li>
* <li>`allowBackslashEscapingAnyCharacter` (default `false`): allows accepting quoting of all
* character using backslash quoting mechanism</li>
* <li>`allowUnquotedControlChars` (default `false`): allows JSON Strings to contain unquoted
* control characters (ASCII characters with value less than 32, including tab and line feed
* characters) or not.</li>
* <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
* during parsing.
* <ul>
* <li>`PERMISSIVE` : when it meets a corrupted record, puts the malformed string into a
* field configured by `columnNameOfCorruptRecord`, and sets other fields to `null`. To
* keep corrupt records, an user can set a string type field named
* `columnNameOfCorruptRecord` in an user-defined schema. If a schema does not have the
* field, it drops corrupt records during parsing. When inferring a schema, it implicitly
* adds a `columnNameOfCorruptRecord` field in an output schema.</li>
* <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
* <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
* </ul>
* </li>
* <li>`columnNameOfCorruptRecord` (default is the value specified in
* `spark.sql.columnNameOfCorruptRecord`): allows renaming the new field having malformed string
* created by `PERMISSIVE` mode. This overrides `spark.sql.columnNameOfCorruptRecord`.</li>
* <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format.
* Custom date formats follow the formats at `java.text.SimpleDateFormat`. This applies to
* date type.</li>
* <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSXXX`): sets the string that
* indicates a timestamp format. Custom date formats follow the formats at
* `java.text.SimpleDateFormat`. This applies to timestamp type.</li>
* <li>`multiLine` (default `false`): parse one record, which may span multiple lines,
* per file</li>
* <li>`encoding` (by default it is not set): allows to forcibly set one of standard basic
* or extended encoding for the JSON files. For example UTF-16BE, UTF-32LE. If the encoding
* is not specified and `multiLine` is set to `true`, it will be detected automatically.</li>
* <li>`lineSep` (default covers all `\r`, `\r\n` and `\n`): defines the line separator
* that should be used for parsing.</li>
* <li>`samplingRatio` (default is 1.0): defines fraction of input JSON objects used
* for schema inferring.</li>
* <li>`dropFieldIfAllNull` (default `false`): whether to ignore column of all null values or
* empty array/struct during schema inference.</li>
* </ul>
*
* @since 2.0.0
*/
  // Varargs overload: all JSON options documented in the scaladoc above apply;
  // delegates to the generic load path with the "json" format.
  @scala.annotation.varargs
  def json(paths: String*): DataFrame = format("json").load(paths : _*)
/**
* Loads a `JavaRDD[String]` storing JSON objects (<a href="http://jsonlines.org/">JSON
* Lines text format or newline-delimited JSON</a>) and returns the result as
* a `DataFrame`.
*
* Unless the schema is specified using `schema` function, this function goes through the
* input once to determine the input schema.
*
* @param jsonRDD input RDD with one JSON object per record
* @since 1.4.0
*/
  // Java-friendly deprecated overload: unwraps the JavaRDD and delegates.
  @deprecated("Use json(Dataset[String]) instead.", "2.2.0")
  def json(jsonRDD: JavaRDD[String]): DataFrame = json(jsonRDD.rdd)
/**
* Loads an `RDD[String]` storing JSON objects (<a href="http://jsonlines.org/">JSON Lines
* text format or newline-delimited JSON</a>) and returns the result as a `DataFrame`.
*
* Unless the schema is specified using `schema` function, this function goes through the
* input once to determine the input schema.
*
* @param jsonRDD input RDD with one JSON object per record
* @since 1.4.0
*/
@deprecated("Use json(Dataset[String]) instead.", "2.2.0")
def json(jsonRDD: RDD[String]): DataFrame = {
json(sparkSession.createDataset(jsonRDD)(Encoders.STRING))
}
  /**
   * Loads a `Dataset[String]` storing JSON objects (<a href="http://jsonlines.org/">JSON Lines
   * text format or newline-delimited JSON</a>) and returns the result as a `DataFrame`.
   *
   * Unless the schema is specified using `schema` function, this function goes through the
   * input once to determine the input schema.
   *
   * @param jsonDataset input Dataset with one JSON object per record
   * @since 2.2.0
   */
  def json(jsonDataset: Dataset[String]): DataFrame = {
    // Merge per-read options with session defaults (time zone and the name of
    // the corrupt-record column).
    val parsedOptions = new JSONOptions(
      extraOptions.toMap,
      sparkSession.sessionState.conf.sessionLocalTimeZone,
      sparkSession.sessionState.conf.columnNameOfCorruptRecord)
    // Infer the schema from the data unless the caller supplied one.
    val schema = userSpecifiedSchema.getOrElse {
      TextInputJsonDataSource.inferFromDataset(jsonDataset, parsedOptions)
    }
    verifyColumnNameOfCorruptRecord(schema, parsedOptions.columnNameOfCorruptRecord)
    // Parse against the schema minus the corrupt-record column; that column is
    // only populated by the failure-safe wrapper below.
    val actualSchema =
      StructType(schema.filterNot(_.name == parsedOptions.columnNameOfCorruptRecord))
    val createParser = CreateJacksonParser.string _
    val parsed = jsonDataset.rdd.mapPartitions { iter =>
      // One Jackson parser per partition; FailureSafeParser applies the
      // configured parse mode (PERMISSIVE / DROPMALFORMED / FAILFAST).
      val rawParser = new JacksonParser(actualSchema, parsedOptions)
      val parser = new FailureSafeParser[String](
        input => rawParser.parse(input, createParser, UTF8String.fromString),
        parsedOptions.parseMode,
        schema,
        parsedOptions.columnNameOfCorruptRecord,
        parsedOptions.multiLine)
      iter.flatMap(parser.parse)
    }
    sparkSession.internalCreateDataFrame(parsed, schema, isStreaming = jsonDataset.isStreaming)
  }
/**
* Loads a CSV file and returns the result as a `DataFrame`. See the documentation on the
* other overloaded `csv()` method for more details.
*
* @since 2.0.0
*/
def csv(path: String): DataFrame = {
// This method ensures that calls that explicit need single argument works, see SPARK-16009
csv(Seq(path): _*)
}
  /**
   * Loads an `Dataset[String]` storing CSV rows and returns the result as a `DataFrame`.
   *
   * If the schema is not specified using `schema` function and `inferSchema` option is enabled,
   * this function goes through the input once to determine the input schema.
   *
   * If the schema is not specified using `schema` function and `inferSchema` option is disabled,
   * it determines the columns as string types and it reads only the first line to determine the
   * names and the number of fields.
   *
   * If the enforceSchema is set to `false`, only the CSV header in the first line is checked
   * to conform specified or inferred schema.
   *
   * @param csvDataset input Dataset with one CSV row per record
   * @since 2.2.0
   */
  def csv(csvDataset: Dataset[String]): DataFrame = {
    val parsedOptions: CSVOptions = new CSVOptions(
      extraOptions.toMap,
      sparkSession.sessionState.conf.csvColumnPruning,
      sparkSession.sessionState.conf.sessionLocalTimeZone)
    // Comment lines and blank lines never contribute data or headers.
    val filteredLines: Dataset[String] =
      CSVUtils.filterCommentAndEmpty(csvDataset, parsedOptions)
    // The first remaining line is the candidate header row (if any).
    val maybeFirstLine: Option[String] = filteredLines.take(1).headOption
    val schema = userSpecifiedSchema.getOrElse {
      TextInputCSVDataSource.inferFromDataset(
        sparkSession,
        csvDataset,
        maybeFirstLine,
        parsedOptions)
    }
    verifyColumnNameOfCorruptRecord(schema, parsedOptions.columnNameOfCorruptRecord)
    // Parse against the schema minus the corrupt-record column; that column is
    // only populated by the failure-safe wrapper below.
    val actualSchema =
      StructType(schema.filterNot(_.name == parsedOptions.columnNameOfCorruptRecord))
    // If a header line exists, validate it against the schema (subject to the
    // `enforceSchema` option) and strip it from every partition.
    val linesWithoutHeader: RDD[String] = maybeFirstLine.map { firstLine =>
      val parser = new CsvParser(parsedOptions.asParserSettings)
      val columnNames = parser.parseLine(firstLine)
      CSVDataSource.checkHeaderColumnNames(
        actualSchema,
        columnNames,
        csvDataset.getClass.getCanonicalName,
        parsedOptions.enforceSchema,
        sparkSession.sessionState.conf.caseSensitiveAnalysis)
      filteredLines.rdd.mapPartitions(CSVUtils.filterHeaderLine(_, firstLine, parsedOptions))
    }.getOrElse(filteredLines.rdd)
    val parsed = linesWithoutHeader.mapPartitions { iter =>
      // One univocity parser per partition; FailureSafeParser applies the
      // configured parse mode (PERMISSIVE / DROPMALFORMED / FAILFAST).
      val rawParser = new UnivocityParser(actualSchema, parsedOptions)
      val parser = new FailureSafeParser[String](
        input => Seq(rawParser.parse(input)),
        parsedOptions.parseMode,
        schema,
        parsedOptions.columnNameOfCorruptRecord,
        parsedOptions.multiLine)
      iter.flatMap(parser.parse)
    }
    sparkSession.internalCreateDataFrame(parsed, schema, isStreaming = csvDataset.isStreaming)
  }
/**
* Loads CSV files and returns the result as a `DataFrame`.
*
* This function will go through the input once to determine the input schema if `inferSchema`
* is enabled. To avoid going through the entire data once, disable `inferSchema` option or
* specify the schema explicitly using `schema`.
*
* You can set the following CSV-specific options to deal with CSV files:
* <ul>
* <li>`sep` (default `,`): sets a single character as a separator for each
* field and value.</li>
* <li>`encoding` (default `UTF-8`): decodes the CSV files by the given encoding
* type.</li>
* <li>`quote` (default `"`): sets a single character used for escaping quoted values where
* the separator can be part of the value. If you would like to turn off quotations, you need to
* set not `null` but an empty string. This behaviour is different from
* `com.databricks.spark.csv`.</li>
* <li>`escape` (default `\`): sets a single character used for escaping quotes inside
* an already quoted value.</li>
* <li>`charToEscapeQuoteEscaping` (default `escape` or `\0`): sets a single character used for
* escaping the escape for the quote character. The default value is escape character when escape
* and quote characters are different, `\0` otherwise.</li>
* <li>`comment` (default empty string): sets a single character used for skipping lines
* beginning with this character. By default, it is disabled.</li>
* <li>`header` (default `false`): uses the first line as names of columns.</li>
* <li>`enforceSchema` (default `true`): If it is set to `true`, the specified or inferred schema
* will be forcibly applied to datasource files, and headers in CSV files will be ignored.
* If the option is set to `false`, the schema will be validated against all headers in CSV files
* in the case when the `header` option is set to `true`. Field names in the schema
* and column names in CSV headers are checked by their positions taking into account
* `spark.sql.caseSensitive`. Though the default value is true, it is recommended to disable
* the `enforceSchema` option to avoid incorrect results.</li>
* <li>`inferSchema` (default `false`): infers the input schema automatically from data. It
* requires one extra pass over the data.</li>
* <li>`samplingRatio` (default is 1.0): defines fraction of rows used for schema inferring.</li>
* <li>`ignoreLeadingWhiteSpace` (default `false`): a flag indicating whether or not leading
* whitespaces from values being read should be skipped.</li>
* <li>`ignoreTrailingWhiteSpace` (default `false`): a flag indicating whether or not trailing
* whitespaces from values being read should be skipped.</li>
* <li>`nullValue` (default empty string): sets the string representation of a null value. Since
* 2.0.1, this applies to all supported types including the string type.</li>
   * <li>`nanValue` (default `NaN`): sets the string representation of a non-number value.</li>
* <li>`positiveInf` (default `Inf`): sets the string representation of a positive infinity
* value.</li>
* <li>`negativeInf` (default `-Inf`): sets the string representation of a negative infinity
* value.</li>
* <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format.
* Custom date formats follow the formats at `java.text.SimpleDateFormat`. This applies to
* date type.</li>
* <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSXXX`): sets the string that
* indicates a timestamp format. Custom date formats follow the formats at
* `java.text.SimpleDateFormat`. This applies to timestamp type.</li>
* <li>`maxColumns` (default `20480`): defines a hard limit of how many columns
* a record can have.</li>
* <li>`maxCharsPerColumn` (default `-1`): defines the maximum number of characters allowed
* for any given value being read. By default, it is -1 meaning unlimited length</li>
* <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
* during parsing. It supports the following case-insensitive modes.
* <ul>
* <li>`PERMISSIVE` : when it meets a corrupted record, puts the malformed string into a
* field configured by `columnNameOfCorruptRecord`, and sets other fields to `null`. To keep
   * corrupt records, a user can set a string type field named `columnNameOfCorruptRecord`
   * in a user-defined schema. If a schema does not have the field, it drops corrupt records
* during parsing. A record with less/more tokens than schema is not a corrupted record to
* CSV. When it meets a record having fewer tokens than the length of the schema, sets
* `null` to extra fields. When the record has more tokens than the length of the schema,
* it drops extra tokens.</li>
* <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
* <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
* </ul>
* </li>
* <li>`columnNameOfCorruptRecord` (default is the value specified in
* `spark.sql.columnNameOfCorruptRecord`): allows renaming the new field having malformed string
* created by `PERMISSIVE` mode. This overrides `spark.sql.columnNameOfCorruptRecord`.</li>
* <li>`multiLine` (default `false`): parse one record, which may span multiple lines.</li>
* </ul>
*
* @since 2.0.0
*/
@scala.annotation.varargs
def csv(paths: String*): DataFrame = format("csv").load(paths : _*)
/**
* Loads a Parquet file, returning the result as a `DataFrame`. See the documentation
* on the other overloaded `parquet()` method for more details.
*
* @since 2.0.0
*/
def parquet(path: String): DataFrame = {
// This method ensures that calls that explicit need single argument works, see SPARK-16009
parquet(Seq(path): _*)
}
/**
* Loads a Parquet file, returning the result as a `DataFrame`.
*
* You can set the following Parquet-specific option(s) for reading Parquet files:
* <ul>
* <li>`mergeSchema` (default is the value specified in `spark.sql.parquet.mergeSchema`): sets
* whether we should merge schemas collected from all Parquet part-files. This will override
* `spark.sql.parquet.mergeSchema`.</li>
* </ul>
* @since 1.4.0
*/
@scala.annotation.varargs
def parquet(paths: String*): DataFrame = {
format("parquet").load(paths: _*)
}
/**
* Loads an ORC file and returns the result as a `DataFrame`.
*
* @param path input path
* @since 1.5.0
* @note Currently, this method can only be used after enabling Hive support.
*/
def orc(path: String): DataFrame = {
// This method ensures that calls that explicit need single argument works, see SPARK-16009
orc(Seq(path): _*)
}
/**
* Loads ORC files and returns the result as a `DataFrame`.
*
* @param paths input paths
* @since 2.0.0
* @note Currently, this method can only be used after enabling Hive support.
*/
@scala.annotation.varargs
def orc(paths: String*): DataFrame = format("orc").load(paths: _*)
  /**
   * Returns the specified table as a `DataFrame`.
   *
   * @param tableName name of the table to load from the catalog
   * @since 1.4.0
   */
  def table(tableName: String): DataFrame = {
    // A user-specified schema is rejected here: the table's catalog schema always applies.
    assertNoSpecifiedSchema("table")
    sparkSession.table(tableName)
  }
/**
* Loads text files and returns a `DataFrame` whose schema starts with a string column named
* "value", and followed by partitioned columns if there are any. See the documentation on
* the other overloaded `text()` method for more details.
*
* @since 2.0.0
*/
def text(path: String): DataFrame = {
// This method ensures that calls that explicit need single argument works, see SPARK-16009
text(Seq(path): _*)
}
/**
* Loads text files and returns a `DataFrame` whose schema starts with a string column named
* "value", and followed by partitioned columns if there are any.
*
* By default, each line in the text files is a new row in the resulting DataFrame. For example:
* {{{
* // Scala:
* spark.read.text("/path/to/spark/README.md")
*
* // Java:
* spark.read().text("/path/to/spark/README.md")
* }}}
*
* You can set the following text-specific option(s) for reading text files:
* <ul>
* <li>`wholetext` (default `false`): If true, read a file as a single row and not split by "\n".
* </li>
* <li>`lineSep` (default covers all `\r`, `\r\n` and `\n`): defines the line separator
* that should be used for parsing.</li>
* </ul>
*
* @param paths input paths
* @since 1.6.0
*/
@scala.annotation.varargs
def text(paths: String*): DataFrame = format("text").load(paths : _*)
/**
* Loads text files and returns a [[Dataset]] of String. See the documentation on the
* other overloaded `textFile()` method for more details.
* @since 2.0.0
*/
def textFile(path: String): Dataset[String] = {
// This method ensures that calls that explicit need single argument works, see SPARK-16009
textFile(Seq(path): _*)
}
/**
* Loads text files and returns a [[Dataset]] of String. The underlying schema of the Dataset
* contains a single string column named "value".
*
* If the directory structure of the text files contains partitioning information, those are
* ignored in the resulting Dataset. To include partitioning information as columns, use `text`.
*
* By default, each line in the text files is a new row in the resulting DataFrame. For example:
* {{{
* // Scala:
* spark.read.textFile("/path/to/spark/README.md")
*
* // Java:
* spark.read().textFile("/path/to/spark/README.md")
* }}}
*
* You can set the following textFile-specific option(s) for reading text files:
* <ul>
* <li>`wholetext` (default `false`): If true, read a file as a single row and not split by "\n".
* </li>
* <li>`lineSep` (default covers all `\r`, `\r\n` and `\n`): defines the line separator
* that should be used for parsing.</li>
* </ul>
*
* @param paths input path
* @since 2.0.0
*/
@scala.annotation.varargs
def textFile(paths: String*): Dataset[String] = {
assertNoSpecifiedSchema("textFile")
text(paths : _*).select("value").as[String](sparkSession.implicits.newStringEncoder)
}
/**
* A convenient function for schema validation in APIs.
*/
private def assertNoSpecifiedSchema(operation: String): Unit = {
if (userSpecifiedSchema.nonEmpty) {
throw new AnalysisException(s"User specified schema not supported with `$operation`")
}
}
/**
* A convenient function for schema validation in datasources supporting
* `columnNameOfCorruptRecord` as an option.
*/
private def verifyColumnNameOfCorruptRecord(
schema: StructType,
columnNameOfCorruptRecord: String): Unit = {
schema.getFieldIndex(columnNameOfCorruptRecord).foreach { corruptFieldIndex =>
val f = schema(corruptFieldIndex)
if (f.dataType != StringType || !f.nullable) {
throw new AnalysisException(
"The field for corrupt records must be string type and nullable")
}
}
}
  ///////////////////////////////////////////////////////////////////////////////////////
  // Builder pattern config options
  ///////////////////////////////////////////////////////////////////////////////////////
  // Data source format name; defaults to the session's configured default data source.
  private var source: String = sparkSession.sessionState.conf.defaultDataSourceName
  // Schema explicitly supplied via `schema(...)`, if any; None means infer or use defaults.
  private var userSpecifiedSchema: Option[StructType] = None
  // Options accumulated from `option(...)` calls; passed through to the underlying source.
  private val extraOptions = new scala.collection.mutable.HashMap[String, String]
}
| lvdongr/spark | sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala | Scala | apache-2.0 | 33,845 |
/*
* Copyright 2014 The Guardian
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.madgag.time
import _root_.java.time.{Instant, Duration, Clock}
import _root_.java.time.temporal.Temporal
import java.util.concurrent.TimeUnit
import java.{time => java}
import org.joda.{time => joda}
import scala.concurrent.duration.FiniteDuration
import scala.language.implicitConversions
/**
 * Implicit bridges between `java.time`, Joda-Time and Scala's duration types.
 *
 * All implicit conversions carry explicit result types, as recommended for implicit
 * members: inferred result types on implicits can silently change client code meaning.
 */
object Implicits {

  /** Adds an `age` method to any `java.time.Temporal` convertible to an `Instant`. */
  implicit class RichTemporal(temporal: Temporal) {
    /** Duration elapsed between this temporal value and "now" on the given clock. */
    def age()(implicit clock: Clock = Clock.systemUTC): Duration =
      Duration.between(Instant.from(temporal), clock.instant())
  }

  // "Z" has no Joda zone id of its own, so map it to UTC explicitly.
  implicit def javaZone2JodaDateTimeZone(zoneId: java.ZoneId): joda.DateTimeZone =
    if (zoneId.getId == "Z") joda.DateTimeZone.UTC else joda.DateTimeZone.forID(zoneId.getId)

  implicit def javaZonedDateTime2JodaDateTime(zonedDateTime: java.ZonedDateTime): joda.DateTime =
    new joda.DateTime(zonedDateTime.toInstant.toEpochMilli, zonedDateTime.getZone)

  implicit def jodaInstant2javaInstant(instant: joda.Instant): java.Instant =
    java.Instant.ofEpochMilli(instant.getMillis)

  implicit def jodaDateTimeZone2javaZoneId(dateTimeZone: joda.DateTimeZone): java.ZoneId =
    java.ZoneId.of(dateTimeZone.getID)

  implicit def jodaDateTime2JavaZonedDateTime(dateTime: joda.DateTime): java.ZonedDateTime =
    java.ZonedDateTime.ofInstant(dateTime.toInstant, dateTime.getZone)

  // NOTE: converts via nanoseconds, so infinite Scala durations would throw; callers are
  // expected to pass finite durations (behavior unchanged from the original).
  implicit def scalaDuration2javaDuration(dur: scala.concurrent.duration.Duration): java.Duration =
    java.Duration.ofNanos(dur.toNanos)

  implicit def duration2SDuration(dur: joda.Duration): FiniteDuration =
    FiniteDuration(dur.getMillis, TimeUnit.MILLISECONDS)

  implicit def javaDuration2SDuration(dur: java.Duration): FiniteDuration =
    FiniteDuration(dur.toMillis, TimeUnit.MILLISECONDS)

  implicit def javaDuration2jodaDuration(dur: java.Duration): joda.Duration =
    joda.Duration.millis(dur.toMillis)
}
| rtyley/play-git-hub | src/main/scala/com/madgag/time/Implicits.scala | Scala | gpl-3.0 | 2,330 |
package net.tobysullivan.shorturl.test
import net.tobysullivan.shorturl._
object Configuration {
  // Connection parameters for the test MySQL instance (local Vagrant box defaults).
  val MYSQL_HOSTNAME: String = "192.168.33.10"
  val MYSQL_USERNAME: String = "root"
  val MYSQL_PASSWORD: String = "root"
  val MYSQL_DATABASE: String = "shorturl"
  // You can optionally use the following InMemory storage instead of the MySQL DB defined below. This will be faster but not persistant
  // val HASH_STORE: HashStore = InMemoryDataStore
  // val STATS_STORE: StatsStore = InMemoryDataStore
  val mysqlDb = new MySqlDataStore(MYSQL_HOSTNAME, MYSQL_USERNAME, MYSQL_PASSWORD, MYSQL_DATABASE)
  // Both stores share the single MySQL-backed data store instance above.
  val HASH_STORE: HashStore = mysqlDb
  val STATS_STORE: StatsStore = mysqlDb
} | tobyjsullivan/shorturl | src/test/scala/net/tobysullivan/shorturl/test/Configuration.scala | Scala | mit | 686 |
package net.sansa_stack.owl.spark.rdd
import com.typesafe.scalalogging.{Logger => ScalaLogger}
import net.sansa_stack.owl.common.parsing.{RDFXMLSyntaxParsing, RDFXMLSyntaxPrefixParsing}
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapred.JobConf
import org.apache.log4j.{Level, Logger => Log4JLogger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.semanticweb.owlapi.model.OWLAxiom
import scala.collection.JavaConverters._
class RDFXMLSyntaxOWLExpressionsRDDBuilder extends Serializable with RDFXMLSyntaxPrefixParsing with RDFXMLSyntaxParsing {
  private val logger = ScalaLogger(this.getClass)

  /**
   * Builds a snippet conforming to the RDF/XML syntax which then can
   * be parsed by the OWL API RDF/XML syntax parser.
   * A single expression, e.g.
   *
   * <rdf:Description rdf:about="http://swat.cse.lehigh.edu/onto/univ-bench.owl#Person">
   * <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#Class"/>
   * </rdf:Description>
   *
   * has thus to be wrapped into a ontology description as follows
   *
   * <rdf:RDF
   * xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
   * xmlns:owl="http://www.w3.org/2002/07/owl#">
   * <owl:Class rdf:about="http://swat.cse.lehigh.edu/onto/univ-bench.owl#Person"/>
   * </rdf:RDF>
   *
   * @param spark SparkSession
   * @param filePath the path to the input file
   * @return The set of axioms corresponding to the expression.
   */
  def build(spark: SparkSession, filePath: String): OWLAxiomsRDD = {
    val conf = new JobConf()
    val prefixes = new JobConf()
    // Each record is the XML fragment between <rdf:Description ...> and </rdf:Description>.
    conf.set("stream.recordreader.class", "org.apache.hadoop.streaming.StreamXmlRecordReader")
    conf.set("stream.recordreader.begin", "<rdf:Description") // start Tag
    conf.set("stream.recordreader.end", "</rdf:Description>") // End Tag
    // The namespace/prefix declarations live in the opening <rdf:RDF ...> element.
    prefixes.set("stream.recordreader.class", "org.apache.hadoop.streaming.StreamXmlRecordReader")
    prefixes.set("stream.recordreader.begin", "<rdf:RDF") // start Tag
    prefixes.set("stream.recordreader.end", ">") // End Tag
    org.apache.hadoop.mapred.FileInputFormat.addInputPaths(conf, filePath)
    org.apache.hadoop.mapred.FileInputFormat.addInputPaths(prefixes, filePath)
    // RDF/XML Record
    // read data and save in RDD as block-RDF/XML Record
    val rdfXMLRecord: RDD[(Text, Text)] = spark.sparkContext.hadoopRDD(conf,
      classOf[org.apache.hadoop.streaming.StreamInputFormat],
      classOf[org.apache.hadoop.io.Text],
      classOf[org.apache.hadoop.io.Text])
    // Convert the block-RDF/XML record to String
    val rawRDD: RDD[String] = rdfXMLRecord.map { case (x, _) => x.toString }
    // RDF/XML prefixes
    // Read data and save in RDD as block-RDF/XML prefixes
    val rdfXMLPrefixes = spark.sparkContext.hadoopRDD(prefixes,
      classOf[org.apache.hadoop.streaming.StreamInputFormat],
      classOf[org.apache.hadoop.io.Text],
      classOf[org.apache.hadoop.io.Text])
    // Convert the block-RDF/XML prefixes to String
    val tmpPrefixes = rdfXMLPrefixes.map { case (x, _) => x.toString }.distinct()
    // NOTE(review): reduce materializes all distinct prefix blocks on the driver —
    // assumed to be small (one per input file); confirm for very large inputs.
    val prefixesString: String = tmpPrefixes.reduce((a, b) => a + "\\n" + b)
    // Parse each record with the shared prefix declarations, drop empty results,
    // flatten the per-record axiom sets and de-duplicate.
    val owlAxioms: RDD[OWLAxiom] =
      rawRDD.map(record => parseRecord(record, prefixesString))
        .filter(x => !x.isEmpty)
        .flatMap(axioms => axioms.asScala)
        .distinct()
    val refinedRDD = refineOWLAxioms(spark.sparkContext, owlAxioms)
    // logger.debug(s"Axioms count = ${refinedRDD.count()}")
    refinedRDD
  }
}
object RDFXMLSyntaxOWLExpressionsRDDBuilder {
  private val logger = ScalaLogger(this.getClass)

  // Small demo entry point: parses the bundled univ-bench ontology on a local Spark
  // session and logs every resulting axiom.
  def main(args: Array[String]): Unit = {
    val input: String = getClass.getResource("/univ-bench.rdf").getPath
    logger.info("================================")
    logger.info("|        RDF/XML Parser        |")
    logger.info("================================")
    @transient val sparkSession = SparkSession.builder
      .master("local[*]")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .appName("RDF/XML Parser")
      .getOrCreate()
    // Silence the very chatty Akka/driver loggers for the demo.
    Log4JLogger.getLogger("akka").setLevel(Level.OFF)
    Log4JLogger.getLogger(this.getClass).setLevel(Level.ERROR)
    val RDFXMLBuilder = new RDFXMLSyntaxOWLExpressionsRDDBuilder
    val rdd: OWLAxiomsRDD = RDFXMLBuilder.build(sparkSession, input)
    rdd.foreach(axiom => logger.info(axiom.toString))
    sparkSession.stop
  }
}
| SANSA-Stack/SANSA-RDF | sansa-owl/sansa-owl-spark/src/main/scala/net/sansa_stack/owl/spark/rdd/RDFXMLSyntaxOWLExpressionsRDDBuilder.scala | Scala | apache-2.0 | 4,438 |
/**
* Copyright (c) 2014 MongoDB, Inc.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
* For questions and comments about this product, please see the project page at:
*
* https://github.com/mongodb/mongo-scala-driver
*
*/
import com.typesafe.sbt._
import SbtSite._
import SiteKeys._
import SbtGit._
import GitKeys._
import SbtGhPages._
import GhPagesKeys._
import org.scalastyle.sbt.ScalastylePlugin
import sbtassembly.Plugin._
import sbt._
import Keys._
import scala.Some
import AssemblyKeys._
/**
 * sbt 0.13-era Build definition for the mongo-scala-driver project.
 * Uses the legacy `<<=`/`<++=` task-dependency operators of that sbt generation.
 */
object MongoScalaBuild extends Build {

  import Dependencies._
  import Resolvers._

  // Common project coordinates, dependencies and compiler flags.
  val buildSettings = Seq(
    organization := "org.mongodb",
    organizationHomepage := Some(url("http://www.mongodb.org")),
    version := "0.1-SNAPSHOT",
    scalaVersion := "2.11.0",
    libraryDependencies ++= coreDependencies ++ testDependencies,
    resolvers := mongoScalaResolvers,
    scalacOptions ++= Seq("-unchecked", "-deprecation", "-feature" /*, "-Xlog-implicits", "-Yinfer-debug", "-Xprint:typer" */),
    scalacOptions in(Compile, doc) ++= Seq("-diagrams", "-implicits")
  )

  // Pre-import the driver package in the sbt console.
  val consoleSettings = Seq(initialCommands in console := """import org.mongodb.scala._""")

  /**
   * Documentation
   */
  val docSettings =
    SbtSite.site.settings ++
      SbtSite.site.sphinxSupport() ++
      ghpages.settings ++
      Seq(
        siteSourceDirectory := file("docs"),
        siteDirectory := file("target/site"),
        // depending on the version, copy the api files to a different directory
        siteMappings <++= (mappings in packageDoc in Compile, version) map {
          (m, v) =>
            for ((f, d) <- m) yield (f, if (v.trim.endsWith("SNAPSHOT")) ("api/master/" + d) else ("api/" + v + "/" + d))
        },
        // override the synchLocal task to avoid removing the existing files
        synchLocal <<= (privateMappings, updatedRepository, ghpagesNoJekyll, gitRunner, streams) map {
          (mappings, repo, noJekyll, git, s) =>
            val betterMappings = mappings map {
              case (file, target) => (file, repo / target)
            }
            IO.copy(betterMappings)
            if (noJekyll) IO.touch(repo / ".nojekyll")
            repo
        },
        ghpagesNoJekyll := true,
        gitRemoteRepo := "git@github.com:mongodb/mongo-scala-driver.git"
      ) ++ inConfig(config("sphinx"))(Seq(sourceDirectory := file("docs")))

  val scalaStyleSettings = ScalastylePlugin.Settings ++ Seq(org.scalastyle.sbt.PluginKeys.config := file("project/scalastyle-config.xml"))

  val publishSettings = Publish.settings

  // Also publish an "alldep" fat jar built by sbt-assembly; skip tests during assembly.
  val assemblyJarSettings = assemblySettings ++ addArtifact(Artifact("mongo-scala-driver-alldep", "jar", "jar"), assembly) ++ Seq(test in assembly := {})

  // Test configuration
  val testSettings = Seq(
    testFrameworks += TestFrameworks.ScalaTest,
    testFrameworks in PerfTest := Seq(new TestFramework("org.scalameter.ScalaMeterFramework")),
    testOptions in Test := Seq(Tests.Filter(testFilter)),
    testOptions in AccTest := Seq(Tests.Filter(accFilter)),
    testOptions in IntTest := Seq(Tests.Filter(itFilter)),
    testOptions in UnitTest := Seq(Tests.Filter(unitFilter)),
    testOptions in PerfTest := Seq(Tests.Filter(perfFilter)),
    parallelExecution in PerfTest := false,
    logBuffered in PerfTest := false
  ) ++ Seq(AccTest, IntTest, UnitTest, PerfTest).flatMap {
    inConfig(_)(Defaults.testTasks)
  }

  // Suite membership is decided by class-name suffix conventions.
  def accFilter(name: String): Boolean = name endsWith "ASpec"

  def itFilter(name: String): Boolean = name endsWith "ISpec"

  def perfFilter(name: String): Boolean = name endsWith "Benchmark"

  def unitFilter(name: String): Boolean = !itFilter(name) && !accFilter(name) && !perfFilter(name)

  def testFilter(name: String): Boolean = !perfFilter(name)

  lazy val IntTest = config("it") extend Test

  lazy val UnitTest = config("unit") extend Test

  lazy val AccTest = config("acc") extend Test

  lazy val PerfTest = config("perf") extend Test

  /*
   * Coursera styleCheck command
   */
  val styleCheck = TaskKey[Unit]("styleCheck")

  /**
   * depend on compile to make sure the sources pass the compiler
   */
  val styleCheckSetting = styleCheck <<= (compile in Compile, sources in Compile, streams) map {
    (_, sourceFiles, s) =>
      val logger = s.log
      val (feedback, score) = StyleChecker.assess(sourceFiles)
      logger.info(feedback)
      logger.info(s"Style Score: $score out of ${StyleChecker.maxResult}")
  }

  lazy val mongoScalaDriver = Project(
    id = "mongo-scala-driver",
    base = file("driver")
  ).configs(IntTest)
    .configs(AccTest)
    .configs(UnitTest)
    .configs(PerfTest)
    .settings(buildSettings: _*)
    .settings(consoleSettings: _*)
    .settings(docSettings: _*)
    .settings(testSettings: _*)
    .settings(styleCheckSetting: _*)
    .settings(scalaStyleSettings: _*)
    .settings(publishSettings: _*)
    .settings(assemblyJarSettings: _*)

  override def rootProject = Some(mongoScalaDriver)
}
| antonnik/code-classifier | naive_bayes/resources/scala/MongoScalaBuild.scala | Scala | apache-2.0 | 5,718 |
package views.html
import play.templates._
import play.templates.TemplateMagic._
import play.api.templates._
import play.api.templates.PlayMagic._
import models._
import controllers._
import java.lang._
import java.util._
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import play.api.i18n._
import play.core.j.PlayMagicForJava._
import play.mvc._
import play.data._
import play.api.data.Field
import play.mvc.Http.Context.Implicit._
import views.html._
/**/
// Auto-generated by the Play 2 template compiler from Devault.scala.html — do not edit by
// hand; change the .scala.html template and regenerate instead (see the GENERATED footer).
object Devault extends BaseScalaTemplate[play.api.templates.HtmlFormat.Appendable,Format[play.api.templates.HtmlFormat.Appendable]](play.api.templates.HtmlFormat) with play.api.templates.Template0[play.api.templates.HtmlFormat.Appendable] {

  /**/
  def apply():play.api.templates.HtmlFormat.Appendable = {
    _display_ {

      Seq[Any](_display_(Seq[Any](/*1.2*/Main("Imaikalani DeVault")/*1.28*/ {_display_(Seq[Any](format.raw/*1.30*/("""
<div class="container">
<div class="row">
<div class="col-md-4">
<div class="well">
<div class="devault"></div>
</div>
</div>
<div class="well col-md-8">
<h3>Imaikalani DeVault</h3>
<p>The Valley Isle's Imaikalani DeValult is yet another contender from Maui poised to break onto the world
stage. Like so many of the other Maui boys that came before him, Imai's stomping grounds are the rippable peaks
of Ho'okipa. Above the lip, he's on the same level as many of his peers and his style is downright buttery.
Although he's had some solid results in the past, for Imaikalani to further his career to the next level, he'll
need to make another hard push on the contest scene next year. Lucky for him, he's got Volcom's esteemed coach,
Dave Riddle, in his corner lighting the path ahead.</p>
</div>
</div>
</div>
""")))})),format.raw/*20.2*/("""
"""))}
  }

  def render(): play.api.templates.HtmlFormat.Appendable = apply()

  def f:(() => play.api.templates.HtmlFormat.Appendable) = () => apply()

  def ref: this.type = this

}
/*
-- GENERATED --
DATE: Fri Jan 10 22:31:23 HST 2014
SOURCE: /Users/eduardgamiao/Desktop/ICS414/Mepedia/app/views/Devault.scala.html
HASH: cc0559e8c566680613dc5db5a16642f1ccdcd999
MATRIX: 866->1|900->27|939->29|1874->933
LINES: 29->1|29->1|29->1|48->20
-- GENERATED --
*/
| eduardgamiao/Mepedia | target/scala-2.10/src_managed/main/views/html/Devault.template.scala | Scala | gpl-3.0 | 2,541 |
package cromwell.core.retry
import cromwell.core.retry.Retry._
import cromwell.core.{CromwellFatalException, TestKitSuite}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{FlatSpecLike, Matchers}
import scala.concurrent.Future
class RetrySpec extends TestKitSuite("retry-spec") with FlatSpecLike with Matchers with ScalaFutures {

  // Marker exception type used to exercise the `isTransient` classifier.
  class TransientException extends Exception

  /**
   * Work stub that fails its first `n` invocations and then succeeds with 9.
   * After each failure the remaining-failure counter is decremented; failures for which
   * the remaining count is <= `transients` raise TransientException, the earlier ones
   * raise IllegalArgumentException.
   */
  class MockWork(n: Int, transients: Int = 0) {
    implicit val ec = system.dispatcher

    // Remaining failures before doIt() starts succeeding.
    var counter = n

    def doIt(): Future[Int] = {
      if (counter == 0)
        Future.successful(9)
      else {
        counter -= 1
        val ex = if (counter <= transients) new TransientException else new IllegalArgumentException("Failed")
        Future.failed(ex)
      }
    }
  }

  implicit val defaultPatience = PatienceConfig(timeout = Span(30, Seconds), interval = Span(100, Millis))

  // Convenience wrapper around Retry.withRetry with overridable failure classifiers.
  private def runRetry(retries: Int,
                       work: MockWork,
                       isTransient: Throwable => Boolean = Retry.throwableToFalse,
                       isFatal: Throwable => Boolean = Retry.throwableToFalse): Future[Int] = {
    withRetry(
      f = () => work.doIt(),
      maxRetries = Option(retries),
      isTransient = isTransient,
      isFatal = isFatal
    )
  }

  "Retry" should "retry a function until it works" in {
    val work = new MockWork(2)
    whenReady(runRetry(3, work)) { x =>
      x shouldBe 9
      work.counter shouldBe 0
    }
  }

  it should "fail if it hits the max retry count" in {
    whenReady(runRetry(1, new MockWork(3)).failed) { x =>
      x shouldBe an [CromwellFatalException]
    }
  }

  it should "fail if it hits a fatal exception" in {
    val work = new MockWork(3)
    whenReady(runRetry(3, work, isFatal = (t: Throwable) => t.isInstanceOf[IllegalArgumentException]).failed) { x =>
      x shouldBe an [CromwellFatalException]
      // A fatal failure aborts immediately: only one attempt was consumed.
      work.counter shouldBe 2
    }

    val work2 = new MockWork(4, 2)
    val retry = runRetry(4,
      work2,
      isFatal = (t: Throwable) => t.isInstanceOf[IllegalArgumentException],
      isTransient = (t: Throwable) => t.isInstanceOf[TransientException])
    whenReady(retry.failed) { x =>
      x shouldBe an [CromwellFatalException]
      work2.counter shouldBe 3
    }
  }

  it should "not count transient errors against the max limit" in {
    val work = new MockWork(3, 1)
    whenReady(runRetry(3, work, isTransient = (t: Throwable) => t.isInstanceOf[TransientException])) { x =>
      x shouldBe 9
      work.counter shouldBe 0
    }
  }
}
| ohsu-comp-bio/cromwell | core/src/test/scala/cromwell/core/retry/RetrySpec.scala | Scala | bsd-3-clause | 2,604 |
package com.productfoundry.akka.cqrs.process
import com.productfoundry.akka.serialization.Persistable
case class DeduplicationEntry(deduplicationId: String) extends Persistable
| Product-Foundry/akka-cqrs | core/src/main/scala/com/productfoundry/akka/cqrs/process/DeduplicationEntry.scala | Scala | apache-2.0 | 179 |
package io.getquill.context.jasync
import com.github.jasync.sql.db.Connection
import scala.concurrent.ExecutionContext
/**
 * An `ExecutionContext` that pairs a delegate context with the async `Connection` of an
 * in-flight transaction, so the connection travels implicitly alongside the scheduler.
 * All scheduling and failure reporting is forwarded verbatim to the wrapped context.
 */
case class TransactionalExecutionContext(ec: ExecutionContext, conn: Connection)
  extends ExecutionContext {
  // Delegate scheduling to the wrapped context.
  def execute(runnable: Runnable): Unit =
    ec.execute(runnable)
  // Delegate failure reporting to the wrapped context.
  def reportFailure(cause: Throwable): Unit =
    ec.reportFailure(cause)
}
| getquill/quill | quill-jasync/src/main/scala/io/getquill/context/jasync/TransactionalExecutionContext.scala | Scala | apache-2.0 | 377 |
package utils
import java.io.File
import breeze.linalg._
import scala.reflect.ClassTag
import scala.util.Random
/**
* A collection of utilities useful for matrices.
*/
/**
 * A collection of utilities useful for matrices.
 */
object MatrixUtils extends Serializable {

  /**
   * Converts a matrix to an array of row vectors.
   * @param mat Input matrix.
   * @return Array of rows.
   */
  def matrixToRowArray[T](mat: DenseMatrix[T]): Array[DenseVector[T]] = {
    // Transposing first turns each row of `mat` into a column of `matT`,
    // which can be sliced out with `matT(::, i)`.
    val matT = mat.t
    (0 until mat.rows).toArray.map(matT(::, _))
  }

  /**
   * Converts a sequence of DenseVector to a matrix where each vector is a row.
   *
   * @param in Sequence of DenseVectors (rows)
   * @return A row matrix.
   */
  def rowsToMatrix[T : ClassTag](in: TraversableOnce[DenseVector[T]]): DenseMatrix[T] = {
    rowsToMatrix(in.toArray)
  }

  /**
   * Converts an array of DenseVector to a matrix where each vector is a row.
   *
   * All input vectors must have the same length; the input must be non-empty.
   *
   * @param inArr Array of DenseVectors (rows)
   * @return A row matrix.
   */
  def rowsToMatrix[T : ClassTag](inArr: Array[DenseVector[T]]): DenseMatrix[T] = {
    val nRows = inArr.length
    val nCols = inArr(0).length
    // Fill a flat column-major buffer (DenseMatrix's storage order) by hand to avoid
    // per-element matrix indexing overhead.
    val outArr = new Array[T](nRows * nCols)
    var i = 0
    while (i < nRows) {
      var j = 0
      val row = inArr(i)
      while (j < nCols) {
        outArr(i + nRows * j) = row(j)
        j = j + 1
      }
      i = i + 1
    }
    val outMat = new DenseMatrix[T](nRows, nCols, outArr)
    outMat
  }

  /**
   * Draws sample rows from a matrix without replacement.
   *
   * @param in Input matrix.
   * @param numSamples Number of rows to draw (capped at the number of rows).
   * @return A matrix constructed from a sample of the rows, in ascending row order.
   */
  def sampleRows(in: DenseMatrix[Double], numSamples: Int): DenseMatrix[Double] = {
    val rows = Random.shuffle(0 to (in.rows - 1)).take(numSamples).sorted
    (in(rows, ::)).toDenseMatrix
  }

  /**
   * Deterministically shuffles an array in place (seeded Fisher-Yates walk).
   *
   * Note: previously the result type was inferred; it is now annotated explicitly,
   * as recommended for public API members. Behavior is unchanged.
   *
   * @param arr Array to shuffle (mutated in place).
   * @param seed PRNG seed; the default makes shuffles reproducible across runs.
   * @return The same (now shuffled) array, for convenient chaining.
   */
  def shuffleArray[T](arr: Array[T], seed: Int = 42): Array[T] = {
    // Shuffle each row in the same fashion
    val rnd = new java.util.Random(seed)
    var i = arr.length - 1
    while (i > 0) {
      val index = rnd.nextInt(i + 1)
      // Simple swap
      val a = arr(index)
      arr(index) = arr(i)
      arr(i) = a
      i = i - 1
    }
    arr
  }
}
| o0neup/keystone | src/main/scala/utils/MatrixUtils.scala | Scala | apache-2.0 | 2,184 |
package fly.play.aws
import java.net.URLEncoder
import java.util.{Calendar, Date, TimeZone}
import akka.util.ByteString
import fly.play.s3.S3SpecSetup
import play.api.libs.ws.{BodyWritable, InMemoryBody, WSRequest}
import play.api.test.WsTestClient
import scala.collection.mutable
import scala.language.reflectiveCalls
/**
 * Verifies the AWS Signature Version 4 implementation against fixed expectations for the
 * canonical request, string-to-sign, signature and Authorization header.
 * NOTE(review): the fixtures appear to mirror the worked examples published in the AWS SigV4
 * documentation (the "AKIAIOSFODNN7EXAMPLE" credentials) — verify against the published vectors.
 */
object Aws4SignerSpec extends S3SpecSetup {

  "Aws4Signer" should {

    "example1" >> {
      val expectedCannonicalRequest =
        """|GET
           |/
           |Action=GetSessionToken&DurationSeconds=3600&Version=2011-06-15
           |host:sts.amazonaws.com
           |x-amz-date:20120519T004356Z
           |
           |host;x-amz-date
           |e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855""".stripMargin

      val expectedStringToSign =
        """|AWS4-HMAC-SHA256
           |20120519T004356Z
           |20120519/us-east-1/sts/aws4_request
           |ec19857897328f82cfb526a6bae44824ad717e58272c3a018545b658ceba425d""".stripMargin

      val signer = {
        val cal = Calendar.getInstance(TimeZone.getTimeZone("GMT"))
        cal.set(2012, 4, 19, 0, 43, 56)
        newSigner("sts", cal.getTime)
      }

      WsTestClient.withClient { client =>
        val request =
          client
            .url("https://sts.amazonaws.com")
            .addQueryStringParameters(
              "Action" -> "GetSessionToken",
              "DurationSeconds" -> "3600",
              "Version" -> "2011-06-15")

        val signedRequest = signer.sign(request, "GET", Array.empty)

        test(signer, "cannonicalRequest", expectedCannonicalRequest)
        test(signer, "stringToSign", expectedStringToSign)

        "signed request date header" in {
          signedRequest.headers("X-Amz-Date") must_== Seq("20120519T004356Z")
        }
      }
    }

    // Same as example1, but with temporary credentials: the security token must be added
    // to the signed headers (x-amz-security-token).
    "example1 with temp credentials" >> {
      val expectedCannonicalRequest =
        """|GET
           |/
           |Action=GetSessionToken&DurationSeconds=3600&Version=2011-06-15
           |host:sts.amazonaws.com
           |x-amz-date:20120519T004356Z
           |x-amz-security-token:securitytoken
           |
           |host;x-amz-date;x-amz-security-token
           |e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855""".stripMargin

      val expectedStringToSign =
        """|AWS4-HMAC-SHA256
           |20120519T004356Z
           |20120519/us-east-1/sts/aws4_request
           |c3e0a99e512739a75104ce81eb353977af1f5bd8cfd4ade2db6b65d3c6f35d74""".stripMargin

      val tempCredentials = AwsCredentials("AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", Some("securitytoken"))

      val signer = {
        val cal = Calendar.getInstance(TimeZone.getTimeZone("GMT"))
        cal.set(2012, 4, 19, 0, 43, 56)
        newSigner("sts", cal.getTime, tempCredentials)
      }

      WsTestClient.withClient { client =>
        val request =
          client
            .url("https://sts.amazonaws.com")
            .addQueryStringParameters(
              "Action" -> "GetSessionToken",
              "DurationSeconds" -> "3600",
              "Version" -> "2011-06-15")

        val signedRequest = signer.sign(request, "GET", Array.empty)

        test(signer, "cannonicalRequest", expectedCannonicalRequest)
        test(signer, "stringToSign", expectedStringToSign)

        "signed request date header" in {
          signedRequest.headers("X-Amz-Date") must_== Seq("20120519T004356Z")
        }
      }
    }

    "example2 GET Object" >> {
      val expectedCannonicalRequest =
        """|GET
           |/test.txt
           |
           |host:examplebucket.s3.amazonaws.com
           |range:bytes=0-9
           |x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
           |x-amz-date:20130524T000000Z
           |
           |host;range;x-amz-content-sha256;x-amz-date
           |e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855""".stripMargin

      val expectedStringToSign =
        """|AWS4-HMAC-SHA256
           |20130524T000000Z
           |20130524/us-east-1/s3/aws4_request
           |7344ae5b7ee6c3e7e6b0fe0640412a37625d1fbfff95c48bbb2dc43964946972""".stripMargin

      val expectedSignature = "f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41"

      val expectedAuthorizationHeader = "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,SignedHeaders=host;range;x-amz-content-sha256;x-amz-date,Signature=f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41"

      val signer = newSigner()

      WsTestClient.withClient { client =>
        val request =
          client
            .url("http://examplebucket.s3.amazonaws.com/test.txt")
            .addHttpHeaders(
              signer.amzContentSha256(Array.empty),
              "Range" -> "bytes=0-9")

        val signedRequest = signer.sign(request, "GET", Array.empty)

        test(signer, "cannonicalRequest", expectedCannonicalRequest)
        test(signer, "stringToSign", expectedStringToSign)
        test(signer, "signature", expectedSignature)
        test(signer, "authorizationHeader", expectedAuthorizationHeader)

        "signed request" in {
          signedRequest.headers("Authorization") must_== Seq(expectedAuthorizationHeader)
        }
      }
    }

    "example3 PUT Object" >> {
      // Play 2.7 appends the charset to the content type ("text/plain; charset=UTF-8"),
      // which changes the canonical request and therefore every derived value below.
      val expectedCannonicalRequest =
        if (play.core.PlayVersion.current.startsWith("2.7."))
          """|PUT
             |/test%24file.text
             |
             |content-type:text/plain; charset=UTF-8
             |date:Fri, 24 May 2013 00:00:00 GMT
             |host:examplebucket.s3.amazonaws.com
             |x-amz-content-sha256:44ce7dd67c959e0d3524ffac1771dfbba87d2b6b4b4e99e42034a8b803f8b072
             |x-amz-date:20130524T000000Z
             |x-amz-storage-class:REDUCED_REDUNDANCY
             |
             |content-type;date;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class
             |44ce7dd67c959e0d3524ffac1771dfbba87d2b6b4b4e99e42034a8b803f8b072""".stripMargin
        else
          """|PUT
             |/test%24file.text
             |
             |content-type:text/plain
             |date:Fri, 24 May 2013 00:00:00 GMT
             |host:examplebucket.s3.amazonaws.com
             |x-amz-content-sha256:44ce7dd67c959e0d3524ffac1771dfbba87d2b6b4b4e99e42034a8b803f8b072
             |x-amz-date:20130524T000000Z
             |x-amz-storage-class:REDUCED_REDUNDANCY
             |
             |content-type;date;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class
             |44ce7dd67c959e0d3524ffac1771dfbba87d2b6b4b4e99e42034a8b803f8b072""".stripMargin

      val expectedStringToSign =
        if (play.core.PlayVersion.current.startsWith("2.7."))
          """|AWS4-HMAC-SHA256
             |20130524T000000Z
             |20130524/us-east-1/s3/aws4_request
             |f8bc133040708ec4d9462312b10d102a942e6984358391d90e7df4e66d07b0e0""".stripMargin
        else
          """|AWS4-HMAC-SHA256
             |20130524T000000Z
             |20130524/us-east-1/s3/aws4_request
             |0f10f84734b64a0f7ac71f28a26c6d34d07bc39df988e9e9c39bfc1fc154b6cd""".stripMargin

      val expectedSignature =
        if (play.core.PlayVersion.current.startsWith("2.7."))
          "fae025cee8702959355df87bdd1215eb556e1d70417f5ce18edd46e1dab34d40"
        else
          "f093977030bf8d8069918f1b3546fd02cf697d43e763c40d58c109a2e197bdac"

      val expectedAuthorizationHeader =
        if (play.core.PlayVersion.current.startsWith("2.7."))
          "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,SignedHeaders=content-type;date;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class,Signature=fae025cee8702959355df87bdd1215eb556e1d70417f5ce18edd46e1dab34d40"
        else
          "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,SignedHeaders=content-type;date;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class,Signature=f093977030bf8d8069918f1b3546fd02cf697d43e763c40d58c109a2e197bdac"

      val signer = newSigner()
      val body = "Welcome to Amazon S3."

      implicit def bodyWritable: BodyWritable[String] = BodyWritable(s => InMemoryBody(ByteString(s)), "text/plain")

      WsTestClient.withClient { client =>
        val request =
          client
            .url("http://examplebucket.s3.amazonaws.com/" + URLEncoder.encode("test$file.text", "UTF-8"))
            .addHttpHeaders(
              "x-amz-storage-class" -> "REDUCED_REDUNDANCY",
              "Date" -> "Fri, 24 May 2013 00:00:00 GMT",
              signer.amzContentSha256(body.getBytes))
            .withBody(body)

        val signedRequest = signer.sign(request, "PUT", body.getBytes)

        test(signer, "cannonicalRequest", expectedCannonicalRequest)
        test(signer, "stringToSign", expectedStringToSign)
        test(signer, "signature", expectedSignature)
        test(signer, "authorizationHeader", expectedAuthorizationHeader)

        "signed request" in {
          signedRequest.headers("Authorization") must_== Seq(expectedAuthorizationHeader)
        }
      }
    }

    "example4 GET Bucket Lifecycle" >> {
      val expectedCannonicalRequest =
        """|GET
           |/
           |lifecycle=
           |host:examplebucket.s3.amazonaws.com
           |x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
           |x-amz-date:20130524T000000Z
           |
           |host;x-amz-content-sha256;x-amz-date
           |e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855""".stripMargin

      val expectedStringToSign =
        """|AWS4-HMAC-SHA256
           |20130524T000000Z
           |20130524/us-east-1/s3/aws4_request
           |9766c798316ff2757b517bc739a67f6213b4ab36dd5da2f94eaebf79c77395ca""".stripMargin

      val expectedSignature =
        "fea454ca298b7da1c68078a5d1bdbfbbe0d65c699e0f91ac7a200a0136783543"

      val expectedAuthorizationHeader =
        "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=fea454ca298b7da1c68078a5d1bdbfbbe0d65c699e0f91ac7a200a0136783543"

      val signer = newSigner()

      WsTestClient.withClient { client =>
        val request =
          client
            .url("http://examplebucket.s3.amazonaws.com")
            .addQueryStringParameters("lifecycle" -> "")
            .addHttpHeaders(signer.amzContentSha256(Array.empty))

        val signedRequest = signer.sign(request, "GET", Array.empty)

        test(signer, "cannonicalRequest", expectedCannonicalRequest)
        test(signer, "stringToSign", expectedStringToSign)
        test(signer, "signature", expectedSignature)
        test(signer, "authorizationHeader", expectedAuthorizationHeader)

        "signed request" in {
          signedRequest.headers("Authorization") must_== Seq(expectedAuthorizationHeader)
        }
      }
    }

    "example5 Get Bucket (List Objects)" >> {
      val expectedCannonicalRequest =
        """|GET
           |/
           |max-keys=2&prefix=J
           |host:examplebucket.s3.amazonaws.com
           |x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
           |x-amz-date:20130524T000000Z
           |
           |host;x-amz-content-sha256;x-amz-date
           |e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855""".stripMargin

      val expectedStringToSign =
        """|AWS4-HMAC-SHA256
           |20130524T000000Z
           |20130524/us-east-1/s3/aws4_request
           |df57d21db20da04d7fa30298dd4488ba3a2b47ca3a489c74750e0f1e7df1b9b7""".stripMargin

      val expectedSignature =
        "34b48302e7b5fa45bde8084f4b7868a86f0a534bc59db6670ed5711ef69dc6f7"

      val expectedAuthorizationHeader =
        "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=34b48302e7b5fa45bde8084f4b7868a86f0a534bc59db6670ed5711ef69dc6f7"

      val signer = new AmzContentHeaderSignerSpy()

      WsTestClient.withClient { client =>
        val request =
          client
            .url("http://examplebucket.s3.amazonaws.com")
            .addQueryStringParameters(
              "prefix" -> "J",
              "max-keys" -> "2")

        val signedRequest = signer.sign(request, "GET", Array.empty)

        test(signer, "cannonicalRequest", expectedCannonicalRequest)
        test(signer, "stringToSign", expectedStringToSign)
        test(signer, "signature", expectedSignature)
        test(signer, "authorizationHeader", expectedAuthorizationHeader)

        "don't include headers twice" in {
          signedRequest.headers("X-Amz-Content-Sha256").size must_== 1
        }

        "signed request" in {
          signedRequest.headers("Authorization") must_== Seq(expectedAuthorizationHeader)
        }
      }
    }

    "example6 url signature" in {
      val expectedCannonicalRequest =
        """|GET
           |/test.txt
           |X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host
           |host:examplebucket.s3.amazonaws.com
           |
           |host
           |UNSIGNED-PAYLOAD""".stripMargin

      val expectedStringToSign =
        """|AWS4-HMAC-SHA256
           |20130524T000000Z
           |20130524/us-east-1/s3/aws4_request
           |3bfa292879f6447bbcda7001decf97f4a54dc650c8942174ae0a9121cf58ad04""".stripMargin

      val expectedSignature =
        "aeeed9bbccd4d02ee5c0109b86d86835f995330da4c265957d157751f604d404"

      val expectedUrl = "https://examplebucket.s3.amazonaws.com/test.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-Signature=aeeed9bbccd4d02ee5c0109b86d86835f995330da4c265957d157751f604d404&X-Amz-SignedHeaders=host"

      val signer = newSigner()

      val url = signer.signUrl("GET", "https://examplebucket.s3.amazonaws.com/test.txt", 86400)

      test(signer, "cannonicalRequest", expectedCannonicalRequest)
      test(signer, "stringToSign", expectedStringToSign)
      test(signer, "signature", expectedSignature)

      case class Content(url: String, queryString: Set[String])

      // Compares URLs ignoring query-string parameter order.
      class Url(url: String) {
        val content = {
          val Array(urlPart, queryStringPart) = url.split("\\?")
          Content(urlPart, queryStringPart.split("&").toSet)
        }
      }

      "signed url" in {
        new Url(url).content must_== new Url(expectedUrl).content
      }
    }
  }

  // Shared fixtures: the well-known "EXAMPLE" credentials and a fixed signing date so every
  // signature in this spec is reproducible.
  val defaultCredentials = AwsCredentials("AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY")

  val defaultDate = {
    val cal = Calendar.getInstance(TimeZone.getTimeZone("GMT"))
    cal.set(2013, 4, 24, 0, 0, 0)
    cal.getTime
  }

  def newSigner(service: String = "s3", date: Date = defaultDate, credentials: AwsCredentials = defaultCredentials) =
    new SignerSpy(service, date, credentials)

  // Asserts that the spy recorded the expected intermediate artifact under `name`.
  def test(signer: { def results: mutable.Map[String, String] }, name: String, expected: String) =
    name in {
      signer.results(name) must_== expected
    }

  // A signer that records every intermediate signing artifact (canonical request, string to
  // sign, signature, authorization header) so the tests above can assert on them.
  class SignerSpy(service: String = "s3", date: Date = defaultDate, credentials: AwsCredentials = defaultCredentials)
    extends Aws4Signer(credentials, service, "us-east-1") {

    val results = mutable.Map.empty[String, String]

    // Pin the signing clock so signatures are deterministic.
    override def currentDate = date

    override def createAuthorizationHeader(scope: Scope, signedHeaders: String, signature: String): String = {
      val authorizationHeader = super.createAuthorizationHeader(scope, signedHeaders, signature)
      results += "authorizationHeader" -> authorizationHeader
      authorizationHeader
    }

    override def createStringToSign(scope: Scope, cannonicalRequest: String): String = {
      val stringToSign = super.createStringToSign(scope, cannonicalRequest)
      results += "stringToSign" -> stringToSign
      stringToSign
    }

    override def createCannonicalRequest(request: AwsRequest) = {
      val cannonicalRequest = super.createCannonicalRequest(request)
      results += "cannonicalRequest" -> cannonicalRequest
      cannonicalRequest
    }

    override def createSignature(stringToSign: String, scope: Scope) = {
      val signature = super.createSignature(stringToSign, scope)
      results += "signature" -> signature
      signature
    }
  }

  // Spy that adds the x-amz-content-sha256 header itself before signing — used to verify
  // that signing does not duplicate an already-present header.
  class AmzContentHeaderSignerSpy extends SignerSpy {
    override def sign(request: WSRequest, method: String, body: Array[Byte]): WSRequest =
      super.sign(request.addHttpHeaders(amzContentSha256(Array.empty)), method, body)
  }
}
| Rhinofly/play-s3 | src/test/scala/fly/play/aws/Aws4SignerSpec.scala | Scala | mit | 16,832 |
package blended.activemq.brokerstarter.internal
import org.apache.activemq.broker.region.Destination
import org.apache.activemq.broker.region.policy.DeadLetterStrategy
import org.apache.activemq.broker.{BrokerPluginSupport, ProducerBrokerExchange}
import org.apache.activemq.command.{ActiveMQDestination, ActiveMQMessage, Message}
import scala.concurrent.duration.FiniteDuration
/**
* An Active MQ broker plugin which will enforce the TTL for given destination regardless whether
* the client has set the TTL. This actually breaks the JMS specification !!!
*/
class TTLEnforcingBrokerPlugin(ttls : Seq[(String, FiniteDuration)]) extends BrokerPluginSupport {

  /**
   * Intercepts every send: if the destination matches one of the configured patterns and is
   * not a dead letter queue, the message's timestamp is reset to "now" and its expiration is
   * forced to now + configured TTL, overriding whatever the producer set.
   */
  override def send(
    producerExchange: ProducerBrokerExchange,
    message: Message
  ) : Unit = {
    // Never touch DLQ traffic: expiring messages on the DLQ would silently discard them.
    if (!isDestinationDLQ(message)) {
      overrideTTL(message).foreach { ttl =>
        val timestamp : Long = System.currentTimeMillis()
        message.setTimestamp(timestamp)
        message.setExpiration(timestamp + ttl)
      }
    }
    super.send(producerExchange, message)
  }

  /**
   * The TTL (in millis) to enforce for the message's destination, if any.
   * The first configured pattern matching the destination's qualified name wins;
   * negative TTLs are treated as "do not override".
   */
  private def overrideTTL(message : Message) : Option[Long] = ttls.find { case (p, _) =>
    message.getDestination().getQualifiedName().matches(p)
  }.map(_._2.toMillis).filter(_ >= 0)

  /**
   * Whether the message is addressed to the dead letter queue of its (original) destination.
   * Returns false when any participant (message, region destination, DLQ destination) is null.
   */
  private def isDestinationDLQ(msg : Message) : Boolean = {
    // Bug fix: the previous implementation dereferenced `msg` while *building* the Option
    // pair, so a null message (or a null DLQ destination from the strategy) threw a
    // NullPointerException instead of falling through to `false`. The for-comprehension
    // below short-circuits on the first null.
    (for {
      message <- Option(msg)
      regionDest <- Option(message.getRegionDestination().asInstanceOf[Destination])
      dlqDest <- {
        val dls : DeadLetterStrategy = regionDest.getDeadLetterStrategy()
        val tmp : ActiveMQMessage = new ActiveMQMessage()
        tmp.setDestination(message.getOriginalDestination())
        tmp.setRegionDestination(regionDest)
        Option(dls.getDeadLetterQueueFor(tmp, null))
      }
    } yield dlqDest.equals(message.getDestination())).getOrElse(false)
  }
}
| woq-blended/blended | blended.activemq.brokerstarter/src/main/scala/blended/activemq/brokerstarter/internal/TTLEnforcingBrokerPlugin.scala | Scala | apache-2.0 | 1,852 |
package filodb.standalone
import scala.concurrent.duration._
import akka.actor.ActorRef
import akka.remote.testkit.MultiNodeConfig
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import filodb.coordinator._
import filodb.coordinator.client.LocalClient
import filodb.core.{GlobalConfig, Success}
import filodb.timeseries.TestTimeseriesProducer
/**
 * Multi-JVM cluster configuration for the singleton-failover test: three roles sharing the
 * standalone FiloDB configuration, with Akka cluster enabled and a random remoting port
 * per node.
 */
object ClusterSingletonFailoverMultiNodeConfig extends MultiNodeConfig {
  val first = role("first") // controller
  val second = role("second")
  val third = role("third")

  // To be consistent, the config file is actually passed in the MultiJvmNode*.opt files, and resolved automatically
  // using GlobalConfig. This means config resolution is identical between us and standalone FiloServer.
  val myConfig = overrides.withFallback(GlobalConfig.systemConfig)
  commonConfig(myConfig)

  val ingestDuration = 70.seconds // Needs to be long enough to allow Lucene to flush index

  // Akka cluster with loopback remoting; port = 0 makes each JVM pick a random free port.
  def overrides: Config = ConfigFactory.parseString(
    """
      akka {
        actor.provider = cluster
        remote.netty.tcp {
          hostname = "127.0.0.1"
          port = 0
        }
      }
    """)
}
// One concrete spec class per participating JVM: the multi-jvm test framework runs
// `...SpecMultiJvmNode<i>` in node i (class-name convention of sbt-multi-jvm).
class ClusterSingletonFailoverSpecMultiJvmNode1 extends ClusterSingletonFailoverSpec
class ClusterSingletonFailoverSpecMultiJvmNode2 extends ClusterSingletonFailoverSpec
class ClusterSingletonFailoverSpecMultiJvmNode3 extends ClusterSingletonFailoverSpec
/**
* This Multi-JVM Test validates cluster singleton failover when the node running the singleton fails.
* The next node should take over and recover existing shard state and subscribers. The shard status
* should still be the same as before (other than shard down) and ingestion should resume on the new node.
* This simulates a typical upgrade scenario.
*
* How to run:
* 1. Start Kafka and Cassandra on default ports:
* 2. Setup kafka topic:
* kafka-topics --create --zookeeper localhost:2181 --replication-factor 1 --partitions 4 --topic timeseries-dev
* 3. Run test using:
* standalone/multi-jvm:testOnly filodb.standalone.ClusterSingletonFailoverSpec
*
* NOTE: this test will run as part of the standard test directive when MAYBE_MULTI_JVM is set in the environment
*/
abstract class ClusterSingletonFailoverSpec extends StandaloneMultiJvmSpec(ClusterSingletonFailoverMultiNodeConfig)
  with ScalaFutures {
  import ClusterSingletonFailoverMultiNodeConfig._

  // Used for first start of servers on each node, stopped in test
  lazy val server = new FiloServer(watcher.ref)
  lazy val client1 = new LocalClient(server.cluster.coordinatorActor)

  logger.info(s"Accessed metaStore and colStore. Cluster should _not_ be starting up yet.")

  // Test fields
  var query1Response: Double = 0
  // Chunk flush interval plus slack; used to bound the `within` blocks below.
  val chunkDurationTimeout = chunkDuration + 20000.millis

  // Shut down this JVM's FiloDB server once the suite finishes.
  override def afterAll(): Unit = {
    awaitNodeDown(server)
    super.afterAll()
  }

  "ClusterSingletonFailoverSpec Multi-JVM Test" should "clear data and init dataset on node 1" in {
    runOn(second) {
      // Get a handle to the metastore and colstore from the server so we can clear and validate data/
      // Note that merely accessing them does not start up any cluster, but we are using the server's to ensure
      // that configs are consistent.
      val metaStore = server.cluster.metaStore
      val colStore = server.cluster.memStore.store

      implicit val patienceConfig = PatienceConfig(timeout = Span(10, Seconds), interval = Span(100, Millis))
      metaStore.initialize().futureValue shouldBe Success
      metaStore.clearAllData().futureValue shouldBe Success
      colStore.initialize(dataset).futureValue shouldBe Success
      colStore.truncate(dataset).futureValue shouldBe Success

      // NOTE(review): datasetObj appears unused — kept as-is.
      val datasetObj = TestTimeseriesProducer.dataset
      colStore.initialize(dataset).futureValue shouldBe Success
      logger.info("Dataset created")
    }
    enterBarrier("existing-data-cleared-and-dataset-created")
  }

  it should "be able to bring up FiloServers in right sequence" in {
    // The controller node (first) is where multi-jvm TestController runs.
    // It cannot be shut down, and in this test, is not assigned shards due to
    // the shard assignment strategy of youngest first.
    // The cluster singleton (second) always hands over to the next oldest (first) when oldest goes down.
    // The next oldest (first) then becomes oldest.
    // Nodes up order: second (oldest member), first (second oldest member), third
    // Shards assigned order: third (youngest), first (second-youngest)

    // oldest node hosts the initial cluster singleton
    runOn(second) {
      awaitNodeUp(server)
      watcher.expectMsgPF(20.seconds) {
        case NodeProtocol.PreStart(identity, address) =>
          address shouldEqual server.cluster.selfAddress
          Some(identity) shouldEqual server.cluster.clusterActor
          info(s" => PreStart on address=$address")
      }
    }
    enterBarrier("second-node-started")

    // second-oldest node hosts the handover cluster singleton when oldest becomes unreachable
    // first is a shards-unassigned standby node and the controller
    runOn(first) {
      awaitNodeUp(server)
      watcher.expectNoMessage(20.seconds)
    }
    enterBarrier("first-node-started")

    // shards assigned to second-youngest node, order = second
    runOn(third) {
      awaitNodeUp(server)
      watcher.expectNoMessage(20.seconds)
    }
    enterBarrier("third-node-started")

    awaitCond(server.cluster.state.members.size == roles.size, longDuration)
    info(s"All nodes initialized, address = ${server.cluster.selfAddress}")
    runOn(roles: _*) {
      info(s"  ${myself.name}: ${server.cluster.selfAddress}")
    }
    enterBarrier(s"${roles.size}-roles-initialized")
  }

  it should "be able to validate the cluster status as normal via CLI" in {
    runOn(first, third) {
      validateShardStatus(client1, Some(server.cluster.coordinatorActor)){ _ == ShardStatusActive }
    }
    enterBarrier("cluster-status-normal-on-cli")
  }

  // NOTE: 10000 samples / 100 time series = 100 samples per series
  // 100 * 10s = 1000seconds =~ 16 minutes
  val queryTimestamp = System.currentTimeMillis() - 195.minutes.toMillis

  it should "be able to ingest data into FiloDB via Kafka" in {
    within(chunkDurationTimeout) {
      runOn(third) {
        TestTimeseriesProducer.produceMetrics(source, 10000, 100, 200)
        info(s"Waiting for ingest-duration ($ingestDuration) to pass")
        // Sleep past the chunk flush interval so ingested data becomes queryable.
        Thread.sleep(chunkDuration.toMillis + 7000)
      }
      enterBarrier("data1-ingested")
    }
  }

  it should "answer query successfully" in {
    runOn(first, third) {   // TODO check second=UnknownDataset
      query1Response = runCliQuery(client1, queryTimestamp)
    }
    enterBarrier("query1-answered")
  }

  it should "have the expected initial shard assignments for up nodes by assignment strategy" in {
    within(chunkDurationTimeout * 30) {
      runOn(third) {
        validateShardAssignments(client1, 2, Seq(0, 1), server.cluster.coordinatorActor)
      }
      enterBarrier("shards-validated-on-singleton-restart-on-third")
      runOn(first) {
        validateShardAssignments(client1, 2, Seq(2, 3), server.cluster.coordinatorActor)
      }
      enterBarrier("shards-validated-on-singleton-restart-on-first")
    }
  }

  it should "failover the ClusterSingleton and recover state on first downed" in {
    // start order: second, first, third
    // stop order: second (no shards but we can't stop first?)
    // singleton handover: second to first
    within(removedDuration) {
      runOn(second) {
        // TODO use the testConductor for this
        //      runOn(first) {
        //        testConductor.shutdown(second, abort=true).futureValue
        info(s"   Downing $second ${server.cluster.selfAddress}")
        awaitNodeDown(server)
        watcher.expectMsgPF(longDuration) {
          case e @ NodeProtocol.PostStop(_, address) =>
            address shouldEqual server.cluster.selfAddress
        }
      }
      enterBarrier("downed-second")
    }
    within(removedDuration * 5) {
      runOn(first) {
        // The singleton should restart (PreStart) on this node after handover.
        watcher.expectMsgPF(removedDuration) {
          case e @ NodeProtocol.PreStart(_, address) =>
            address shouldEqual server.cluster.selfAddress
        }
      }
      enterBarrier("cluster-singleton-restarted-on-first")
    }
    runOn(first, third) {
      awaitCond(server.cluster.state.members.size == roles.size - 1, removedDuration)
    }
    enterBarrier("cluster-member-size-2")
  }

  it should "down first, un-assign shards (2,3), update mappers and verify against CLI" in {
    runOn(first) {
      awaitNodeDown(server)
    }
    enterBarrier("shard-assigned-node-down")

    within(removedDuration * 5) {
      runOn(third) {
        awaitCond(server.cluster.state.members.size == 1, longDuration)
      }
      enterBarrier("third-received-member-removed")
    }
    within(removedDuration * 10) {
      runOn(third) {
        client1.getShardMapper(dataset, longDuration) forall { mapper =>
          mapper.shardValues.count { case (ref, status) =>
            ref == ActorRef.noSender && status == ShardStatusDown } == 2 &&
            // proves the fix for recovery, with a shard-assigned node that was the singleton node, downed:
            // previously this was still assigned to the stale ref
            // and the stale coord was in the mapper, where now it is not
            mapper.unassignedShards == Seq(2, 3) &&
            mapper.assignedShards == Seq(0, 1) &&
            mapper.shardsForCoord(server.cluster.coordinatorActor) == Seq(0, 1)
            mapper.unassignedShards.forall(s => mapper.coordForShard(s) == ActorRef.noSender) &&
            mapper.allNodes.headOption.forall(_ == server.cluster.coordinatorActor) &&
            mapper.allNodes.size == 1
        } shouldEqual true
      }
      enterBarrier("on-recovery-shards-unassigned-node-removed")
    }
  }

  ignore should "answer promQL query successfully with same value" in {
    runOn(third) {
      val query2Response = runCliQuery(client1, queryTimestamp)
      (query2Response - query1Response).abs should be < 0.0001
    }
    enterBarrier("query2-answered")
  }

  // no way yet to have a node to reassign shards(2,3) to - constraint in test frameworks
  // and node assignment strategy. 10-20 different ways tried
  ignore should "be able to validate the cluster status as normal again via CLI" in {
    runOn(second) {
      validateShardStatus(client1, Some(server.cluster.coordinatorActor)) { _ == ShardStatusActive }
    }
    enterBarrier("shard-normal-end-of-test")
  }
}
| velvia/FiloDB | standalone/src/multi-jvm/scala/filodb/standalone/ClusterSingletonFailoverSpec.scala | Scala | apache-2.0 | 10,685 |
package com.github.mdr.graphospasm.grapheditor.actions
import org.eclipse.gef.ui.actions.Clipboard
import org.eclipse.gef.ui.actions.SelectionAction
import org.eclipse.ui.ISharedImages
import org.eclipse.ui.IWorkbenchPart
import org.eclipse.ui.PlatformUI
import org.eclipse.ui.actions.ActionFactory
import scala.collection.JavaConversions._
import com.github.mdr.graphospasm.grapheditor.model._
import com.github.mdr.graphospasm.grapheditor.part.NodeEditPart
/** Copies the selected nodes (and the connections running between them) to the GEF clipboard. */
class CopyAction(part: IWorkbenchPart) extends SelectionAction(part) {
  setId(ActionFactory.COPY.getId)
  setText("Copy")
  locally {
    val images = PlatformUI.getWorkbench.getSharedImages
    setImageDescriptor(images.getImageDescriptor(ISharedImages.IMG_TOOL_COPY))
    setDisabledImageDescriptor(images.getImageDescriptor(ISharedImages.IMG_TOOL_COPY_DISABLED))
  }
  override def run() {
    val selectedNodes = getSelectedObjects.toList.map(_.asInstanceOf[NodeEditPart].getModel)
    // Clone every selected node once, keeping the original -> clone correspondence so that
    // connection endpoints can be remapped onto the clones below.
    val clonePairs = selectedNodes.map(node => node -> node.copy)
    val cloneOf: Map[Node, Node] = clonePairs.toMap
    val clonedNodes = clonePairs.map(_._2)
    // TODO: duplication with clone nodes command
    // Walk source connections only so each connection is visited exactly once; connections
    // with an endpoint outside the selection are silently dropped.
    val clonedConnections = for {
      node <- selectedNodes
      connection <- node.sourceConnections
      source <- cloneOf.get(connection.source)
      target <- cloneOf.get(connection.target)
    } yield Connection.create(source, target, connection.nameOpt)
    Clipboard.getDefault.setContents(NodesAndConnections(clonedNodes, clonedConnections))
  }
  def calculateEnabled = getSelectedObjects.nonEmpty && getSelectedObjects.forall(_.isInstanceOf[NodeEditPart])
} | mdr/graphospasm | com.github.mdr.graphospasm.grapheditor/src/main/scala/com/github/mdr/graphospasm/grapheditor/actions/CopyAction.scala | Scala | mit | 1,800 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.io._
import java.nio.charset.StandardCharsets
import java.util.{ConcurrentModificationException, EnumSet, UUID}
import scala.reflect.ClassTag
import org.apache.commons.io.IOUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.hadoop.fs.permission.FsPermission
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.apache.spark.internal.Logging
import org.apache.spark.sql.SparkSession
import org.apache.spark.util.UninterruptibleThread
/**
* A [[MetadataLog]] implementation based on HDFS. [[HDFSMetadataLog]] uses the specified `path`
* as the metadata storage.
*
* When writing a new batch, [[HDFSMetadataLog]] will firstly write to a temp file and then rename
* it to the final batch file. If the rename step fails, there must be multiple writers and only
* one of them will succeed and the others will fail.
*
* Note: [[HDFSMetadataLog]] doesn't support S3-like file systems as they don't guarantee listing
* files in a directory always shows the latest files.
*/
class HDFSMetadataLog[T <: AnyRef : ClassTag](sparkSession: SparkSession, path: String)
extends MetadataLog[T] with Logging {
  // json4s serialization config: no type hints, default formats.
  private implicit val formats = Serialization.formats(NoTypeHints)

  /** Needed to serialize type T into JSON when using Jackson */
  private implicit val manifest = Manifest.classType[T](implicitly[ClassTag[T]].runtimeClass)

  // Avoid serializing generic sequences, see SPARK-17372
  require(implicitly[ClassTag[T]].runtimeClass != classOf[Seq[_]],
    "Should not create a log with type Seq, use Arrays instead - see SPARK-17372")

  import HDFSMetadataLog._

  val metadataPath = new Path(path)
  protected val fileManager = createFileManager()

  // Ensure the metadata directory exists at construction time.
  runUninterruptiblyIfLocal {
    if (!fileManager.exists(metadataPath)) {
      fileManager.mkdirs(metadataPath)
    }
  }
  /**
   * Runs `body`, disabling thread interrupts first when the underlying file system is local and
   * the current thread is an `UninterruptibleThread`; otherwise runs `body` directly.
   * See the inline comments for why local file systems need this (HADOOP-10622 / SPARK-14131).
   */
  private def runUninterruptiblyIfLocal[T](body: => T): T = {
    if (fileManager.isLocalFileSystem && Thread.currentThread.isInstanceOf[UninterruptibleThread]) {
      // When using a local file system, some file system APIs like "create" or "mkdirs" must be
      // called in [[org.apache.spark.util.UninterruptibleThread]] so that interrupts can be
      // disabled.
      //
      // This is because there is a potential dead-lock in Hadoop "Shell.runCommand" before
      // 2.5.0 (HADOOP-10622). If the thread running "Shell.runCommand" is interrupted, then
      // the thread can get deadlocked. In our case, file system APIs like "create" or "mkdirs"
      // will call "Shell.runCommand" to set the file permission if using the local file system,
      // and can get deadlocked if the stream execution thread is stopped by interrupt.
      //
      // Hence, we use "runUninterruptibly" here to disable interrupts here. (SPARK-14131)
      Thread.currentThread.asInstanceOf[UninterruptibleThread].runUninterruptibly {
        body
      }
    } else {
      // For a distributed file system, such as HDFS or S3, if the network is broken, write
      // operations may just hang until timeout. We should enable interrupts to allow stopping
      // the query fast.
      body
    }
  }
/**
* A `PathFilter` to filter only batch files
*/
protected val batchFilesFilter = new PathFilter {
override def accept(path: Path): Boolean = isBatchFile(path)
}
protected def batchIdToPath(batchId: Long): Path = {
new Path(metadataPath, batchId.toString)
}
protected def pathToBatchId(path: Path) = {
path.getName.toLong
}
protected def isBatchFile(path: Path) = {
try {
path.getName.toLong
true
} catch {
case _: NumberFormatException => false
}
}
protected def serialize(metadata: T, out: OutputStream): Unit = {
// called inside a try-finally where the underlying stream is closed in the caller
Serialization.write(metadata, out)
}
protected def deserialize(in: InputStream): T = {
// called inside a try-finally where the underlying stream is closed in the caller
val reader = new InputStreamReader(in, StandardCharsets.UTF_8)
Serialization.read[T](reader)
}
/**
* Store the metadata for the specified batchId and return `true` if successful. If the batchId's
* metadata has already been stored, this method will return `false`.
*/
override def add(batchId: Long, metadata: T): Boolean = {
get(batchId).map(_ => false).getOrElse {
// Only write metadata when the batch has not yet been written
runUninterruptiblyIfLocal {
writeBatch(batchId, metadata)
}
true
}
}
private def writeTempBatch(metadata: T): Option[Path] = {
while (true) {
val tempPath = new Path(metadataPath, s".${UUID.randomUUID.toString}.tmp")
try {
val output = fileManager.create(tempPath)
try {
serialize(metadata, output)
return Some(tempPath)
} finally {
IOUtils.closeQuietly(output)
}
} catch {
case e: IOException if isFileAlreadyExistsException(e) =>
// Failed to create "tempPath". There are two cases:
// 1. Someone is creating "tempPath" too.
// 2. This is a restart. "tempPath" has already been created but not moved to the final
// batch file (not committed).
//
// For both cases, the batch has not yet been committed. So we can retry it.
//
// Note: there is a potential risk here: if HDFSMetadataLog A is running, people can use
// the same metadata path to create "HDFSMetadataLog" and fail A. However, this is not a
// big problem because it requires the attacker must have the permission to write the
// metadata path. In addition, the old Streaming also have this issue, people can create
// malicious checkpoint files to crash a Streaming application too.
}
}
None
}
/**
* Write a batch to a temp file then rename it to the batch file.
*
* There may be multiple [[HDFSMetadataLog]] using the same metadata path. Although it is not a
* valid behavior, we still need to prevent it from destroying the files.
*/
private def writeBatch(batchId: Long, metadata: T): Unit = {
val tempPath = writeTempBatch(metadata).getOrElse(
throw new IllegalStateException(s"Unable to create temp batch file $batchId"))
try {
// Try to commit the batch
// It will fail if there is an existing file (someone has committed the batch)
logDebug(s"Attempting to write log #${batchIdToPath(batchId)}")
fileManager.rename(tempPath, batchIdToPath(batchId))
// SPARK-17475: HDFSMetadataLog should not leak CRC files
// If the underlying filesystem didn't rename the CRC file, delete it.
val crcPath = new Path(tempPath.getParent(), s".${tempPath.getName()}.crc")
if (fileManager.exists(crcPath)) fileManager.delete(crcPath)
} catch {
case e: IOException if isFileAlreadyExistsException(e) =>
// If "rename" fails, it means some other "HDFSMetadataLog" has committed the batch.
// So throw an exception to tell the user this is not a valid behavior.
throw new ConcurrentModificationException(
s"Multiple HDFSMetadataLog are using $path", e)
} finally {
fileManager.delete(tempPath)
}
}
private def isFileAlreadyExistsException(e: IOException): Boolean = {
e.isInstanceOf[FileAlreadyExistsException] ||
// Old Hadoop versions don't throw FileAlreadyExistsException. Although it's fixed in
// HADOOP-9361 in Hadoop 2.5, we still need to support old Hadoop versions.
(e.getMessage != null && e.getMessage.startsWith("File already exists: "))
}
/**
* @return the deserialized metadata in a batch file, or None if file not exist.
* @throws IllegalArgumentException when path does not point to a batch file.
*/
def get(batchFile: Path): Option[T] = {
if (fileManager.exists(batchFile)) {
if (isBatchFile(batchFile)) {
get(pathToBatchId(batchFile))
} else {
throw new IllegalArgumentException(s"File ${batchFile} is not a batch file!")
}
} else {
None
}
}
override def get(batchId: Long): Option[T] = {
val batchMetadataFile = batchIdToPath(batchId)
if (fileManager.exists(batchMetadataFile)) {
val input = fileManager.open(batchMetadataFile)
try {
Some(deserialize(input))
} catch {
case ise: IllegalStateException =>
// re-throw the exception with the log file path added
throw new IllegalStateException(
s"Failed to read log file $batchMetadataFile. ${ise.getMessage}", ise)
} finally {
IOUtils.closeQuietly(input)
}
} else {
logDebug(s"Unable to find batch $batchMetadataFile")
None
}
}
override def get(startId: Option[Long], endId: Option[Long]): Array[(Long, T)] = {
val files = fileManager.list(metadataPath, batchFilesFilter)
val batchIds = files
.map(f => pathToBatchId(f.getPath))
.filter { batchId =>
(endId.isEmpty || batchId <= endId.get) && (startId.isEmpty || batchId >= startId.get)
}
batchIds.sorted.map(batchId => (batchId, get(batchId))).filter(_._2.isDefined).map {
case (batchId, metadataOption) =>
(batchId, metadataOption.get)
}
}
override def getLatest(): Option[(Long, T)] = {
val batchIds = fileManager.list(metadataPath, batchFilesFilter)
.map(f => pathToBatchId(f.getPath))
.sorted
.reverse
for (batchId <- batchIds) {
val batch = get(batchId)
if (batch.isDefined) {
return Some((batchId, batch.get))
}
}
None
}
/**
* Get an array of [FileStatus] referencing batch files.
* The array is sorted by most recent batch file first to
* oldest batch file.
*/
def getOrderedBatchFiles(): Array[FileStatus] = {
fileManager.list(metadataPath, batchFilesFilter)
.sortBy(f => pathToBatchId(f.getPath))
.reverse
}
/**
* Removes all the log entry earlier than thresholdBatchId (exclusive).
*/
override def purge(thresholdBatchId: Long): Unit = {
val batchIds = fileManager.list(metadataPath, batchFilesFilter)
.map(f => pathToBatchId(f.getPath))
for (batchId <- batchIds if batchId < thresholdBatchId) {
val path = batchIdToPath(batchId)
fileManager.delete(path)
logTrace(s"Removed metadata log file: $path")
}
}
private def createFileManager(): FileManager = {
val hadoopConf = sparkSession.sessionState.newHadoopConf()
try {
new FileContextManager(metadataPath, hadoopConf)
} catch {
case e: UnsupportedFileSystemException =>
logWarning("Could not use FileContext API for managing metadata log files at path " +
s"$metadataPath. Using FileSystem API instead for managing log files. The log may be " +
s"inconsistent under failures.")
new FileSystemManager(metadataPath, hadoopConf)
}
}
/**
* Parse the log version from the given `text` -- will throw exception when the parsed version
* exceeds `maxSupportedVersion`, or when `text` is malformed (such as "xyz", "v", "v-1",
* "v123xyz" etc.)
*/
private[sql] def parseVersion(text: String, maxSupportedVersion: Int): Int = {
if (text.length > 0 && text(0) == 'v') {
val version =
try {
text.substring(1, text.length).toInt
} catch {
case _: NumberFormatException =>
throw new IllegalStateException(s"Log file was malformed: failed to read correct log " +
s"version from $text.")
}
if (version > 0) {
if (version > maxSupportedVersion) {
throw new IllegalStateException(s"UnsupportedLogVersion: maximum supported log version " +
s"is v${maxSupportedVersion}, but encountered v$version. The log file was produced " +
s"by a newer version of Spark and cannot be read by this version. Please upgrade.")
} else {
return version
}
}
}
// reaching here means we failed to read the correct log version
throw new IllegalStateException(s"Log file was malformed: failed to read correct log " +
s"version from $text.")
}
}
object HDFSMetadataLog {

  /** A simple trait to abstract out the file management operations needed by HDFSMetadataLog. */
  trait FileManager {

    /** List the files in a path that matches a filter. */
    def list(path: Path, filter: PathFilter): Array[FileStatus]

    /** Make directory at the given path and all its parent directories as needed. */
    def mkdirs(path: Path): Unit

    /** Whether path exists */
    def exists(path: Path): Boolean

    /** Open a file for reading, or throw exception if it does not exist. */
    def open(path: Path): FSDataInputStream

    /** Create path, or throw exception if it already exists */
    def create(path: Path): FSDataOutputStream

    /**
     * Atomically rename path, or throw exception if it cannot be done.
     * Should throw FileNotFoundException if srcPath does not exist.
     * Should throw FileAlreadyExistsException if destPath already exists.
     */
    def rename(srcPath: Path, destPath: Path): Unit

    /** Recursively delete a path if it exists. Should not throw exception if file doesn't exist. */
    def delete(path: Path): Unit

    /** Whether the file system is a local FS. */
    def isLocalFileSystem: Boolean
  }

  /**
   * Default implementation of FileManager using newer FileContext API.
   */
  class FileContextManager(path: Path, hadoopConf: Configuration) extends FileManager {
    // Resolve the FileContext from the path's scheme; fall back to the default FS
    // when the path has no scheme.
    private val fc = if (path.toUri.getScheme == null) {
      FileContext.getFileContext(hadoopConf)
    } else {
      FileContext.getFileContext(path.toUri, hadoopConf)
    }

    override def list(path: Path, filter: PathFilter): Array[FileStatus] = {
      fc.util.listStatus(path, filter)
    }

    override def rename(srcPath: Path, destPath: Path): Unit = {
      fc.rename(srcPath, destPath)
    }

    override def mkdirs(path: Path): Unit = {
      fc.mkdir(path, FsPermission.getDirDefault, true)
    }

    override def open(path: Path): FSDataInputStream = {
      fc.open(path)
    }

    override def create(path: Path): FSDataOutputStream = {
      // CREATE without OVERWRITE: fails if the file already exists, as the trait requires.
      fc.create(path, EnumSet.of(CreateFlag.CREATE))
    }

    override def exists(path: Path): Boolean = {
      fc.util().exists(path)
    }

    override def delete(path: Path): Unit = {
      try {
        fc.delete(path, true)
      } catch {
        case e: FileNotFoundException =>
          // ignore if file has already been deleted
      }
    }

    override def isLocalFileSystem: Boolean = fc.getDefaultFileSystem match {
      case _: local.LocalFs | _: local.RawLocalFs =>
        // LocalFs = RawLocalFs + ChecksumFs
        true
      case _ => false
    }
  }

  /**
   * Implementation of FileManager using older FileSystem API. Note that this implementation
   * cannot provide atomic renaming of paths, hence can lead to consistency issues. This
   * should be used only as a backup option, when FileContextManager cannot be used.
   */
  class FileSystemManager(path: Path, hadoopConf: Configuration) extends FileManager {
    private val fs = path.getFileSystem(hadoopConf)

    override def list(path: Path, filter: PathFilter): Array[FileStatus] = {
      fs.listStatus(path, filter)
    }

    /**
     * Rename a path. Note that this implementation is not atomic.
     * @throws FileNotFoundException if source path does not exist.
     * @throws FileAlreadyExistsException if destination path already exists.
     * @throws IOException if renaming fails for some unknown reason.
     */
    override def rename(srcPath: Path, destPath: Path): Unit = {
      // The exists/rename sequence below is inherently racy -- that is why this manager
      // is only a fallback for the atomic FileContextManager.
      if (!fs.exists(srcPath)) {
        throw new FileNotFoundException(s"Source path does not exist: $srcPath")
      }
      if (fs.exists(destPath)) {
        throw new FileAlreadyExistsException(s"Destination path already exists: $destPath")
      }
      if (!fs.rename(srcPath, destPath)) {
        throw new IOException(s"Failed to rename $srcPath to $destPath")
      }
    }

    override def mkdirs(path: Path): Unit = {
      fs.mkdirs(path, FsPermission.getDirDefault)
    }

    override def open(path: Path): FSDataInputStream = {
      fs.open(path)
    }

    override def create(path: Path): FSDataOutputStream = {
      // overwrite = false: fails if the file already exists, as the trait requires.
      fs.create(path, false)
    }

    override def exists(path: Path): Boolean = {
      fs.exists(path)
    }

    override def delete(path: Path): Unit = {
      try {
        fs.delete(path, true)
      } catch {
        case e: FileNotFoundException =>
          // ignore if file has already been deleted
      }
    }

    override def isLocalFileSystem: Boolean = fs match {
      case _: LocalFileSystem | _: RawLocalFileSystem =>
        // LocalFileSystem = RawLocalFileSystem + ChecksumFileSystem
        true
      case _ => false
    }
  }
}
| spark0001/spark2.1.1 | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala | Scala | apache-2.0 | 17,869 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.avro
import java.io.{IOException, OutputStream}
import scala.collection.JavaConverters._
import org.apache.avro.Schema
import org.apache.avro.generic.GenericRecord
import org.apache.avro.mapred.AvroKey
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.NullWritable
import org.apache.hadoop.mapreduce.{RecordWriter, TaskAttemptContext}
import org.apache.spark.SPARK_VERSION_SHORT
import org.apache.spark.sql.{SPARK_LEGACY_DATETIME, SPARK_VERSION_METADATA_KEY}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.datasources.OutputWriter
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
// NOTE: This class is instantiated and used on executor side only, no need to be serializable.
private[avro] class AvroOutputWriter(
    path: String,
    context: TaskAttemptContext,
    schema: StructType,
    avroSchema: Schema) extends OutputWriter {

  // Whether to rebase datetimes from Gregorian to Julian calendar in write
  private val rebaseDateTime: Boolean =
    SQLConf.get.getConf(SQLConf.LEGACY_AVRO_REBASE_DATETIME_IN_WRITE)

  // The input rows will never be null.
  private lazy val serializer =
    new AvroSerializer(schema, avroSchema, nullable = false, rebaseDateTime)

  /**
   * Overrides the couple of methods responsible for generating the output streams / files so
   * that the data can be correctly partitioned
   */
  private val recordWriter: RecordWriter[AvroKey[GenericRecord], NullWritable] = {
    // Stamp the writer's Spark version into the file metadata; additionally mark files
    // written with rebased (legacy) datetimes so readers can tell which calendar was used.
    val fileMeta = Map(SPARK_VERSION_METADATA_KEY -> SPARK_VERSION_SHORT) ++ {
      if (rebaseDateTime) Some(SPARK_LEGACY_DATETIME -> "") else None
    }

    new SparkAvroKeyOutputFormat(fileMeta.asJava) {

      // Always write to the exact `path` handed to this OutputWriter, ignoring the
      // extension and the format's default work-file naming.
      override def getDefaultWorkFile(context: TaskAttemptContext, extension: String): Path = {
        new Path(path)
      }

      @throws(classOf[IOException])
      override def getAvroFileOutputStream(c: TaskAttemptContext): OutputStream = {
        val path = getDefaultWorkFile(context, ".avro")
        path.getFileSystem(context.getConfiguration).create(path)
      }

    }.getRecordWriter(context)
  }

  // Serialize the row into an Avro record and hand it to the underlying record writer.
  override def write(row: InternalRow): Unit = {
    val key = new AvroKey(serializer.serialize(row).asInstanceOf[GenericRecord])
    recordWriter.write(key, NullWritable.get())
  }

  override def close(): Unit = recordWriter.close(context)
}
| zuotingbing/spark | external/avro/src/main/scala/org/apache/spark/sql/avro/AvroOutputWriter.scala | Scala | apache-2.0 | 3,197 |
package scl
import java.awt
/** A horizontal separator capped at height `h` pixels so it does not stretch vertically. */
class SwHsep(h:Int=10) extends swing.Separator(swing.Orientation.Horizontal) {
  maximumSize = new swing.Dimension(Integer.MAX_VALUE, h)
}
/** Utility helpers for SVG colors, polygon geometry, and AWT image manipulation. */
object SwUtil {
  // SVG 1.1 named colors mapped to "#RRGGBB" hex strings (runtime data).
  val svgColors = Map("black"->"#000000","navy"->"#000080","darkblue"->"#00008B","mediumblue"->"#0000CD","blue"->"#0000FF",
    "darkgreen"->"#006400","green"->"#008000","teal"->"#008080","darkcyan"->"#008B8B","deepskyblue"->"#00BFFF","darkturquoise"->"#00CED1",
    "mediumspringgreen"->"#00FA9A","lime"->"#00FF00","springgreen"->"#00FF7F","cyan"->"#00FFFF","aqua"->"#00FFFF","midnightblue"->"#191970",
    "dodgerblue"->"#1E90FF","lightseagreen"->"#20B2AA","forestgreen"->"#228B22","seagreen"->"#2E8B57","darkslategray"->"#2F4F4F",
    "darkslategrey"->"#2F4F4F","limegreen"->"#32CD32","mediumseagreen"->"#3CB371","turquoise"->"#40E0D0","royalblue"->"#4169E1",
    "steelblue"->"#4682B4","darkslateblue"->"#483D8B","mediumturquoise"->"#48D1CC","indigo"->"#4B0082","darkolivegreen"->"#556B2F",
    "cadetblue"->"#5F9EA0","cornflowerblue"->"#6495ED","mediumaquamarine"->"#66CDAA","dimgrey"->"#696969","dimgray"->"#696969",
    "slateblue"->"#6A5ACD","olivedrab"->"#6B8E23","slategrey"->"#708090","slategray"->"#708090","lightslategray"->"#778899",
    "lightslategrey"->"#778899","mediumslateblue"->"#7B68EE","lawngreen"->"#7CFC00","chartreuse"->"#7FFF00","aquamarine"->"#7FFFD4",
    "maroon"->"#800000","purple"->"#800080","olive"->"#808000","gray"->"#808080","grey"->"#808080","skyblue"->"#87CEEB",
    "lightskyblue"->"#87CEFA","blueviolet"->"#8A2BE2","darkred"->"#8B0000","darkmagenta"->"#8B008B","saddlebrown"->"#8B4513",
    "darkseagreen"->"#8FBC8F","lightgreen"->"#90EE90","mediumpurple"->"#9370DB","darkviolet"->"#9400D3","palegreen"->"#98FB98",
    "darkorchid"->"#9932CC","yellowgreen"->"#9ACD32","sienna"->"#A0522D","brown"->"#A52A2A","darkgray"->"#A9A9A9","darkgrey"->"#A9A9A9",
    "lightblue"->"#ADD8E6","greenyellow"->"#ADFF2F","paleturquoise"->"#AFEEEE","lightsteelblue"->"#B0C4DE","powderblue"->"#B0E0E6",
    "firebrick"->"#B22222","darkgoldenrod"->"#B8860B","mediumorchid"->"#BA55D3","rosybrown"->"#BC8F8F","darkkhaki"->"#BDB76B",
    "silver"->"#C0C0C0","mediumvioletred"->"#C71585","indianred"->"#CD5C5C","peru"->"#CD853F","chocolate"->"#D2691E","tan"->"#D2B48C",
    "lightgray"->"#D3D3D3","lightgrey"->"#D3D3D3","thistle"->"#D8BFD8","orchid"->"#DA70D6","goldenrod"->"#DAA520","palevioletred"->"#DB7093",
    "crimson"->"#DC143C","gainsboro"->"#DCDCDC","plum"->"#DDA0DD","burlywood"->"#DEB887","lightcyan"->"#E0FFFF","lavender"->"#E6E6FA",
    "darksalmon"->"#E9967A","violet"->"#EE82EE","palegoldenrod"->"#EEE8AA","lightcoral"->"#F08080","khaki"->"#F0E68C","aliceblue"->"#F0F8FF",
    "honeydew"->"#F0FFF0","azure"->"#F0FFFF","sandybrown"->"#F4A460","wheat"->"#F5DEB3","beige"->"#F5F5DC","whitesmoke"->"#F5F5F5",
    "mintcream"->"#F5FFFA","ghostwhite"->"#F8F8FF","salmon"->"#FA8072","antiquewhite"->"#FAEBD7","linen"->"#FAF0E6","lightgoldenrodyellow"->"#FAFAD2",
    "oldlace"->"#FDF5E6","red"->"#FF0000","fuchsia"->"#FF00FF","magenta"->"#FF00FF","deeppink"->"#FF1493","orangered"->"#FF4500",
    "tomato"->"#FF6347","hotpink"->"#FF69B4","coral"->"#FF7F50","darkorange"->"#FF8C00","lightsalmon"->"#FFA07A","orange"->"#FFA500",
    "lightpink"->"#FFB6C1","pink"->"#FFC0CB","gold"->"#FFD700","peachpuff"->"#FFDAB9","navajowhite"->"#FFDEAD","moccasin"->"#FFE4B5",
    "bisque"->"#FFE4C4","mistyrose"->"#FFE4E1","blanchedalmond"->"#FFEBCD","papayawhip"->"#FFEFD5","lavenderblush"->"#FFF0F5","seashell"->"#FFF5EE",
    "cornsilk"->"#FFF8DC","lemonchiffon"->"#FFFACD","floralwhite"->"#FFFAF0","snow"->"#FFFAFA","yellow"->"#FFFF00","lightyellow"->"#FFFFE0",
    "ivory"->"#FFFFF0","white"->"#FFFFFF"
  )

  // All known SVG color names.
  val svgNames = svgColors.keys

  /**
   * Decodes a color given either as a "#RRGGBB" literal or an SVG color name.
   * Unknown names fall back to the name `d`, and ultimately to black.
   */
  def svgColor(c:String, d:String="black") = java.awt.Color.decode(if (c.startsWith("#")) c else svgColors.getOrElse(c, svgColors.getOrElse(d, svgColors("black"))))

  /** Hex form ("#rrggbb", lower case) of a color; the alpha byte is stripped. */
  def svgName(c:java.awt.Color) = "#" + Integer.toHexString(c.getRGB).substring(2)

  /** Polygon bounding rectangle as two corners: Seq((xMin,yMin), (xMax,yMax)). */
  def polyBounds(poly:Seq[(Int,Int)]):Seq[(Int,Int)] = {
    var xMin = Int.MaxValue; var xMax = Int.MinValue
    var yMin = Int.MaxValue; var yMax = Int.MinValue
    for (v <- poly){
      xMin = Math.min(xMin, v._1); xMax = Math.max(xMax, v._1)
      yMin = Math.min(yMin, v._2); yMax = Math.max(yMax, v._2)
    }
    Array((xMin,yMin), (xMax,yMax))
  }

  /** Bounding rectangle of a polygon as a swing.Rectangle. */
  def polyRect(poly:Seq[(Int,Int)]):swing.Rectangle = {
    val bounds = polyBounds(poly)
    new swing.Rectangle(bounds(0)._1,bounds(0)._2, bounds(1)._1-bounds(0)._1,bounds(1)._2-bounds(0)._2)
  }

  /**
   * Point-in-polygon test using the even-odd ray-casting rule.
   * Returns false for degenerate polygons with fewer than 3 vertices.
   */
  def pointInPoly(x:Int,y:Int, poly:Seq[(Int,Int)]):Boolean = {
    var ip = false
    if (poly.length > 2){
      var i = 0; var j = poly.length-1
      while (i < poly.length){
        // Toggle `ip` each time the horizontal ray from (x,y) crosses edge (j,i).
        if (((poly(i)._2 > y) != (poly(j)._2 > y)) &&
            (x < (poly(j)._1-poly(i)._1) * (y-poly(i)._2) / (poly(j)._2-poly(i)._2) + poly(i)._1))
          ip = !ip;
        j = i; i += 1
      }
    }
    ip
  }

  /** Scale polygon from view coordinates to real image coordinates. */
  def polyScaled(poly:Seq[(Int,Int)], scaleX:Double,scaleY:Double):Seq[(Int,Int)] =
    for (v <- poly) yield ( (v._1 * scaleX).toInt, (v._2 * scaleY).toInt );

  /** True iff the polygon is a 4-vertex axis-aligned rectangle (vertices in edge order). */
  def polyIsRect(poly:Seq[(Int,Int)]):Boolean =
    ((poly.length == 4)&&(poly(0)._2 == poly(1)._2)&&(poly(1)._1 == poly(2)._1)&&(poly(2)._2 == poly(3)._2)&&(poly(3)._1 == poly(0)._1));

  /** Area of the polygon's bounding rectangle (not the polygon itself); 0 for < 3 vertices. */
  def polyRectArea(poly:Seq[(Int,Int)]):Int = if (poly.length < 3) 0 else {
    val r = polyRect(poly)
    r.width * r.height
  }

  /** Create a 4-vertex polygon from a 2-vertex rectangle (any corner order); Nil otherwise. */
  def rectToPoly(rect:Seq[(Int,Int)]):Seq[(Int,Int)] = if (rect.length != 2) Nil else {
    val x = Math.min(rect(0)._1,rect(1)._1); val y = Math.min(rect(0)._2,rect(1)._2)
    val w = Math.abs(rect(1)._1 - rect(0)._1); val h = Math.abs(rect(1)._2 - rect(0)._2)
    Seq((x,y),(x+w,y),(x+w,y+h),(x,y+h))
  }

  /**
   * Crop the region of `im` enclosed by `poly` (pixels outside a non-rectangular polygon are
   * filled white) and rotate the result by `rotation` degrees. Returns null for degenerate input.
   */
  def cropImage(poly:Seq[(Int,Int)], im:awt.image.BufferedImage, rotation:Double):awt.image.BufferedImage = {
    if ((poly.length > 2)&&(im != null)){
      val bounds = polyBounds(poly)
      val fill = awt.Color.WHITE.getRGB
      val crop = cloneImage(im.getSubimage(bounds(0)._1,bounds(0)._2, bounds(1)._1-bounds(0)._1+1, bounds(1)._2-bounds(0)._2+1))
      // Clear out-of-border dots in a non-rectangular polygon.
      // NOTE(review): the loops stop one pixel short of the crop's right/bottom edge
      // (the crop is bounds-diff + 1 wide/tall) -- confirm whether that edge should be cleared.
      if (!polyIsRect(poly)) for (y <- 0 until (bounds(1)._2 - bounds(0)._2))
        for (x <- 0 until (bounds(1)._1 - bounds(0)._1)) try {
          if (!pointInPoly(x+bounds(0)._1,y+bounds(0)._2, poly)) crop.setRGB(x,y, fill)
        } catch { case e:Exception => println(s"error at ${x}:${y}"); throw e }
      if (rotation != 0.0) rotateImage(crop, rotation) else crop
    } else null
  }

  /**
   * Convert an awt.Image to an ARGB BufferedImage (null-safe).
   *
   * Fixed: the original obtained three separate Graphics contexts -- one unused, one that
   * drew, and a *fresh* one that was disposed -- leaking the context that actually drew.
   * Now a single context is obtained, used, and reliably disposed.
   */
  def toBufferedImage(im:awt.Image):awt.image.BufferedImage = if (im != null){
    val nim = new awt.image.BufferedImage(im.getWidth(null), im.getHeight(null), awt.image.BufferedImage.TYPE_INT_ARGB)
    val g = nim.getGraphics
    try g.drawImage(im, 0, 0, null) finally g.dispose()
    nim
  } else null

  /** Scale `im` to fit within w x h while preserving its aspect ratio. */
  def fitImage(im:awt.image.BufferedImage, w:Int, h:Int):awt.image.BufferedImage = {
    var nw = w; var nh = h
    if (im.getHeight.toDouble / im.getWidth >= h.toDouble / w) nw = (im.getWidth.toDouble * nh / im.getHeight).toInt
    else nh = (im.getHeight.toDouble * nw / im.getWidth.toDouble).toInt
    toBufferedImage(im.getScaledInstance(nw, nh, awt.Image.SCALE_AREA_AVERAGING))
  }

  /** Deep copy of a BufferedImage (shares no raster data with the original). */
  def cloneImage(im:awt.image.BufferedImage):awt.image.BufferedImage =
    new awt.image.BufferedImage(im.getColorModel, im.copyData(im.getRaster.createCompatibleWritableRaster), im.getColorModel.isAlphaPremultiplied, null);

  /**
   * Rotate an image by `angle` degrees. Only right-angle rotations (90, -90, 180) are
   * implemented; any other non-zero angle returns the image unchanged.
   */
  def rotateImage(im:awt.image.BufferedImage, angle:Double):awt.image.BufferedImage = {
    val w = im.getWidth; val h = im.getHeight
    if (angle == 0.0) im
    else if ((angle == 90.0)||(angle == -90.0)){
      // Quarter turns swap width and height.
      val io = new awt.image.BufferedImage(h,w, im.getType)
      if (angle == 90.0) for (y <- 0 until h; x <- 0 until w) io.setRGB(h-y-1,x, im.getRGB(x,y))
      else for (y <- 0 until h; x <- 0 until w) io.setRGB(y,w-x-1, im.getRGB(x,y))
      io
    } else if (angle == 180.0){
      val io = new awt.image.BufferedImage(w,h, im.getType)
      for (y <- 0 until h; x <- 0 until w) io.setRGB(x,h-y-1, im.getRGB(x,y))
      io
    } else im
  }
}
| tardigrade888/scsvlog | repo/src/scl/SwUtil.scala | Scala | mit | 9,420 |
package mist.api
import mist.api.data.JsMap
import mist.api.encoding.JsSyntax._
import org.scalatest.{FunSpec, Matchers}
import shadedshapeless.HNil
/** Unit tests for the `withArgs` argument-combination DSL. */
class WithArgsScalaSpec extends FunSpec with Matchers {

  import mist.api.ArgsInstances._
  import mist.api.encoding.defaults._
  import WithArgsScala._

  it("should apply tuples") {
    // Five constant args combine into a single 5-tuple extraction.
    val result = withArgs(const("a"), const("b"), const("c"), const(5), const("last"))
    val extraction = result.extract(FnContext.onlyInput(JsMap.empty))
    extraction shouldBe Extracted("a", "b", "c", 5, "last")
  }

  it("should flat tuples") {
    // Nested `&` combinations flatten to the same flat 5-tuple as above.
    val result = withArgs(const("a") & const("b"), const("c") & const(5) & const("last"))
    val extraction = result.extract(FnContext.onlyInput(JsMap.empty))
    extraction shouldBe Extracted("a", "b", "c", 5, "last")
  }

  it("should apply single element") {
    val result = withArgs(const("a"))
    val extraction = result.extract(FnContext.onlyInput(JsMap.empty))
    extraction shouldBe Extracted("a")
  }

  it("should work with user args") {
    import ArgsInstances._
    // Named args are resolved from the JSON input map.
    val result = withArgs((arg[Int]("n"): UserArg[Int], arg[Int]("m")))
    val extraction = result.extract(FnContext.onlyInput(JsMap("n" -> 5.js, "m" -> 10.js)))
    extraction shouldBe Extracted(5, 10)
  }

  it("should work with single user arg") {
    import ArgsInstances._
    // Extra input keys ("m") are ignored when only "n" is requested.
    val result = withArgs(arg[Int]("n"): UserArg[Int])
    val extraction = result.extract(FnContext.onlyInput(JsMap("n" -> 5.js, "m" -> 10.js)))
    extraction shouldBe Extracted(5)
  }
}
| Hydrospheredata/mist | mist-lib/src/test/scala/mist/api/WithArgsScalaSpec.scala | Scala | apache-2.0 | 1,535 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command
import java.util.Locale
import java.util.concurrent.TimeUnit._
import scala.collection.{GenMap, GenSeq}
import scala.collection.parallel.ForkJoinTaskSupport
import scala.collection.parallel.immutable.ParVector
import scala.util.control.NonFatal
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.hadoop.mapred.{FileInputFormat, JobConf}
import org.apache.spark.internal.config.RDD_PARALLEL_LISTING_THRESHOLD
import org.apache.spark.sql.{AnalysisException, Row, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.Resolver
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.connector.catalog.{CatalogV2Util, TableCatalog}
import org.apache.spark.sql.connector.catalog.SupportsNamespaces._
import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation}
import org.apache.spark.sql.execution.datasources.orc.OrcFileFormat
import org.apache.spark.sql.execution.datasources.parquet.ParquetSchemaConverter
import org.apache.spark.sql.internal.{HiveSerDe, SQLConf}
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.PartitioningUtils
import org.apache.spark.util.{SerializableConfiguration, ThreadUtils}
// Note: The definition of these commands are based on the ones described in
// https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL
/**
* A command for users to create a new database.
*
* It will issue an error message when the database with the same name already exists,
* unless 'ifNotExists' is true.
* The syntax of using this command in SQL is:
* {{{
* CREATE (DATABASE|SCHEMA) [IF NOT EXISTS] database_name
* [COMMENT database_comment]
* [LOCATION database_directory]
* [WITH DBPROPERTIES (property_name=property_value, ...)];
* }}}
*/
case class CreateDatabaseCommand(
    databaseName: String,
    ifNotExists: Boolean,
    path: Option[String],
    comment: Option[String],
    props: Map[String, String])
  extends LeafRunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val sessionCatalog = sparkSession.sessionState.catalog
    // Use the LOCATION clause when present; otherwise place the database at the
    // catalog's default path for this name.
    val locationUri = path match {
      case Some(p) => CatalogUtils.stringToURI(p)
      case None => sessionCatalog.getDefaultDBPath(databaseName)
    }
    val dbDefinition = CatalogDatabase(
      name = databaseName,
      description = comment.getOrElse(""),
      locationUri = locationUri,
      properties = props)
    sessionCatalog.createDatabase(dbDefinition, ifNotExists)
    Seq.empty[Row]
  }
}
/**
* A command for users to remove a database from the system.
*
* 'ifExists':
* - true, if database_name doesn't exist, no action
* - false (default), if database_name doesn't exist, a warning message will be issued
* 'cascade':
* - true, the dependent objects are automatically dropped before dropping database.
* - false (default), it is in the Restrict mode. The database cannot be dropped if
* it is not empty. The inclusive tables must be dropped at first.
*
* The syntax of using this command in SQL is:
* {{{
* DROP DATABASE [IF EXISTS] database_name [RESTRICT|CASCADE];
* }}}
*/
case class DropDatabaseCommand(
    databaseName: String,
    ifExists: Boolean,
    cascade: Boolean)
  extends LeafRunnableCommand {

  // Delegates directly to the session catalog, which enforces the IF EXISTS
  // and RESTRICT/CASCADE semantics.
  override def run(sparkSession: SparkSession): Seq[Row] = {
    val sessionCatalog = sparkSession.sessionState.catalog
    sessionCatalog.dropDatabase(databaseName, ifExists, cascade)
    Seq.empty[Row]
  }
}
/**
* A command for users to add new (key, value) pairs into DBPROPERTIES
* If the database does not exist, an error message will be issued to indicate the database
* does not exist.
* The syntax of using this command in SQL is:
* {{{
* ALTER (DATABASE|SCHEMA) database_name SET DBPROPERTIES (property_name=property_value, ...)
* }}}
*/
case class AlterDatabasePropertiesCommand(
    databaseName: String,
    props: Map[String, String])
  extends LeafRunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val sessionCatalog = sparkSession.sessionState.catalog
    val existing: CatalogDatabase = sessionCatalog.getDatabaseMetadata(databaseName)
    // New keys are added and existing keys are overwritten; nothing is removed.
    val mergedProps = existing.properties ++ props
    sessionCatalog.alterDatabase(existing.copy(properties = mergedProps))
    Seq.empty[Row]
  }
}
/**
* A command for users to set new location path for a database
* If the database does not exist, an error message will be issued to indicate the database
* does not exist.
* The syntax of using this command in SQL is:
* {{{
* ALTER (DATABASE|SCHEMA) database_name SET LOCATION path
* }}}
*/
case class AlterDatabaseSetLocationCommand(databaseName: String, location: String)
  extends LeafRunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val sessionCatalog = sparkSession.sessionState.catalog
    val current = sessionCatalog.getDatabaseMetadata(databaseName)
    // Only the location URI changes; all other database metadata is preserved.
    val updated = current.copy(locationUri = CatalogUtils.stringToURI(location))
    sessionCatalog.alterDatabase(updated)
    Seq.empty[Row]
  }
}
/**
* A command for users to show the name of the database, its comment (if one has been set), and its
* root location on the filesystem. When extended is true, it also shows the database's properties
* If the database does not exist, an error message will be issued to indicate the database
* does not exist.
* The syntax of using this command in SQL is
* {{{
* DESCRIBE DATABASE [EXTENDED] db_name
* }}}
*/
case class DescribeDatabaseCommand(
    databaseName: String,
    extended: Boolean,
    override val output: Seq[Attribute])
  extends LeafRunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val dbMetadata: CatalogDatabase =
      sparkSession.sessionState.catalog.getDatabaseMetadata(databaseName)
    val allDbProperties = dbMetadata.properties

    // Base description shown for both plain and EXTENDED forms.
    val baseRows = Seq(
      Row("Database Name", dbMetadata.name),
      Row("Comment", dbMetadata.description),
      Row("Location", CatalogUtils.URIToString(dbMetadata.locationUri)),
      Row("Owner", allDbProperties.getOrElse(PROP_OWNER, "")))

    if (!extended) {
      baseRows
    } else {
      // EXTENDED adds the user-visible properties, hiding reserved namespace keys.
      val properties = allDbProperties -- CatalogV2Util.NAMESPACE_RESERVED_PROPERTIES
      val propertiesStr =
        if (properties.isEmpty) "" else properties.toSeq.mkString("(", ", ", ")")
      baseRows :+ Row("Properties", propertiesStr)
    }
  }
}
/**
* Drops a table/view from the metastore and removes it if it is cached.
*
* The syntax of this command is:
* {{{
* DROP TABLE [IF EXISTS] table_name;
* DROP VIEW [IF EXISTS] [db_name.]view_name;
* }}}
*/
case class DropTableCommand(
    tableName: TableIdentifier,
    ifExists: Boolean,
    isView: Boolean,
    purge: Boolean) extends LeafRunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    val isTempView = catalog.isTempView(tableName)

    // Reject DROP TABLE on a view and DROP VIEW on a table (temp views are exempt
    // from this check).
    if (!isTempView && catalog.tableExists(tableName)) {
      // If the command DROP VIEW is to drop a table or DROP TABLE is to drop a view
      // issue an exception.
      catalog.getTableMetadata(tableName).tableType match {
        case CatalogTableType.VIEW if !isView =>
          throw new AnalysisException(
            "Cannot drop a view with DROP TABLE. Please use DROP VIEW instead")
        case o if o != CatalogTableType.VIEW && isView =>
          throw new AnalysisException(
            s"Cannot drop a table with DROP VIEW. Please use DROP TABLE instead")
        case _ =>
      }
    }

    if (isTempView || catalog.tableExists(tableName)) {
      // Uncache before dropping so no cached plan refers to the removed relation.
      // For temp views without view text the uncache does not cascade.
      try {
        val hasViewText = isTempView &&
          catalog.getTempViewOrPermanentTableMetadata(tableName).viewText.isDefined
        sparkSession.sharedState.cacheManager.uncacheQuery(
          sparkSession.table(tableName), cascade = !isTempView || hasViewText)
      } catch {
        // Best-effort: failure to uncache must not prevent the drop itself.
        case NonFatal(e) => log.warn(e.toString, e)
      }
      catalog.refreshTable(tableName)
      catalog.dropTable(tableName, ifExists, purge)
    } else if (ifExists) {
      // no-op
    } else {
      throw new AnalysisException(s"Table or view not found: ${tableName.identifier}")
    }
    Seq.empty[Row]
  }
}
/**
* A command that sets table/view properties.
*
* The syntax of this command is:
* {{{
* ALTER TABLE table1 SET TBLPROPERTIES ('key1' = 'val1', 'key2' = 'val2', ...);
* ALTER VIEW view1 SET TBLPROPERTIES ('key1' = 'val1', 'key2' = 'val2', ...);
* }}}
*/
case class AlterTableSetPropertiesCommand(
    tableName: TableIdentifier,
    properties: Map[String, String],
    isView: Boolean)
  extends LeafRunnableCommand {

  /**
   * Merges `properties` over the table's existing properties and persists the
   * result. When a comment key is among the incoming properties it is also
   * mirrored into the dedicated comment field of the CatalogTable.
   */
  override def run(sparkSession: SparkSession): Seq[Row] = {
    val sessionCatalog = sparkSession.sessionState.catalog
    val existing = sessionCatalog.getTableRawMetadata(tableName)

    val mergedProps = existing.properties ++ properties
    val updatedComment = properties.get(TableCatalog.PROP_COMMENT).orElse(existing.comment)

    sessionCatalog.alterTable(
      existing.copy(properties = mergedProps, comment = updatedComment))
    Seq.empty[Row]
  }
}
/**
* A command that unsets table/view properties.
*
* The syntax of this command is:
* {{{
* ALTER TABLE table1 UNSET TBLPROPERTIES [IF EXISTS] ('key1', 'key2', ...);
* ALTER VIEW view1 UNSET TBLPROPERTIES [IF EXISTS] ('key1', 'key2', ...);
* }}}
*/
case class AlterTableUnsetPropertiesCommand(
    tableName: TableIdentifier,
    propKeys: Seq[String],
    ifExists: Boolean,
    isView: Boolean)
  extends LeafRunnableCommand {

  /**
   * Removes the given property keys from the table. Unless `ifExists` is set,
   * the first key that is neither present nor the special comment key raises.
   * Unsetting the comment key clears the table comment.
   */
  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    val table = catalog.getTableRawMetadata(tableName)

    if (!ifExists) {
      // Fail on the first key (in the order given) that does not exist.
      propKeys
        .find(k => !table.properties.contains(k) && k != TableCatalog.PROP_COMMENT)
        .foreach { k =>
          throw new AnalysisException(
            s"Attempted to unset non-existent property '$k' in table '${table.identifier}'")
        }
    }

    // Dropping the comment key resets the dedicated comment field as well.
    val newComment =
      if (propKeys.contains(TableCatalog.PROP_COMMENT)) None else table.comment
    val remaining = table.properties.filterNot { case (k, _) => propKeys.contains(k) }

    catalog.alterTable(table.copy(properties = remaining, comment = newComment))
    Seq.empty[Row]
  }
}
/**
* A command to change the column for a table, only support changing the comment of a non-partition
* column for now.
*
* The syntax of using this command in SQL is:
* {{{
* ALTER TABLE table_identifier
* CHANGE [COLUMN] column_old_name column_new_name column_dataType [COMMENT column_comment]
* [FIRST | AFTER column_name];
* }}}
*/
case class AlterTableChangeColumnCommand(
    tableName: TableIdentifier,
    columnName: String,
    newColumn: StructField) extends LeafRunnableCommand {

  // TODO: support change column name/dataType/metadata/position.
  // Today only the column comment may change: name and dataType of `newColumn`
  // must match the existing column (enforced by `columnEqual` below).
  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    val table = catalog.getTableRawMetadata(tableName)
    val resolver = sparkSession.sessionState.conf.resolver
    DDLUtils.verifyAlterTableType(catalog, table, isView = false)

    // Find the origin column from dataSchema by column name.
    val originColumn = findColumnByName(table.dataSchema, columnName, resolver)
    // Throw an AnalysisException if the column name/dataType is changed.
    if (!columnEqual(originColumn, newColumn, resolver)) {
      throw new AnalysisException(
        "ALTER TABLE CHANGE COLUMN is not supported for changing column " +
          s"'${originColumn.name}' with type '${originColumn.dataType}' to " +
          s"'${newColumn.name}' with type '${newColumn.dataType}'")
    }

    // Rebuild the data schema, replacing only the matched field's comment.
    val newDataSchema = table.dataSchema.fields.map { field =>
      if (field.name == originColumn.name) {
        // Create a new column from the origin column with the new comment.
        addComment(field, newColumn.getComment)
      } else {
        field
      }
    }
    catalog.alterTableDataSchema(tableName, StructType(newDataSchema))

    Seq.empty[Row]
  }

  // Find the origin column from schema by column name, throw an AnalysisException if the column
  // reference is invalid.
  private def findColumnByName(
      schema: StructType, name: String, resolver: Resolver): StructField = {
    schema.fields.collectFirst {
      case field if resolver(field.name, name) => field
    }.getOrElse(throw new AnalysisException(
      s"Can't find column `$name` given table data columns " +
        s"${schema.fieldNames.mkString("[`", "`, `", "`]")}"))
  }

  // Add the comment to a column, if comment is empty, return the original column.
  private def addComment(column: StructField, comment: Option[String]): StructField =
    comment.map(column.withComment).getOrElse(column)

  // Compare a [[StructField]] to another, return true if they have the same column
  // name(by resolver) and dataType.
  private def columnEqual(
      field: StructField, other: StructField, resolver: Resolver): Boolean = {
    resolver(field.name, other.name) && field.dataType == other.dataType
  }
}
/**
* A command that sets the serde class and/or serde properties of a table/view.
*
* The syntax of this command is:
* {{{
* ALTER TABLE table [PARTITION spec] SET SERDE serde_name [WITH SERDEPROPERTIES props];
* ALTER TABLE table [PARTITION spec] SET SERDEPROPERTIES serde_properties;
* }}}
*/
case class AlterTableSerDePropertiesCommand(
    tableName: TableIdentifier,
    serdeClassName: Option[String],
    serdeProperties: Option[Map[String, String]],
    partSpec: Option[TablePartitionSpec])
  extends LeafRunnableCommand {

  // should never happen if we parsed things correctly
  require(serdeClassName.isDefined || serdeProperties.isDefined,
    "ALTER TABLE attempted to set neither serde class name nor serde properties")

  /**
   * Updates the serde class and/or serde properties, either on the table's own
   * storage (no partition spec) or on a single partition. Datasource tables
   * reject SET SERDE entirely and reject any partition-level change.
   */
  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    val table = catalog.getTableRawMetadata(tableName)
    // For datasource tables, disallow setting serde or specifying partition
    if (partSpec.isDefined && DDLUtils.isDatasourceTable(table)) {
      throw new AnalysisException("Operation not allowed: ALTER TABLE SET " +
        "[SERDE | SERDEPROPERTIES] for a specific partition is not supported " +
        "for tables created with the datasource API")
    }
    if (serdeClassName.isDefined && DDLUtils.isDatasourceTable(table)) {
      throw new AnalysisException("Operation not allowed: ALTER TABLE SET SERDE is " +
        "not supported for tables created with the datasource API")
    }
    if (partSpec.isEmpty) {
      // Table-level change: new properties are merged over the existing ones.
      val newTable = table.withNewStorage(
        serde = serdeClassName.orElse(table.storage.serde),
        properties = table.storage.properties ++ serdeProperties.getOrElse(Map()))
      catalog.alterTable(newTable)
    } else {
      // Partition-level change on the single partition identified by the spec.
      val spec = partSpec.get
      val part = catalog.getPartition(table.identifier, spec)
      val newPart = part.copy(storage = part.storage.copy(
        serde = serdeClassName.orElse(part.storage.serde),
        properties = part.storage.properties ++ serdeProperties.getOrElse(Map())))
      catalog.alterPartitions(table.identifier, Seq(newPart))
    }
    Seq.empty[Row]
  }
}
/**
* Add Partition in ALTER TABLE: add the table partitions.
*
* An error message will be issued if the partition exists, unless 'ifNotExists' is true.
*
* The syntax of this command is:
* {{{
* ALTER TABLE table ADD [IF NOT EXISTS] PARTITION spec1 [LOCATION 'loc1']
* PARTITION spec2 [LOCATION 'loc2']
* }}}
*/
case class AlterTableAddPartitionCommand(
    tableName: TableIdentifier,
    partitionSpecsAndLocs: Seq[(TablePartitionSpec, Option[String])],
    ifNotExists: Boolean)
  extends LeafRunnableCommand {

  /**
   * Registers the given partition specs (with optional explicit locations) in
   * the metastore in batches, then refreshes the table and updates its size
   * statistics.
   */
  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    val table = catalog.getTableMetadata(tableName)
    DDLUtils.verifyPartitionProviderIsHive(sparkSession, table, "ALTER TABLE ADD PARTITION")
    val parts = partitionSpecsAndLocs.map { case (spec, location) =>
      val normalizedSpec = PartitioningUtils.normalizePartitionSpec(
        spec,
        table.partitionSchema,
        table.identifier.quotedString,
        sparkSession.sessionState.conf.resolver)
      // inherit table storage format (possibly except for location)
      CatalogTablePartition(normalizedSpec, table.storage.copy(
        locationUri = location.map(CatalogUtils.stringToURI)))
    }

    // Hive metastore may not have enough memory to handle millions of partitions in single RPC.
    // Also the request to metastore times out when adding lot of partitions in one shot.
    // we should split them into smaller batches
    val batchSize = conf.getConf(SQLConf.ADD_PARTITION_BATCH_SIZE)
    parts.toIterator.grouped(batchSize).foreach { batch =>
      catalog.createPartitions(table.identifier, batch, ignoreIfExists = ifNotExists)
    }

    sparkSession.catalog.refreshTable(table.identifier.quotedString)
    if (table.stats.nonEmpty && sparkSession.sessionState.conf.autoSizeUpdateEnabled) {
      // Updating table stats only if new partition is not empty
      // (cheap incremental path: add the new partitions' sizes to the existing stat).
      val addedSize = CommandUtils.calculateMultipleLocationSizes(sparkSession, table.identifier,
        parts.map(_.storage.locationUri)).sum
      if (addedSize > 0) {
        val newStats = CatalogStatistics(sizeInBytes = table.stats.get.sizeInBytes + addedSize)
        catalog.alterTableStats(table.identifier, Some(newStats))
      }
    } else {
      // Re-calculating of table size including all partitions
      CommandUtils.updateTableStats(sparkSession, table)
    }
    Seq.empty[Row]
  }
}
/**
* Alter a table partition's spec.
*
* The syntax of this command is:
* {{{
* ALTER TABLE table PARTITION spec1 RENAME TO PARTITION spec2;
* }}}
*/
case class AlterTableRenamePartitionCommand(
    tableName: TableIdentifier,
    oldPartition: TablePartitionSpec,
    newPartition: TablePartitionSpec)
  extends LeafRunnableCommand {

  /**
   * Renames one partition: both the old and new specs are normalized against
   * the table's partition schema before the catalog rename, and the table is
   * refreshed afterwards.
   */
  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    val table = catalog.getTableMetadata(tableName)
    DDLUtils.verifyPartitionProviderIsHive(sparkSession, table, "ALTER TABLE RENAME PARTITION")

    // Both specs go through the same normalization against the partition schema.
    def normalize(spec: TablePartitionSpec): TablePartitionSpec =
      PartitioningUtils.normalizePartitionSpec(
        spec,
        table.partitionSchema,
        table.identifier.quotedString,
        sparkSession.sessionState.conf.resolver)

    val normalizedOld = normalize(oldPartition)
    val normalizedNew = normalize(newPartition)
    catalog.renamePartitions(tableName, Seq(normalizedOld), Seq(normalizedNew))

    sparkSession.catalog.refreshTable(table.identifier.quotedString)
    Seq.empty[Row]
  }
}
/**
* Drop Partition in ALTER TABLE: to drop a particular partition for a table.
*
* This removes the data and metadata for this partition.
* The data is actually moved to the .Trash/Current directory if Trash is configured,
* unless 'purge' is true, but the metadata is completely lost.
* An error message will be issued if the partition does not exist, unless 'ifExists' is true.
* Note: purge is always false when the target is a view.
*
* The syntax of this command is:
* {{{
* ALTER TABLE table DROP [IF EXISTS] PARTITION spec1[, PARTITION spec2, ...] [PURGE];
* }}}
*/
case class AlterTableDropPartitionCommand(
    tableName: TableIdentifier,
    specs: Seq[TablePartitionSpec],
    ifExists: Boolean,
    purge: Boolean,
    retainData: Boolean)
  extends LeafRunnableCommand {

  /**
   * Drops the given partitions from the metastore after normalizing each spec
   * against the table's partition schema, then refreshes the table and
   * recomputes its statistics.
   */
  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    val table = catalog.getTableMetadata(tableName)
    DDLUtils.verifyPartitionProviderIsHive(sparkSession, table, "ALTER TABLE DROP PARTITION")

    val resolver = sparkSession.sessionState.conf.resolver
    val normalized = specs.map { s =>
      PartitioningUtils.normalizePartitionSpec(
        s, table.partitionSchema, table.identifier.quotedString, resolver)
    }

    catalog.dropPartitions(
      table.identifier, normalized, ignoreIfNotExists = ifExists, purge = purge,
      retainData = retainData)

    sparkSession.catalog.refreshTable(table.identifier.quotedString)
    CommandUtils.updateTableStats(sparkSession, table)
    Seq.empty[Row]
  }
}
/**
 * Fast per-partition file statistics: the number of files in the partition
 * directory and their cumulative size in bytes (gathered from file listings,
 * see `RepairTableCommand.gatherPartitionStats`).
 */
case class PartitionStatistics(numFiles: Int, totalSize: Long)
/**
* Repair a table by recovering all the partition in the directory of the table and
* update the catalog.
*
* The syntax of this command is:
* {{{
* ALTER TABLE table RECOVER PARTITIONS;
* MSCK REPAIR TABLE table [{ADD|DROP|SYNC} PARTITIONS];
* }}}
*/
case class RepairTableCommand(
    tableName: TableIdentifier,
    enableAddPartitions: Boolean,
    enableDropPartitions: Boolean,
    cmd: String = "MSCK REPAIR TABLE") extends LeafRunnableCommand {

  // These are list of statistics that can be collected quickly without requiring a scan of the data
  // see https://github.com/apache/hive/blob/master/
  //   common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
  val NUM_FILES = "numFiles"
  val TOTAL_SIZE = "totalSize"
  val DDL_TIME = "transient_lastDdlTime"

  // Builds a PathFilter that skips _SUCCESS, _temporary and hidden files, and
  // additionally applies any input path filter configured in the Hadoop conf.
  private def getPathFilter(hadoopConf: Configuration): PathFilter = {
    // Dummy jobconf to get to the pathFilter defined in configuration
    // It's very expensive to create a JobConf(ClassUtil.findContainingJar() is slow)
    val jobConf = new JobConf(hadoopConf, this.getClass)
    val pathFilter = FileInputFormat.getInputPathFilter(jobConf)
    path: Path => {
      val name = path.getName
      if (name != "_SUCCESS" && name != "_temporary" && !name.startsWith(".")) {
        pathFilter == null || pathFilter.accept(path)
      } else {
        false
      }
    }
  }

  /**
   * Repairs the partition metadata: optionally drops catalog partitions whose
   * directories are gone, optionally scans the table root to register new
   * partition directories (in parallel past a listing threshold), then marks
   * the table as tracking partitions in the catalog and refreshes it.
   */
  override def run(spark: SparkSession): Seq[Row] = {
    val catalog = spark.sessionState.catalog
    val table = catalog.getTableRawMetadata(tableName)
    val tableIdentWithDB = table.identifier.quotedString
    if (table.partitionColumnNames.isEmpty) {
      throw new AnalysisException(
        s"Operation not allowed: $cmd only works on partitioned tables: $tableIdentWithDB")
    }

    if (table.storage.locationUri.isEmpty) {
      throw new AnalysisException(s"Operation not allowed: $cmd only works on table with " +
        s"location provided: $tableIdentWithDB")
    }

    val root = new Path(table.location)
    logInfo(s"Recover all the partitions in $root")
    val hadoopConf = spark.sessionState.newHadoopConf()
    val fs = root.getFileSystem(hadoopConf)

    val droppedAmount = if (enableDropPartitions) {
      dropPartitions(catalog, fs)
    } else 0
    val addedAmount = if (enableAddPartitions) {
      val threshold = spark.sparkContext.conf.get(RDD_PARALLEL_LISTING_THRESHOLD)
      val pathFilter = getPathFilter(hadoopConf)

      // Directory scanning may fan out over a parallel collection; the pool is
      // always shut down, even if the scan throws.
      val evalPool = ThreadUtils.newForkJoinPool("RepairTableCommand", 8)
      val partitionSpecsAndLocs: GenSeq[(TablePartitionSpec, Path)] =
        try {
          scanPartitions(spark, fs, pathFilter, root, Map(), table.partitionColumnNames, threshold,
            spark.sessionState.conf.resolver, new ForkJoinTaskSupport(evalPool)).seq
        } finally {
          evalPool.shutdown()
        }
      val total = partitionSpecsAndLocs.length
      logInfo(s"Found $total partitions in $root")

      val partitionStats = if (spark.sqlContext.conf.gatherFastStats) {
        gatherPartitionStats(spark, partitionSpecsAndLocs, fs, pathFilter, threshold)
      } else {
        GenMap.empty[String, PartitionStatistics]
      }
      logInfo(s"Finished to gather the fast stats for all $total partitions.")

      addPartitions(spark, table, partitionSpecsAndLocs, partitionStats)
      total
    } else 0
    // Updates the table to indicate that its partition metadata is stored in the Hive metastore.
    // This is always the case for Hive format tables, but is not true for Datasource tables created
    // before Spark 2.1 unless they are converted via `msck repair table`.
    spark.sessionState.catalog.alterTable(table.copy(tracksPartitionsInCatalog = true))
    spark.catalog.refreshTable(tableIdentWithDB)
    logInfo(s"Recovered all partitions: added ($addedAmount), dropped ($droppedAmount).")
    Seq.empty[Row]
  }

  // Recursively walks `path`, matching one partition column per directory level
  // (`col=value` names). Returns the fully-resolved (spec, location) pairs.
  // Listing switches to a parallel collection once the directory count at a
  // level exceeds `threshold` (or we are deep enough in the hierarchy).
  private def scanPartitions(
      spark: SparkSession,
      fs: FileSystem,
      filter: PathFilter,
      path: Path,
      spec: TablePartitionSpec,
      partitionNames: Seq[String],
      threshold: Int,
      resolver: Resolver,
      evalTaskSupport: ForkJoinTaskSupport): GenSeq[(TablePartitionSpec, Path)] = {
    if (partitionNames.isEmpty) {
      return Seq(spec -> path)
    }

    val statuses = fs.listStatus(path, filter)
    val statusPar: GenSeq[FileStatus] =
      if (partitionNames.length > 1 && statuses.length > threshold || partitionNames.length > 2) {
        // parallelize the list of partitions here, then we can have better parallelism later.
        val parArray = new ParVector(statuses.toVector)
        parArray.tasksupport = evalTaskSupport
        parArray.seq
      } else {
        statuses
      }
    statusPar.flatMap { st =>
      val name = st.getPath.getName
      if (st.isDirectory && name.contains("=")) {
        val ps = name.split("=", 2)
        val columnName = ExternalCatalogUtils.unescapePathName(ps(0))
        // TODO: Validate the value
        val value = ExternalCatalogUtils.unescapePathName(ps(1))
        if (resolver(columnName, partitionNames.head)) {
          scanPartitions(spark, fs, filter, st.getPath, spec ++ Map(partitionNames.head -> value),
            partitionNames.drop(1), threshold, resolver, evalTaskSupport)
        } else {
          logWarning(
            s"expected partition column ${partitionNames.head}, but got ${ps(0)}, ignoring it")
          Seq.empty
        }
      } else {
        // Files and non-`col=value` directories are skipped with a warning.
        logWarning(s"ignore ${new Path(path, name)}")
        Seq.empty
      }
    }
  }

  // Collects per-partition (file count, total size) keyed by location string.
  // Above `threshold` partitions, listing is distributed as a Spark job so the
  // driver does not list every directory sequentially.
  private def gatherPartitionStats(
      spark: SparkSession,
      partitionSpecsAndLocs: GenSeq[(TablePartitionSpec, Path)],
      fs: FileSystem,
      pathFilter: PathFilter,
      threshold: Int): GenMap[String, PartitionStatistics] = {
    if (partitionSpecsAndLocs.length > threshold) {
      val hadoopConf = spark.sessionState.newHadoopConf()
      val serializableConfiguration = new SerializableConfiguration(hadoopConf)
      val serializedPaths = partitionSpecsAndLocs.map(_._2.toString).toArray

      // Set the number of parallelism to prevent following file listing from generating many tasks
      // in case of large #defaultParallelism.
      val numParallelism = Math.min(serializedPaths.length,
        Math.min(spark.sparkContext.defaultParallelism, 10000))
      // gather the fast stats for all the partitions otherwise Hive metastore will list all the
      // files for all the new partitions in sequential way, which is super slow.
      logInfo(s"Gather the fast stats in parallel using $numParallelism tasks.")
      spark.sparkContext.parallelize(serializedPaths, numParallelism)
        .mapPartitions { paths =>
          val pathFilter = getPathFilter(serializableConfiguration.value)
          paths.map(new Path(_)).map{ path =>
            val fs = path.getFileSystem(serializableConfiguration.value)
            val statuses = fs.listStatus(path, pathFilter)
            (path.toString, PartitionStatistics(statuses.length, statuses.map(_.getLen).sum))
          }
        }.collectAsMap()
    } else {
      partitionSpecsAndLocs.map { case (_, location) =>
        val statuses = fs.listStatus(location, pathFilter)
        (location.toString, PartitionStatistics(statuses.length, statuses.map(_.getLen).sum))
      }.toMap
    }
  }

  // Registers the discovered partitions in the catalog, attaching the fast
  // stats (when gathered) as partition parameters.
  private def addPartitions(
      spark: SparkSession,
      table: CatalogTable,
      partitionSpecsAndLocs: GenSeq[(TablePartitionSpec, Path)],
      partitionStats: GenMap[String, PartitionStatistics]): Unit = {
    val total = partitionSpecsAndLocs.length
    var done = 0L
    // Hive metastore may not have enough memory to handle millions of partitions in single RPC,
    // we should split them into smaller batches. Since Hive client is not thread safe, we cannot
    // do this in parallel.
    val batchSize = 100
    partitionSpecsAndLocs.toIterator.grouped(batchSize).foreach { batch =>
      val now = MILLISECONDS.toSeconds(System.currentTimeMillis())
      val parts = batch.map { case (spec, location) =>
        val params = partitionStats.get(location.toString).map {
          case PartitionStatistics(numFiles, totalSize) =>
            // This two fast stat could prevent Hive metastore to list the files again.
            Map(NUM_FILES -> numFiles.toString,
              TOTAL_SIZE -> totalSize.toString,
              // Workaround a bug in HiveMetastore that try to mutate a read-only parameters.
              // see metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
              DDL_TIME -> now.toString)
        }.getOrElse(Map.empty)
        // inherit table storage format (possibly except for location)
        CatalogTablePartition(
          spec,
          table.storage.copy(locationUri = Some(location.toUri)),
          params)
      }
      spark.sessionState.catalog.createPartitions(tableName, parts, ignoreIfExists = true)
      done += parts.length
      logDebug(s"Recovered ${parts.length} partitions ($done/$total so far)")
    }
  }

  // Drops the partitions that do not exist in the file system
  private def dropPartitions(catalog: SessionCatalog, fs: FileSystem): Int = {
    val dropPartSpecs = ThreadUtils.parmap(
      catalog.listPartitions(tableName),
      "RepairTableCommand: non-existing partitions",
      maxThreads = 8) { partition =>
      partition.storage.locationUri.flatMap { uri =>
        if (fs.exists(new Path(uri))) None else Some(partition.spec)
      }
    }.flatten
    catalog.dropPartitions(
      tableName,
      dropPartSpecs,
      ignoreIfNotExists = true,
      purge = false,
      // Since we have already checked that partition directories do not exist, we can avoid
      // additional calls to the file system at the catalog side by setting this flag.
      retainData = true)
    dropPartSpecs.length
  }
}
/**
* A command that sets the location of a table or a partition.
*
* For normal tables, this just sets the location URI in the table/partition's storage format.
* For datasource tables, this sets a "path" parameter in the table/partition's serde properties.
*
* The syntax of this command is:
* {{{
* ALTER TABLE table_name [PARTITION partition_spec] SET LOCATION "loc";
* }}}
*/
case class AlterTableSetLocationCommand(
    tableName: TableIdentifier,
    partitionSpec: Option[TablePartitionSpec],
    location: String)
  extends LeafRunnableCommand {

  /**
   * Points either the whole table (no partition spec) or one partition at a
   * new location, then refreshes the table and updates its statistics.
   */
  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    val table = catalog.getTableMetadata(tableName)
    val newUri = CatalogUtils.stringToURI(location)

    partitionSpec match {
      case None =>
        // No partition given: the table's own storage descriptor is updated.
        catalog.alterTable(table.withNewStorage(locationUri = Some(newUri)))
      case Some(spec) =>
        DDLUtils.verifyPartitionProviderIsHive(
          sparkSession, table, "ALTER TABLE ... SET LOCATION")
        // Normalize the spec, then rewrite just that partition's location.
        val normalized = PartitioningUtils.normalizePartitionSpec(
          spec,
          table.partitionSchema,
          table.identifier.quotedString,
          sparkSession.sessionState.conf.resolver)
        val part = catalog.getPartition(table.identifier, normalized)
        val updated = part.copy(storage = part.storage.copy(locationUri = Some(newUri)))
        catalog.alterPartitions(table.identifier, Seq(updated))
    }

    sparkSession.catalog.refreshTable(table.identifier.quotedString)
    CommandUtils.updateTableStats(sparkSession, table)
    Seq.empty[Row]
  }
}
object DDLUtils {
  val HIVE_PROVIDER = "hive"

  // A table is a Hive table iff its provider is (case-insensitively) "hive".
  def isHiveTable(table: CatalogTable): Boolean = {
    isHiveTable(table.provider)
  }

  def isHiveTable(provider: Option[String]): Boolean = {
    provider.isDefined && provider.get.toLowerCase(Locale.ROOT) == HIVE_PROVIDER
  }

  // A datasource table has a provider that is defined and is anything but "hive".
  // Note a table with no provider is neither a Hive table nor a datasource table
  // by these two predicates.
  def isDatasourceTable(table: CatalogTable): Boolean = {
    table.provider.isDefined && table.provider.get.toLowerCase(Locale.ROOT) != HIVE_PROVIDER
  }

  // Wraps the catalog table in a HiveTableRelation with all columns nullable.
  def readHiveTable(table: CatalogTable): HiveTableRelation = {
    HiveTableRelation(
      table,
      // Hive table columns are always nullable.
      table.dataSchema.asNullable.toAttributes,
      table.partitionSchema.asNullable.toAttributes)
  }

  /**
   * Throws a standard error for actions that require partitionProvider = hive.
   */
  def verifyPartitionProviderIsHive(
      spark: SparkSession, table: CatalogTable, action: String): Unit = {
    val tableName = table.identifier.table
    if (!spark.sqlContext.conf.manageFilesourcePartitions && isDatasourceTable(table)) {
      throw new AnalysisException(
        s"$action is not allowed on $tableName since filesource partition management is " +
          "disabled (spark.sql.hive.manageFilesourcePartitions = false).")
    }
    if (!table.tracksPartitionsInCatalog && isDatasourceTable(table)) {
      throw new AnalysisException(
        s"$action is not allowed on $tableName since its partition metadata is not stored in " +
          "the Hive metastore. To import this information into the metastore, run " +
          s"`msck repair table $tableName`")
    }
  }

  /**
   * If the command ALTER VIEW is to alter a table or ALTER TABLE is to alter a view,
   * issue an exception [[AnalysisException]].
   *
   * Note: temporary views can be altered by both ALTER VIEW and ALTER TABLE commands,
   * since temporary views can be also created by CREATE TEMPORARY TABLE. In the future,
   * when we decided to drop the support, we should disallow users to alter temporary views
   * by ALTER TABLE.
   */
  def verifyAlterTableType(
      catalog: SessionCatalog,
      tableMetadata: CatalogTable,
      isView: Boolean): Unit = {
    if (!catalog.isTempView(tableMetadata.identifier)) {
      tableMetadata.tableType match {
        case CatalogTableType.VIEW if !isView =>
          throw new AnalysisException(
            "Cannot alter a view with ALTER TABLE. Please use ALTER VIEW instead")
        case o if o != CatalogTableType.VIEW && isView =>
          throw new AnalysisException(
            s"Cannot alter a table with ALTER VIEW. Please use ALTER TABLE instead")
        case _ =>
      }
    }
  }

  private[sql] def checkDataColNames(table: CatalogTable): Unit = {
    checkDataColNames(table, table.dataSchema.fieldNames)
  }

  // Validates column names against format-specific restrictions. For Hive
  // provider tables the effective format is inferred from the storage serde;
  // only ORC and Parquet impose field-name checks here.
  private[sql] def checkDataColNames(table: CatalogTable, colNames: Seq[String]): Unit = {
    table.provider.foreach {
      _.toLowerCase(Locale.ROOT) match {
        case HIVE_PROVIDER =>
          val serde = table.storage.serde
          if (serde == HiveSerDe.sourceToSerDe("orc").get.serde) {
            OrcFileFormat.checkFieldNames(colNames)
          } else if (serde == HiveSerDe.sourceToSerDe("parquet").get.serde ||
            serde == Some("parquet.hive.serde.ParquetHiveSerDe") ||
            serde == Some("org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe")) {
            ParquetSchemaConverter.checkFieldNames(colNames)
          }
        case "parquet" => ParquetSchemaConverter.checkFieldNames(colNames)
        case "orc" => OrcFileFormat.checkFieldNames(colNames)
        case _ =>
      }
    }
  }

  /**
   * Throws exception if outputPath tries to overwrite inputpath.
   */
  def verifyNotReadPath(query: LogicalPlan, outputPath: Path) : Unit = {
    val inputPaths = query.collect {
      case LogicalRelation(r: HadoopFsRelation, _, _, _) =>
        r.location.rootPaths
    }.flatten

    if (inputPaths.contains(outputPath)) {
      throw new AnalysisException(
        "Cannot overwrite a path that is also being read from.")
    }
  }
}
| BryanCutler/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala | Scala | apache-2.0 | 37,460 |
package ir.ast
import ir._
import ir.interpreter.Interpreter.ValueMap
import lift.arithmetic.Cst
/**
* Indexed array UnsafeArrayAccess pattern
*
* Code for this pattern can be generated
*/
case class UnsafeArrayAccess(index: Expr) extends Pattern(arity = 1) {
  // Returns the element type of the array argument. The index expression is
  // type-checked first; any non-array argument type is rejected.
  override def checkType(argType: Type,
                         setType: Boolean): Type = {
    TypeChecker.check(index)
    argType match {
      case ArrayTypeWS(t, Cst(1)) =>
        t // break with the definition of searches/reductions, as we have no "partial" accesses
      case ArrayType(t) =>
        t
      case _ => throw new TypeException(argType, "ArrayType", this)
    }
  }

  // Interpreter semantics: evaluate the index and read that element of the
  // single array argument. No bounds check is performed here ("unsafe").
  override def eval(valueMap: ValueMap, args: Any*): Any = {
    assert(args.length == arity)
    args.head match {
      case a: Array[_] => a(index.eval(valueMap).asInstanceOf[Int])
    }
  }
}
| lift-project/lift | src/main/ir/ast/UnsafeArrayAccess.scala | Scala | mit | 855 |
/*****************************************
Emitting Generated Code
*******************************************/
class Snippet extends ((Int)=>(Int)) {
  /**
   * Returns 0 when the input is 0, otherwise 1.
   *
   * The generated original re-tested `x0 == 0` inside the branch that was
   * already guarded by it, which made the `2` result unreachable; the dead
   * re-test and branch are removed here without changing behavior.
   */
  def apply(x0: Int): Int = {
    if (x0 == 0) 0 else 1
  }
}
/*****************************************
End of Generated Code
*******************************************/
| RomanTsegelskyi/lms-truffle | src/out/dynvar0.check.scala | Scala | gpl-2.0 | 451 |
package tin
import bin.A
import bin.B
import bin.C
import bin.D
import bon.G
class FixImport extends G {
  // NOTE(review): this appears to be an auto-import test fixture — the /*ref*/
  // marker and the unresolved-looking `E` are consumed by the test harness
  // (the trailing block comment below shows the expected rewritten file), so
  // the code must stay byte-for-byte as-is.
  val x = new /*ref*/E
}
/*
package tin
import bin._
import _root_.tin.bon.G
class FixImport extends G {
val x = new E
}
*/ | advancedxy/intellij-scala | testdata/autoImport/fixingImport/FixImport.scala | Scala | apache-2.0 | 234 |
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.zoo.pipeline.api.keras.layers
import com.intel.analytics.bigdl.nn.abstractnn.AbstractModule
import com.intel.analytics.bigdl.nn.keras.{Embedding => BEmbedding}
import com.intel.analytics.bigdl.optim.Regularizer
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.Shape
import com.intel.analytics.bigdl.nn.{AddConstant => TAddConstant, InitializationMethod, LookupTable, RandomUniform, Sequential => TSequential}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.zoo.pipeline.api.Net
import com.intel.analytics.zoo.pipeline.api.keras.layers.utils.KerasUtils
import scala.reflect.ClassTag
/**
* Turn non-negative integers (indices) into dense vectors of fixed size.
* The input of this layer should be 2D.
*
* This layer can only be used as the first layer in a model, you need to provide the argument
* inputShape (a Single Shape, does not include the batch dimension).
*
* @param inputDim Int > 0. Size of the vocabulary, ie. 1 + maximum integer
* index occurring in the input data.
* Each word index in the input should be within range [0, inputDim-1].
* @param outputDim Int > 0. Dimension of the dense embedding.
* @param init Initialization method for the weights of the layer. Default is RandomUniform.
* You can also pass in corresponding string representations such as 'uniform'
* or 'normal', etc. for simple init methods in the factory method.
* @param initWeights Tensor. Initial weights set to this layer, which should be a Tensor of
* size (inputDim, outputDim). Default is null and in this case weights are
* initialized by the initialization method specified by 'init'.
* Otherwise, 'weights' will override 'init' to take effect.
* @param trainable Whether this layer is trainable or not. Default is true.
* @param wRegularizer An instance of [[Regularizer]], (eg. L1 or L2 regularization),
* applied to the embedding matrix. Default is null.
* @param inputShape A Single Shape, does not include the batch dimension.
* @param maskZero: if maskZero is set to true, the input whose value equals `paddingValue`
* the output will be masked to zero vector.
* @param paddingValue padding value, default 0
* @param zeroBasedId default true and input should be 0 based. Otherwise need to be 1 base
* @tparam T Numeric type of parameter(e.g. weight, bias). Only support float/double now.
*/
class Embedding[T: ClassTag](
    override val inputDim: Int,
    override val outputDim: Int,
    override val init: InitializationMethod = RandomUniform,
    val initWeights: Tensor[T] = null,
    val trainable: Boolean = true,
    wRegularizer: Regularizer[T] = null,
    inputShape: Shape = null,
    maskZero: Boolean = false,
    paddingValue: Int = 0,
    zeroBasedId: Boolean = true
  )(implicit ev: TensorNumeric[T])
  extends BEmbedding[T] (
    inputDim, outputDim, init, wRegularizer, inputShape) with Net {

  // Eager argument validation: dimensions must be positive and any explicit
  // weight tensor must be exactly (inputDim, outputDim).
  require(inputDim > 0, s"inputDim of Embedding must be a positive integer, but got $inputDim")
  require(outputDim > 0, s"outputDim of Embedding must be a positive integer, but got $outputDim")
  if (initWeights != null) {
    require(initWeights.size().sameElements(Array(inputDim, outputDim)),
      "weights size should match (inputDim, outputDim)")
  }

  // Builds the underlying module: an optional +1 id shift (when inputs are
  // 0-based) followed by a LookupTable. Explicit initWeights override the
  // configured init method; a non-trainable layer is frozen.
  override def doBuild(inputShape: Shape): AbstractModule[Tensor[T], Tensor[T], T] = {
    val model = TSequential[T]()
    if (zeroBasedId) {
      // Shift 0-based ids up by one before the lookup.
      model.add(TAddConstant(1.0))
    }
    val layer = LookupTable(
      nIndex = inputDim,
      nOutput = outputDim,
      wRegularizer = wRegularizer,
      maskZero = maskZero,
      paddingValue = paddingValue)
    if (initWeights != null) {
      layer.setWeightsBias(Array(initWeights))
    }
    else {
      layer.setInitMethod(weightInitMethod = init, biasInitMethod = init)
    }
    model.add(layer)
    if (! trainable) model.freeze()
    model.asInstanceOf[AbstractModule[Tensor[T], Tensor[T], T]]
  }

  // Serializes this layer's configuration as a Keras-2 style definition string.
  override private[zoo] def toKeras2(): String = {
    val params = Net.inputShapeToString(inputShape) ++
      Net.param(getName()) ++
      Net.param(inputDim, "input_dim") ++
      Net.param(outputDim, "output_dim") ++
      Net.param(maskZero, "mask_zero")
    Net.kerasDef(this, params)
  }
}
object Embedding {
  /**
   * Factory for [[Embedding]] taking the init method as a string name (e.g.
   * "uniform", "normal") and an optional fixed `inputLength`; a non-positive
   * `inputLength` leaves the input shape unspecified.
   */
  def apply[@specialized(Float, Double) T: ClassTag](
      inputDim: Int,
      outputDim: Int,
      init: String = "uniform",
      weights: Tensor[T] = null,
      trainable: Boolean = true,
      wRegularizer: Regularizer[T] = null,
      inputLength: Int = -1,
      maskZero: Boolean = false,
      paddingValue: Int = 0,
      zeroBasedId: Boolean = true
    )(implicit ev: TensorNumeric[T]): Embedding[T] = {
    // Remark: It is possible that inputShape is specified in Input node or layer.
    val shape = if (inputLength > 0) Shape(inputLength) else null
    new Embedding[T](inputDim, outputDim, KerasUtils.getInitMethod(init),
      weights, trainable, wRegularizer, shape, maskZero, paddingValue, zeroBasedId)
  }
}
| intel-analytics/analytics-zoo | zoo/src/main/scala/com/intel/analytics/zoo/pipeline/api/keras/layers/Embedding.scala | Scala | apache-2.0 | 6,041 |
/* SyncQueue.scala
*
* Copyright (c) 2013 bizo.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package squishy
import com.amazonaws.services.sqs.model._
/**
* A wrapper around a synchronous SNS queue that supports idiomatic Scala.
*
* Note: implementations will transparently acquire and cache this queue's URL when required. See the note about queue
* URLs in the class description of [[squishy.Queue]].
*
* @tparam M The type of message that can be sent to and received from this queue.
*/
trait SyncQueue[M] extends Queue[M] {
  /** Returns true if this queue exists in the cloud. */
  def exists: Boolean = queueUrl.isDefined

  /** Returns the URL of this queue if it exists or `None` if it does not. */
  def queueUrl: Option[String] =
    cachedQueueUrl match {
      case Some(queueUrl) =>
        queueUrl
      case None =>
        val request = newGetQueueUrlRequest()
        val result = retryPolicy.retry("Queue(%s).queueUrl" format queueName) {
          try {
            Some(sqsClient.getQueueUrl(request))
          } catch {
            // A missing queue is an expected outcome, not an error worth retrying.
            case e: QueueDoesNotExistException => None
          }
        }
        // Cache Some(Some(url)) for an existing queue or Some(None) for a confirmed-absent
        // one. Only the first writer stores a value, so this lookup never clobbers a URL
        // cached concurrently by createQueue / deleteQueue_!.
        val queueUrl = Some(result map getQueueUrlResultToQueueUrl)
        synchronized(if (cachedQueueUrl.isEmpty) cachedQueueUrl = queueUrl)
        cachedQueueUrl.get
    }

  /** Returns all of the attributes of this queue if it exists or an empty set if it does not. */
  def attributes: Queue.AttributeSet =
    attributes(Queue.keys: _*)

  /**
   * Returns the specified attributes of this queue if it exists or an empty set if it does not.
   *
   * @param keys The keys that identify the attributes to return.
   */
  def attributes(keys: Queue.Key[_]*): Queue.AttributeSet =
    queueUrl match {
      case Some(queueUrl) =>
        val request = newGetQueueAttributesRequest(queueUrl, keys: _*)
        val result = retryPolicy.retry(s"Queue($queueName).attributes")(sqsClient.getQueueAttributes(request))
        getQueueAttributesResultToAttributeSet(result)
      case None =>
        Queue.AttributeSet()
    }

  /**
   * Sets the attributes of this queue, throwing an exception if it does not exist.
   *
   * @param attributes The attributes to configure for this queue.
   */
  def attributes_=(attributes: Seq[Queue.MutableAttribute[_]]): Unit = {
    val request = newSetQueueAttributesRequest(requireQueueUrl, attributes)
    retryPolicy.retry(s"Queue($queueName).attributes = ...")(sqsClient.setQueueAttributes(request))
  }

  /**
   * Creates this queue in the cloud if it does not already exist.
   *
   * All unspecified attributes will default to the values specified by Amazon SQS.
   *
   * @param attributes The attributes to configure for this queue.
   */
  def createQueue(attributes: Queue.MutableAttribute[_]*): Unit = {
    val request = newCreateQueueRequest(attributes)
    val result = retryPolicy.retry(s"Queue($queueName).createQueue")(sqsClient.createQueue(request))
    // Unconditionally refresh the cached URL with the one returned by SQS.
    val queueUrl = Some(Some(createQueueResultToQueueUrl(result)))
    synchronized(cachedQueueUrl = queueUrl)
  }

  /** Deletes this queue in the cloud if it exists. */
  def deleteQueue_!(): Unit = {
    val request = newDeleteQueueRequest(requireQueueUrl)
    retryPolicy.retry(s"Queue($queueName).deleteQueue_!")(sqsClient.deleteQueue(request))
    // Cache Some(None): the queue is now known NOT to exist (distinct from "not looked up").
    val queueUrl = Some(None)
    synchronized(cachedQueueUrl = queueUrl)
  }

  /**
   * Sends a message to this queue.
   *
   * All optional parameters of this method will default to the values specified by Amazon SQS.
   *
   * @param msg The body of the message to send.
   * @param delaySeconds The number of seconds to delay message availability.
   */
  def send(msg: M, delaySeconds: Int = -1): Message.Sent[M] = {
    val request = newSendMessageRequest(requireQueueUrl, msg, delaySeconds)
    val result = retryPolicy.retry(s"Queue($queueName).send")(sqsClient.sendMessage(request))
    sendMessageResultToMessage(result, msg)
  }

  /**
   * Sends a batch of messages to this queue.
   *
   * All optional parameters of this method will default to the values specified by Amazon SQS.
   *
   * @param entries The entries representing the messages to send. These must be of type `M` for immediate messages or
   * `(M, Int)` for messages with an initial delay.
   */
  def sendBatch[E: BatchEntry](entries: E*): Seq[Message[M]] = {
    // The BatchEntry type class normalizes both plain messages and (message, delay) pairs.
    val entry = implicitly[BatchEntry[E]]
    val messages = entries map entry
    val request = newSendMessageBatchRequest(requireQueueUrl, messages)
    val result = retryPolicy.retry(s"Queue($queueName).sendBatch")(sqsClient.sendMessageBatch(request))
    sendMessageBatchResultToMessages(result, messages)
  }

  /**
   * Attempts to receive one or more messages from this queue.
   *
   * All optional parameters of this method will default to the values specified by Amazon SQS.
   *
   * @param maxNumberOfMessages The maximum number of messages to receive.
   * @param visibilityTimeout The number of seconds to prevent other consumers from seeing received messages.
   * @param waitTimeSeconds The maximum number of seconds to wait for a message.
   * @param attributes The keys of the message attributes that should be returned along with the messages.
   */
  def receive(
    maxNumberOfMessages: Int = -1,
    visibilityTimeout: Int = -1,
    waitTimeSeconds: Int = -1,
    attributes: Seq[Message.Key[_]] = Seq.empty //
    ): Seq[Message.Receipt[M]] = {
    val request = newReceiveMessageRequest(
      requireQueueUrl,
      maxNumberOfMessages,
      visibilityTimeout,
      waitTimeSeconds,
      attributes)
    val result = retryPolicy.retry(s"Queue($queueName).receive")(sqsClient.receiveMessage(request))
    receiveMessageResultToMessages(result)
  }

  /**
   * Attempts to extend the time that a message is invisible to other consumers.
   *
   * @param receipt The receipt of the message to modify the visibility of.
   * @param visibilityTimeout The number of seconds to extends the message's visibility timeout.
   */
  def changeVisibility(receipt: Message.Receipt[M], visibilityTimeout: Int): Message.Changed[M] = {
    val request = newChangeMessageVisibilityRequest(requireQueueUrl, receipt, visibilityTimeout)
    retryPolicy.retry(s"Queue($queueName).changeVisibility")(sqsClient.changeMessageVisibility(request))
    changeMessageVisibilityResultToMessage(receipt)
  }

  /**
   * Attempts to extend the time that a batch of messages is invisible to other consumers.
   *
   * @param entries The entries representing the messages to change the visibility of with their new visibility timeout.
   */
  def changeVisibilityBatch(entries: (Message.Receipt[M], Int)*): Seq[Message[M]] = {
    val request = newChangeMessageVisibilityBatchRequest(requireQueueUrl, entries)
    val result = retryPolicy.retry(s"Queue($queueName).changeVisibilityBatch") {
      sqsClient.changeMessageVisibilityBatch(request)
    }
    changeMessageVisibilityBatchResultToMessages(result, entries)
  }

  /**
   * Attempts to delete a message from this queue.
   *
   * @param receipt The receipt of the message to delete from the queue.
   */
  def delete(receipt: Message.Receipt[M]): Message.Deleted[M] = {
    val request = newDeleteMessageRequest(requireQueueUrl, receipt)
    retryPolicy.retry(s"Queue($queueName).delete")(sqsClient.deleteMessage(request))
    deleteMessageResultToMessage(receipt)
  }

  /**
   * Attempts to delete a batch of messages from this queue.
   *
   * @param receipts The receipts of the messages to delete from the queue.
   */
  def deleteBatch(receipts: Message.Receipt[M]*): Seq[Message[M]] = {
    val request = newDeleteMessageBatchRequest(requireQueueUrl, receipts)
    val result = retryPolicy.retry(s"Queue($queueName).deleteBatch")(sqsClient.deleteMessageBatch(request))
    deleteMessageBatchResultToMessages(result, receipts)
  }

  /** Returns the queue URL or throws an exception if it does not exist. */
  private def requireQueueUrl: String =
    queueUrl getOrElse { throw new QueueDoesNotExistException(s"Queue $queueName does not exist.") }
} | lpryor/squishy | src/main/scala/squishy/SyncQueue.scala | Scala | apache-2.0 | 8,569 |
package com.softwaremill.codebrag.service.email.sender
case class AttachmentDescription(content: Array[Byte], filename: String, contentType: String)
| softwaremill/codebrag | codebrag-service/src/main/scala/com/softwaremill/codebrag/service/email/sender/AttachmentDescription.scala | Scala | agpl-3.0 | 150 |
package scalanlp.classify
import scalanlp.data.Example
import scalala.tensor.Counter
import scalala.generic.collection.CanCreateZerosLike
import scalala.operators._
/**
*
* @author dlwh
*/
/** A trained multiclass perceptron: classifies via the highest-scoring per-label weight vector. */
// NOTE(review): @serializable is deprecated in modern Scala (extend Serializable instead);
// kept as-is here for compatibility with the Scala version this library targets — confirm.
@serializable
trait Perceptron[L,-T] extends Classifier[L,T];
object Perceptron {
  /**
   * Trains a multiclass perceptron by the classic mistake-driven update rule.
   *
   * @param maxPasses Number of full passes over the training data.
   */
  class Trainer[L,T](maxPasses: Int = 20)(implicit dotProduct: BinaryOp[T,T,OpMulInner,Double],
                                         zeros: CanCreateZerosLike[T,T],
                                         numeric: T=>MutableNumericOps[T],
                                         upAdd: BinaryUpdateOp[T,T,OpAdd],
                                         upSub: BinaryUpdateOp[T,T,OpSub]) extends Classifier.Trainer[L,T] {
    type MyClassifier = Perceptron[L,T];
    def train(it: Iterable[Example[L,T]]) = {
      // One weight vector per label, created lazily on the first update for that label.
      val weights = new collection.mutable.HashMap[L,T]();
      // The returned classifier closes over `weights`, so it observes updates made
      // during training and is used below to score each example as we go.
      val result = new Perceptron[L,T] {
        def scores(o: T) = {
          val r = Counter[L,Double];
          for((l,w) <- weights) {
            r(l) = w dot o;
          }
          r
        }
      };
      for( i <- 0 until maxPasses; ex <- it) {
        val l = ex.label;
        val feats = ex.features;
        val ctr = result.scores(feats);
        // Mistake-driven update: on a wrong (or empty) prediction, add the feature
        // vector to the gold label's weights and subtract it from the predicted label's.
        if(ctr.size == 0 || ctr.argmax != l) {
          weights.getOrElseUpdate(l,zeros(feats)) += feats;
          if(ctr.size != 0) {
            weights.getOrElseUpdate(ctr.argmax,zeros(feats)) -= feats;
          }
        }
      }
      result;
    }
  }
}
| MLnick/scalanlp-core | learn/src/main/scala/scalanlp/classify/Perceptron.scala | Scala | apache-2.0 | 1,406 |
/**
* (C) Copyright IBM Corp. 2015 - 2017
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.ibm.sparktc.sparkbench.workload.exercise
import com.ibm.sparktc.sparkbench.utils.GeneralFunctions.{getOrDefault, time}
import com.ibm.sparktc.sparkbench.utils.SaveModes
import com.ibm.sparktc.sparkbench.workload.{Workload, WorkloadDefaults}
import org.apache.spark.sql.{DataFrame, SparkSession}
import scala.math.random
/**
 * Result row produced by one run of the SparkPi workload.
 *
 * @param name           Workload name (always "sparkpi").
 * @param timestamp      Wall-clock millis at which the run started (System.currentTimeMillis).
 * @param total_runtime  Elapsed runtime as measured by `time` (presumably millis — confirm
 *                       against GeneralFunctions.time).
 * @param pi_approximate Monte Carlo estimate of pi.
 */
case class SparkPiResult(
                          name: String,
                          timestamp: Long,
                          total_runtime: Long,
                          pi_approximate: Double
                        )
object SparkPi extends WorkloadDefaults {
  /** Registry name under which this workload is looked up in configuration. */
  val name = "sparkpi"

  /** Builds a SparkPi workload from its raw configuration map. */
  def apply(m: Map[String, Any]) = {
    val inputPath = m.get("input").map(_.asInstanceOf[String])
    new SparkPi(
      input = inputPath,
      output = None,
      slices = getOrDefault[Int](m, "slices", 2))
  }
}
case class SparkPi(input: Option[String] = None,
                   output: Option[String] = None,
                   saveMode: String = SaveModes.error,
                   slices: Int
                  ) extends Workload {

  /**
   * Monte Carlo estimate of pi, adapted from the upstream Spark example:
   * https://github.com/apache/spark/blob/master/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala
   */
  private def sparkPi(spark: SparkSession): Double = {
    // Cap the sample count so 100000 * slices cannot overflow an Int.
    val samples = math.min(100000L * slices, Int.MaxValue).toInt
    // Count how many uniformly random points in [-1, 1]^2 land inside the unit circle.
    val hits = spark.sparkContext
      .parallelize(1 until samples, slices)
      .map { _ =>
        val x = random * 2 - 1
        val y = random * 2 - 1
        if ((x * x) + (y * y) <= 1) 1 else 0
      }
      .reduce(_ + _)
    // `1 until samples` yields samples - 1 points, hence the denominator.
    4.0 * hits / (samples - 1)
  }

  /** Runs the estimate and wraps timing plus the result into a single-row DataFrame. */
  override def doWorkload(df: Option[DataFrame] = None, spark: SparkSession): DataFrame = {
    val startedAt = System.currentTimeMillis()
    val (elapsed, piEstimate) = time(sparkPi(spark))
    spark.createDataFrame(Seq(SparkPiResult("sparkpi", startedAt, elapsed, piEstimate)))
  }
}
| ecurtin/spark-bench | cli/src/main/scala/com/ibm/sparktc/sparkbench/workload/exercise/SparkPi.scala | Scala | apache-2.0 | 2,470 |
package fpinscala.chap04.errorhandling
import scala.util.{Try, Success, Failure}
object Parsing {

  /**
   * Traverse a list with a function that can fail.
   *
   * Applies `f` to each element in order; if every application yields `Some`,
   * collects the results (preserving order) into one `Some[List[B]]`. Any
   * element for which `f` yields `None` makes the whole result `None`.
   *
   * @param as List of type A.
   * @param f  A function of type A => Option[B].
   * @return   An Option[List[B]].
   */
  def traverse[A, B](as: List[A])(f: A => Option[B]): Option[List[B]] =
    as.foldRight[Option[List[B]]](Some(Nil)) { (head, accOpt) =>
      accOpt.flatMap(acc => f(head).map(_ :: acc))
    }

  /**
   * Parse a list of strings as Doubles.
   *
   * @return Some list of all parsed values, or None if any string fails to parse.
   */
  def parseDoubles(ss: List[String]): Option[List[Double]] =
    traverse(ss)(s => Try(s.toDouble).toOption)
}
object scalaErrorhandling{

  // Define some utility functions

  /**
   * Evaluate and nicely print expression - let any
   * exceptions happen before anything printed.
   */
  def evalP0[A](expr: => A, fname: String): Unit = {
    val result = expr   // Let any exceptions happen before anything printed.
    print(fname ++ " = "); println(result)
  }

  /**
   * Evaluate and nicely print function of one argument - let any
   * exceptions happen before anything printed.
   */
  def evalP1[A,B](arg: => A, f: A => B, fname: String): Unit = {
    val result = f(arg)
    print(fname); print("("); print(arg); print(") = ")
    println(result)
  }

  /**
   * Evaluate and nicely print function of two arguments - let any
   * exceptions happen before anything printed.
   */
  def evalP2[A,B,C](arg1: => A, arg2: => B, f: (A,B) => C,
                    fname: String): Unit = {
    val result = f(arg1, arg2)
    print(fname); print("("); print(arg1)
    print(", "); print(arg2); print(") = ")
    println(result)
  }

  /** Computes the mean of a dataset of Doubles; None for an empty dataset. */
  def mean(xs: Seq[Double]): Option[Double] =
    if (xs.isEmpty) None
    else Some(xs.sum/xs.size)

  /** Test package */
  def main(args: Array[String]): Unit = {
    import Parsing._

    // Test flatten and flatmap
    println("\nTest flatten:\n")
    val mule = Some(42)
    val goat = Some(Some(42))
    val frog: Option[Option[Double]] = None

    evalP0(mule, "mule")
    // evalP0(mule.flatten, "mule.flatten")  /* Does not compile. */
    evalP0(goat, "goat")
    evalP0(goat.flatten, "goat.flatten")
    evalP0(frog, "frog")
    evalP0(frog.flatten, "frog.flatten")

    println("\nTest traverse via parseDoubles:\n")
    // Three cases: all strings parse, one string fails, and the empty list.
    val goodDoubleStrings = List("1.2", "3.14159", "10.3", "6", "7.1")
    val goodDoubles = parseDoubles(goodDoubleStrings)
    val goodMean = goodDoubles flatMap mean
    val failDoubleStrings = List("1.2", "3.14159", "Fred", "6", "8.9")
    val failDoubles = parseDoubles(failDoubleStrings)
    val failMean = failDoubles flatMap mean
    val noDoubleStrings: List[String] = List()
    val noDoubles = parseDoubles(noDoubleStrings)
    val noMean = noDoubles flatMap mean

    evalP0(goodDoubleStrings, "goodDoubleStrings")
    evalP0(goodDoubles, "goodDoubles")
    evalP0(goodMean, "goodMean")
    evalP0(failDoubleStrings, "failDoubleStrings")
    evalP0(failDoubles, "failDoubles")
    evalP0(failMean, "failMean")
    evalP0(noDoubleStrings, "noDoubleStrings")
    evalP0(noDoubles, "noDoubles")
    evalP0(noMean, "noMean")

    println()
  }
}
| grscheller/scheller-linux-archive | grok/Scala2/fpinscala/src/main/scala/fpinscala/errorhandling/exerciseCode/scalaErrorhandling.scala | Scala | bsd-3-clause | 3,602 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import org.scalatest.junit.JUnit3Suite
import kafka.zk.ZooKeeperTestHarness
import kafka.utils.TestUtils._
import junit.framework.Assert._
import kafka.utils.{ZkUtils, Utils, TestUtils}
import kafka.controller.{ControllerContext, LeaderIsrAndControllerEpoch, ControllerChannelManager}
import kafka.cluster.Broker
import kafka.common.ErrorMapping
import kafka.api._
class LeaderElectionTest extends JUnit3Suite with ZooKeeperTestHarness {
  val brokerId1 = 0
  val brokerId2 = 1
  // Ports are chosen at random so parallel test runs do not collide.
  val port1 = TestUtils.choosePort()
  val port2 = TestUtils.choosePort()
  val configProps1 = TestUtils.createBrokerConfig(brokerId1, port1)
  val configProps2 = TestUtils.createBrokerConfig(brokerId2, port2)

  var servers: Seq[KafkaServer] = Seq.empty[KafkaServer]

  // Set by staleControllerEpochCallback when a broker rejects a request
  // carrying an out-of-date controller epoch.
  var staleControllerEpochDetected = false

  override def setUp() {
    super.setUp()
    // start both servers
    val server1 = TestUtils.createServer(new KafkaConfig(configProps1))
    val server2 = TestUtils.createServer(new KafkaConfig(configProps2))
    servers ++= List(server1, server2)
  }

  override def tearDown() {
    servers.map(server => server.shutdown())
    // Remove each broker's on-disk log directories after shutdown.
    servers.map(server => Utils.rm(server.config.logDirs))
    super.tearDown()
  }
def testLeaderElectionAndEpoch {
// start 2 brokers
val topic = "new-topic"
val partitionId = 0
// create topic with 1 partition, 2 replicas, one on each broker
val leader1 = createTopic(zkClient, topic, partitionReplicaAssignment = Map(0 -> Seq(0, 1)), servers = servers)(0)
val leaderEpoch1 = ZkUtils.getEpochForPartition(zkClient, topic, partitionId)
debug("leader Epoc: " + leaderEpoch1)
debug("Leader is elected to be: %s".format(leader1.getOrElse(-1)))
assertTrue("Leader should get elected", leader1.isDefined)
// NOTE: this is to avoid transient test failures
assertTrue("Leader could be broker 0 or broker 1", (leader1.getOrElse(-1) == 0) || (leader1.getOrElse(-1) == 1))
assertEquals("First epoch value should be 0", 0, leaderEpoch1)
// kill the server hosting the preferred replica
servers.last.shutdown()
// check if leader moves to the other server
val leader2 = waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId,
oldLeaderOpt = if(leader1.get == 0) None else leader1)
val leaderEpoch2 = ZkUtils.getEpochForPartition(zkClient, topic, partitionId)
debug("Leader is elected to be: %s".format(leader1.getOrElse(-1)))
debug("leader Epoc: " + leaderEpoch2)
assertEquals("Leader must move to broker 0", 0, leader2.getOrElse(-1))
if(leader1.get == leader2.get)
assertEquals("Second epoch value should be " + leaderEpoch1+1, leaderEpoch1+1, leaderEpoch2)
else
assertEquals("Second epoch value should be %d".format(leaderEpoch1+1) , leaderEpoch1+1, leaderEpoch2)
servers.last.startup()
servers.head.shutdown()
Thread.sleep(zookeeper.tickTime)
val leader3 = waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId,
oldLeaderOpt = if(leader2.get == 1) None else leader2)
val leaderEpoch3 = ZkUtils.getEpochForPartition(zkClient, topic, partitionId)
debug("leader Epoc: " + leaderEpoch3)
debug("Leader is elected to be: %s".format(leader3.getOrElse(-1)))
assertEquals("Leader must return to 1", 1, leader3.getOrElse(-1))
if(leader2.get == leader3.get)
assertEquals("Second epoch value should be " + leaderEpoch2, leaderEpoch2, leaderEpoch3)
else
assertEquals("Second epoch value should be %d".format(leaderEpoch2+1) , leaderEpoch2+1, leaderEpoch3)
}
  /**
   * Verifies that a broker rejects a LeaderAndIsrRequest that carries a stale
   * controller epoch, signalling StaleControllerEpochCode back to the sender.
   */
  def testLeaderElectionWithStaleControllerEpoch() {
    // start 2 brokers
    val topic = "new-topic"
    val partitionId = 0

    // create topic with 1 partition, 2 replicas, one on each broker
    val leader1 = createTopic(zkClient, topic, partitionReplicaAssignment = Map(0 -> Seq(0, 1)), servers = servers)(0)
    val leaderEpoch1 = ZkUtils.getEpochForPartition(zkClient, topic, partitionId)
    debug("leader Epoc: " + leaderEpoch1)
    debug("Leader is elected to be: %s".format(leader1.getOrElse(-1)))
    assertTrue("Leader should get elected", leader1.isDefined)
    // NOTE: this is to avoid transient test failures
    assertTrue("Leader could be broker 0 or broker 1", (leader1.getOrElse(-1) == 0) || (leader1.getOrElse(-1) == 1))
    assertEquals("First epoch value should be 0", 0, leaderEpoch1)

    // start another controller with an id not used by the two real brokers
    val controllerId = 2
    val controllerConfig = new KafkaConfig(TestUtils.createBrokerConfig(controllerId, TestUtils.choosePort()))
    val brokers = servers.map(s => new Broker(s.config.brokerId, "localhost", s.config.port))
    val controllerContext = new ControllerContext(zkClient, 6000)
    controllerContext.liveBrokers = brokers.toSet
    val controllerChannelManager = new ControllerChannelManager(controllerContext, controllerConfig)
    controllerChannelManager.startup()
    // Send a LeaderAndIsrRequest stamped with controller epoch 0, which is older
    // than the live controller's epoch (2 below), so the broker must reject it.
    val staleControllerEpoch = 0
    val leaderAndIsr = new collection.mutable.HashMap[(String, Int), LeaderIsrAndControllerEpoch]
    leaderAndIsr.put((topic, partitionId),
      new LeaderIsrAndControllerEpoch(new LeaderAndIsr(brokerId2, List(brokerId1, brokerId2)), 2))
    val partitionStateInfo = leaderAndIsr.mapValues(l => new PartitionStateInfo(l, Set(0,1))).toMap
    val leaderAndIsrRequest = new LeaderAndIsrRequest(partitionStateInfo, brokers.toSet, controllerId,
                                                      staleControllerEpoch, 0, "")

    controllerChannelManager.sendRequest(brokerId2, leaderAndIsrRequest, staleControllerEpochCallback)
    TestUtils.waitUntilTrue(() => staleControllerEpochDetected == true,
                            "Controller epoch should be stale")
    assertTrue("Stale controller epoch not detected by the broker", staleControllerEpochDetected)

    controllerChannelManager.shutdown()
  }
private def staleControllerEpochCallback(response: RequestOrResponse): Unit = {
val leaderAndIsrResponse = response.asInstanceOf[LeaderAndIsrResponse]
staleControllerEpochDetected = leaderAndIsrResponse.errorCode match {
case ErrorMapping.StaleControllerEpochCode => true
case _ => false
}
}
} | stealthly/kafka | core/src/test/scala/unit/kafka/server/LeaderElectionTest.scala | Scala | apache-2.0 | 7,066 |
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala
import java.util.concurrent.TimeUnit
import scala.concurrent.duration.Duration
import com.mongodb.async.client.{DistinctIterable, MongoIterable}
import org.mongodb.scala.model.Collation
import org.scalamock.scalatest.proxy.MockFactory
import org.scalatest.{FlatSpec, Matchers}
class DistinctObservableSpec extends FlatSpec with Matchers with MockFactory {

  "DistinctObservable" should "have the same methods as the wrapped DistinctObservable" in {
    // Methods inherited from MongoIterable are excluded; only DistinctIterable's own
    // surface must be mirrored by the Scala wrapper.
    val mongoIterable: Set[String] = classOf[MongoIterable[Document]].getMethods.map(_.getName).toSet
    val wrapped = classOf[DistinctIterable[Document]].getMethods.map(_.getName).toSet -- mongoIterable
    val local = classOf[DistinctObservable[Document]].getMethods.map(_.getName).toSet

    wrapped.foreach((name: String) => {
      // A Java getter "getFoo" may be exposed Scala-style as "foo" on the wrapper.
      val cleanedName = name.stripPrefix("get")
      assert(local.contains(name) | local.contains(cleanedName.head.toLower + cleanedName.tail))
    })
  }

  it should "call the underlying methods" in {
    val wrapper = mock[DistinctIterable[Document]]
    val observable = DistinctObservable(wrapper)

    val filter = Document("a" -> 1)
    val duration = Duration(1, TimeUnit.SECONDS)
    val collation = Collation.builder().locale("en").build()
    val batchSize = 10

    // Minimal observer that just requests everything and discards it.
    val observer = new Observer[Document]() {
      override def onError(throwable: Throwable): Unit = {}
      override def onSubscribe(subscription: Subscription): Unit = subscription.request(Long.MaxValue)
      override def onComplete(): Unit = {}
      override def onNext(doc: Document): Unit = {}
    }

    // Each wrapper call must be forwarded exactly once to the driver iterable.
    wrapper.expects(Symbol("filter"))(filter).once()
    wrapper.expects(Symbol("maxTime"))(duration.toMillis, TimeUnit.MILLISECONDS).once()
    wrapper.expects(Symbol("collation"))(collation).once()
    wrapper.expects(Symbol("getBatchSize"))().once()
    wrapper.expects(Symbol("batchSize"))(Int.MaxValue).once()
    wrapper.expects(Symbol("batchCursor"))(*).once()

    observable.filter(filter)
    observable.maxTime(duration)
    observable.collation(collation)
    observable.subscribe(observer)

    wrapper.expects(Symbol("batchSize"))(batchSize).once()
    wrapper.expects(Symbol("getBatchSize"))().once()

    observable.batchSize(batchSize)
    observable.subscribe(observer)
  }
}
| rozza/mongo-scala-driver | driver/src/test/scala/org/mongodb/scala/DistinctObservableSpec.scala | Scala | apache-2.0 | 2,896 |
package io.netflow
package flows.cflow
import java.net.{ InetAddress, InetSocketAddress }
import java.util.UUID
import com.datastax.driver.core.utils.UUIDs
import com.twitter.util.Future
import io.netflow.actors.SenderWorker
import io.netflow.flows.cflow.TemplateFields._
import io.netflow.lib._
import io.netty.buffer._
import io.wasted.util._
import net.liftweb.json._
import org.joda.time.DateTime
import scala.util.{ Failure, Success, Try }
/**
* NetFlow Version 9 Packet - FlowSet DataSet
*
* *-------*---------------*------------------------------------------------------*
* | Bytes | Contents | Description |
* *-------*---------------*------------------------------------------------------*
* | 0-1 | version | The version of NetFlow records exported 009 |
* *-------*---------------*------------------------------------------------------*
* | 2-3 | count | Number of flows exported in this packet (1-30) |
* *-------*---------------*------------------------------------------------------*
* | 4-7 | SysUptime | Current time in milli since the export device booted |
* *-------*---------------*------------------------------------------------------*
* | 8-11 | unix_secs | Current count of seconds since 0000 UTC 1970 |
* *-------*---------------*------------------------------------------------------*
* | 12-15 | PackageSeq | Sequence counter of total flows exported |
* *-------*---------------*------------------------------------------------------*
* | 16-19 | Source ID | engine type+engine id |
* *-------*---------------*------------------------------------------------------*
* | 20- | others | Unused (zero) bytes |
* *-------*---------------*------------------------------------------------------*
*/
object NetFlowV9Packet extends Logger {
  // Fixed size in bytes of the v9 packet header parsed in apply() below.
  private val headerSize = 20
  // Config flag: whether to decode template fields beyond the standard set.
  private def parseExtraFields = NodeConfig.values.netflow.extraFields
/**
* Parse a v9 Flow Packet
*
* @param sender The sender's InetSocketAddress
* @param buf Netty ByteBuf containing the UDP Packet
* @param actor Actor which holds the Templates for async saves
*/
  def apply(sender: InetSocketAddress, buf: ByteBuf, actor: SenderWorker): Try[NetFlowV9Packet] = Try[NetFlowV9Packet] {
    val length = buf.readableBytes()
    val version = buf.getUnsignedInteger(0, 2).toInt
    if (version != 9) return Failure(new InvalidFlowVersionException(version))

    val senderIP = sender.getAddress.getHostAddress
    val senderPort = sender.getPort
    if (length < headerSize)
      return Failure(new IncompleteFlowPacketHeaderException)

    // Packet header fields (see the layout table in the class comment above).
    val count = buf.getUnsignedInteger(2, 2).toInt
    val uptime = buf.getUnsignedInteger(4, 4)
    val timestamp = new DateTime(buf.getUnsignedInteger(8, 4) * 1000)
    // Time-based UUID derived from the export timestamp identifies this packet.
    val id = UUIDs.startOf(timestamp.getMillis)
    val flowSequence = buf.getUnsignedInteger(12, 4)
    val sourceId = buf.getUnsignedInteger(16, 4)

    var flowsetCounter = 0
    var packetOffset = headerSize

    // we use a mutable array here in order not to bash the garbage collector so badly
    // because whenever we append something to our vector, the old vectors need to get GC'd
    val flows = scala.collection.mutable.ArrayBuffer[Flow[_]]()
    // Walk each FlowSet: 2-byte id, 2-byte length, then its records.
    while (flowsetCounter < count && packetOffset < length) {
      val flowsetId = buf.getUnsignedInteger(packetOffset, 2).toInt
      val flowsetLength = buf.getUnsignedInteger(packetOffset + 2, 2).toInt
      if (flowsetLength == 0) return Failure(new IllegalFlowSetLengthException)
      if (packetOffset + flowsetLength > length) return Failure(new ShortFlowPacketException)

      flowsetId match {
        case 0 | 2 => // template flowset - 0 NetFlow v9, 2 IPFIX
          var templateOffset = packetOffset + 4 // add the 4 byte flowset Header
          debug("Template FlowSet (" + flowsetId + ") from " + senderIP + ":" + senderPort)
          do {
            // Each template: 2-byte id, 2-byte field count, then 4 bytes per field.
            val fieldCount = buf.getUnsignedShort(templateOffset + 2)
            val templateSize = fieldCount * 4 + 4
            if (templateOffset + templateSize < length) {
              val buffer = buf.slice(templateOffset, templateSize)
              NetFlowV9Template(sender, buffer, id, flowsetId, timestamp) match {
                case Success(tmpl) =>
                  // Register the template so later data FlowSets can be decoded.
                  actor.setTemplate(tmpl)
                  flows += tmpl
                case Failure(e) => warn(e.toString)
              }
              flowsetCounter += 1
            }
            templateOffset += templateSize
          } while (templateOffset - packetOffset < flowsetLength)

        case 1 | 3 => // template flowset - 1 NetFlow v9, 3 IPFIX
          debug("OptionTemplate FlowSet (" + flowsetId + ") from " + senderIP + ":" + senderPort)
          var templateOffset = packetOffset + 4 // add the 4 byte flowset Header
          do {
            // Option templates carry separate scope and option field lengths.
            val scopeLen = buf.getUnsignedInteger(templateOffset + 2, 2).toInt
            val optionLen = buf.getUnsignedInteger(templateOffset + 4, 2).toInt
            val templateSize = scopeLen + optionLen + 6
            if (templateOffset + templateSize < length) {
              val buffer = buf.slice(templateOffset, templateSize)
              NetFlowV9Template(sender, buffer, id, flowsetId, timestamp) match {
                case Success(tmpl) =>
                  actor.setTemplate(tmpl)
                  flows += tmpl
                case Failure(e) => warn(e.toString); e.printStackTrace()
              }
              flowsetCounter += 1
            }
            templateOffset += templateSize
          } while (templateOffset - packetOffset < flowsetLength)

        case a: Int if a > 255 => // flowset - templateId == flowsetId
          // Decode data/option records using the previously registered template, if any.
          actor.templates.get(flowsetId).
            filter(_.isInstanceOf[NetFlowV9Template]).
            map(_.asInstanceOf[NetFlowV9Template]).
            foreach { tmpl =>
              val option = tmpl.flowsetId == 1
              var recordOffset = packetOffset + 4 // add the 4 byte flowset Header
              while (recordOffset - packetOffset + tmpl.length <= flowsetLength) {
                val buffer = buf.slice(recordOffset, tmpl.length)
                val flow =
                  if (option) optionRecord(sender, buffer, id, tmpl, uptime, timestamp)
                  else dataRecord(sender, buffer, id, tmpl, uptime, timestamp)
                flow match {
                  case Success(flow) => flows += flow
                  case Failure(e) => warn(e.toString)
                }
                flowsetCounter += 1
                recordOffset += tmpl.length
              }
            }
        case a: Int => debug("Unexpected TemplateId (" + a + ")")
      }
      packetOffset += flowsetLength
    }
    NetFlowV9Packet(id, sender, length, uptime, timestamp, flows.toList, flowSequence, sourceId)
  }
/**
* Parse a Version 9 Flow
*
* @param sender The sender's InetSocketAddress
* @param buf Netty ByteBuf containing the UDP Packet
* @param fpId FlowPacket-UUID this Flow arrived in
* @param template NetFlow Template for this Flow
* @param timestamp DateTime when this flow was exported
*/
  def dataRecord(sender: InetSocketAddress, buf: ByteBuf, fpId: UUID, template: NetFlowV9Template,
                 uptime: Long, timestamp: DateTime) = Try[NetFlowV9Data] {
    val srcPort = buf.getUnsignedInteger(template, L4_SRC_PORT).get.toInt
    val dstPort = buf.getUnsignedInteger(template, L4_DST_PORT).get.toInt

    // -1 is the "absent" sentinel, so filter it out to get a proper None.
    val srcAS = buf.getUnsignedInteger(template, SRC_AS).map(_.toInt).filter(_ != -1)
    val dstAS = buf.getUnsignedInteger(template, DST_AS).map(_.toInt).filter(_ != -1)

    val proto = (buf.getUnsignedInteger(template, PROT) getOrElse -1L).toInt
    val tos = (buf.getUnsignedInteger(template, SRC_TOS) getOrElse -1L).toInt

    // calculate the offset from uptime and subtract that from the timestamp
    // NOTE(review): filter(_ == 0) keeps only zero switch-times, so any non-zero
    // FIRST_SWITCHED/LAST_SWITCHED becomes None — this looks inverted
    // (filter(_ != 0)?); confirm intended behavior before changing.
    val start = buf.getUnsignedInteger(template, FIRST_SWITCHED).filter(_ == 0).
      map(x => timestamp.minus(uptime - x))
    val stop = buf.getUnsignedInteger(template, LAST_SWITCHED).filter(_ == 0).
      map(x => timestamp.minus(uptime - x))
    val tcpflags = (buf.getUnsignedInteger(template, TCP_FLAGS) getOrElse -1L).toInt

    val srcAddress = buf.getInetAddress(template, IPV4_SRC_ADDR, IPV6_SRC_ADDR)
    val dstAddress = buf.getInetAddress(template, IPV4_DST_ADDR, IPV6_DST_ADDR)
    val nextHop = Option(buf.getInetAddress(template, IPV4_NEXT_HOP, IPV6_NEXT_HOP)).
      filter(_.getHostAddress != "0.0.0.0") // FIXME filter v6

    // Ingress/egress counters: whichever direction the template provided.
    val pkts = buf.getUnsignedInteger(template, InPKTS, OutPKTS).get
    val bytes = buf.getUnsignedInteger(template, InBYTES, OutBYTES).get

    val extraFields: Map[String, Long] = if (!parseExtraFields) Map() else template.getExtraFields(buf)

    NetFlowV9Data(UUIDs.timeBased(), sender, buf.readableBytes(), template.number, uptime, timestamp,
      srcPort, dstPort, srcAS, dstAS, pkts, bytes, proto,
      tos, tcpflags, start, stop, srcAddress, dstAddress, nextHop, extraFields, fpId)
  }
/**
 * Parse a Version 9 Option Flow (templates whose flowsetId is 1 carry
 * option/scope data rather than traffic data).
 *
 * @param sender The sender's InetSocketAddress
 * @param buf Netty ByteBuf containing the UDP Packet
 * @param fpId FlowPacket-UUID which this Flow arrived in
 * @param template NetFlow Template for this Flow
 * @param uptime Sender's uptime at export time
 * @param timestamp DateTime when this flow was exported
 */
def optionRecord(sender: InetSocketAddress, buf: ByteBuf, fpId: UUID, template: NetFlowV9Template,
  uptime: Long, timestamp: DateTime) = Try[NetFlowV9Option] {
  val recordLength = buf.readableBytes()
  val optionFields = template.getExtraFields(buf)
  NetFlowV9Option(UUIDs.timeBased(), sender, recordLength, template.number, uptime, timestamp,
    optionFields, fpId)
}
// Dispatches `f` to whichever storage backend (Cassandra or Redis) is
// configured; yields a failed Future when no backend is defined.
private def doLayer[T](f: FlowPacketMeta[NetFlowV9Packet] => Future[T]): Future[T] = NodeConfig.values.storage match {
  case Some(StorageLayer.Cassandra) => f(storage.cassandra.NetFlowV9Packet)
  case Some(StorageLayer.Redis) => f(storage.redis.NetFlowV9Packet)
  case _ => Future.exception(NoBackendDefined)
}
// Persists the packet via the configured storage layer; the resulting Future is discarded.
def persist(fp: NetFlowV9Packet): Unit = doLayer(l => Future.value(l.persist(fp)))
}
/** A fully parsed NetFlow v9 export packet together with all flows it carried. */
case class NetFlowV9Packet(id: UUID, sender: InetSocketAddress, length: Int, uptime: Long,
  timestamp: DateTime, flows: List[Flow[_]],
  flowSequence: Long, sourceId: Long) extends FlowPacket {
  def version = "NetFlowV9 Packet"
  // number of flows carried in this packet
  def count = flows.length
  def persist() = NetFlowV9Packet.persist(this)
}
/** A single parsed NetFlow v9 data record (one flow) with its decoded fields. */
case class NetFlowV9Data(id: UUID, sender: InetSocketAddress, length: Int, template: Int, uptime: Long,
  timestamp: DateTime, srcPort: Int, dstPort: Int, srcAS: Option[Int], dstAS: Option[Int],
  pkts: Long, bytes: Long, proto: Int, tos: Int, tcpflags: Int,
  start: Option[DateTime], stop: Option[DateTime],
  srcAddress: InetAddress, dstAddress: InetAddress, nextHop: Option[InetAddress],
  extra: Map[String, Long], packet: UUID) extends NetFlowData[NetFlowV9Data] {
  def version = "NetFlowV9Data " + template
  // extra template fields rendered as a JSON object for serialization
  override lazy val jsonExtra = Extraction.decompose(extra).asInstanceOf[JObject]
  override lazy val stringExtra = "- Template " + template
}
/** A parsed NetFlow v9 option record; all of its fields live in the `extra` map. */
case class NetFlowV9Option(id: UUID, sender: InetSocketAddress, length: Int, template: Int, uptime: Long,
  timestamp: DateTime, extra: Map[String, Long], packet: UUID)
  extends Flow[NetFlowV9Option] {
  def version = "NetFlowV9Option " + template
  override lazy val json = Serialization.write(extra)
}
| ayscb/netflow | netflow1/netflow-master/src/main/scala/io/netflow/flows/cflow/NetFlowV9.scala | Scala | apache-2.0 | 11,680 |
package adni
import com.tencent.angel.worker.storage.MemoryDataBlock
import com.tencent.angel.worker.task.{BaseTask, TaskContext}
import org.apache.commons.logging.LogFactory
import org.apache.hadoop.io.{LongWritable, Text}
import structures.{CSRMatrix, Row}
import scala.language.implicitConversions
/**
* Created by chris on 11/14/17.
*/
object sf {
  /**
   * Implicitly converts a numeric string to a Float.
   * Throws NumberFormatException when the string is not a valid float literal.
   * Fix: implicit defs require an explicit result type (warned in Scala 2,
   * mandatory in Scala 3); the original relied on inference.
   */
  implicit def string2float(str: String): Float = str.toFloat
}
/**
 * Angel task that loads one partition of an incidence matrix from text input,
 * assembles it into a CSR matrix, and runs the ADNI learner over it.
 */
class AdTrainTask(val ctx: TaskContext) extends BaseTask[LongWritable, Text, Row[Float]](ctx) {
  private val LOG = LogFactory.getLog(classOf[AdTrainTask])
  // rows parsed from this task's input split, buffered until build()
  var incidence = new MemoryDataBlock[Row[Float]](-1)
  // total number of non-zero entries seen so far
  var N = 0
  // number of rows parsed so far in this partition
  var did = 0
  // original row ids of this partition's rows, populated by build()
  var rowId:Array[Int] = _
  override
  def parse(key: LongWritable, value: Text): Row[Float] = {
    import sf._
    val row = new Row[Float](value.toString)
    // NOTE(review): `row` is the result of `new` and can never be null, so this
    // guard is always taken — confirm whether Row construction can really fail.
    if (row != null) {
      did += 1
      N += row.len
    }
    row
  }
  override
  def preProcess(ctx: TaskContext) {
    // Read every key/value pair of the split and buffer the parsed rows.
    val reader = ctx.getReader[LongWritable, Text]
    while (reader.nextKeyValue()) {
      incidence.put(parse(reader.getCurrentKey(), reader.getCurrentValue))
    }
  }
  /**
   * Assembles the buffered rows into a CSR matrix of the given (rows, cols)
   * shape, recording each row's original id in `rowId`. Clears the buffer.
   */
  def build(shape:(Int,Int)):CSRMatrix[Float] = {
    rowId = Array.ofDim[Int](did)
    val values = Array.ofDim[Float](N)
    val columns = Array.ofDim[Int](N)
    val rows = Array.ofDim[Int](N)
    var count = 0
    (0 until did) foreach{i =>
      val row = incidence.get(i)
      rowId(i) = row.rowId
      (0 until row.len) foreach{j =>
        values(count) = row.values(j)
        columns(count) = row.columns(j)
        rows(count) = i
        count += 1
      }
    }
    // rows are now in the CSR arrays; release the buffered copies
    incidence.clean()
    new CSRMatrix[Float](values,rows,columns,shape)
  }
  @throws[Exception]
  def run(ctx: TaskContext): Unit = {
    val model = new AdniModel(ctx.getConf, ctx)
    LOG.info(s"V=${model.V} K=${model.k} PartRows=$did Entries=$N" + s" threadNum=${model.threadNum}")
    val data = build((did, model.V))
    // rows with id >= model.s act as seeds for the learner
    val seeds = rowId.filter{f =>
      f >= model.s
    }
    val learner = new AdLearner(ctx, model, data, rowId: Array[Int], seeds)
    learner.initialize()
    learner.train()
    // only the first task writes the result to avoid duplicate output
    if(model.save && ctx.getTaskIndex == 0) learner.saveResult()
  }
}
| LiteML/EmbedLib | src/main/scala/adni/AdTrainTask.scala | Scala | apache-2.0 | 2,164 |
package knot.data.buffers
import java.nio.ByteOrder
import scala.annotation.tailrec
/**
 * [[BufferInput]] implementation that reads primitives sequentially from a
 * chain of buffers supplied by an [[InputSink]]. When the current buffer is
 * exhausted, the next one is pulled from the sink transparently.
 *
 * Not thread-safe: all state is unsynchronized mutable cursor bookkeeping.
 */
class DefaultBufferInput(sink: InputSink) extends BufferInput {
  private[this] val _order: ByteOrder = ByteOrder.nativeOrder()
  // current buffer being consumed; null until the first read pulls one from the sink
  private[this] var _buffer: UnsafeArrayBuffer = _
  /**
   * read position of "current buffer"
   */
  private[this] var _position: Int = 0
  // total number of bytes consumed across all buffers
  private[this] var _allReadBufPosition: Int = 0
  // NOTE(review): NPEs if called before the first read pulls a buffer — confirm intended
  override def size: Int = _buffer.size
  override def order: ByteOrder = _order
  override def readerIndex: Int = _allReadBufPosition
  override def resetReader(): Unit = {
    _position = 0
    _allReadBufPosition = 0
  }
  /** Reads one byte and advances the cursor. */
  override def getByte(): Byte = {
    prepareReadBuffer(1)
    val v = getByteCore()
    writePosition(1)
    v
  }
  /**
   * get byte but not move current buffer position
   *
   * @return byte
   */
  protected def getByteCore(): Byte = {
    _buffer.getByte(_position)
  }
  /**
   * Copies `length` bytes into `dest` starting at `destOffset`, spanning
   * buffer boundaries as needed.
   */
  override def getBytes(dest: Array[Byte], destOffset: Int, length: Int): Unit = {
    @tailrec
    def go(now: Int): Int = {
      // Fix: ensure a readable buffer BEFORE sizing the chunk. The previous
      // code measured `_buffer.size - _position` first, which (a) NPE'd when
      // this was the very first read (_buffer == null), (b) produced a
      // zero-length chunk on an exhausted buffer, recursing forever, and
      // (c) copied with a length computed against the old buffer after
      // prepareReadBuffer had already swapped it.
      prepareReadBuffer(1)
      val l = Math.min(_buffer.size - _position, length - now)
      _buffer.getBytes(_position, dest, destOffset + now, l)
      writePosition(l)
      val total = now + l
      if (total < length) go(total) else total
    }
    if (length > 0) go(0)
    ()
  }
  override def getInt(): Int = {
    prepareReadBuffer(4)
    val v = _buffer.getInt(_position)
    writePosition(4)
    v
  }
  override def getShort(): Short = {
    prepareReadBuffer(2)
    val v = _buffer.getShort(_position)
    writePosition(2)
    v
  }
  override def getLong(): Long = {
    prepareReadBuffer(8)
    val v = _buffer.getLong(_position)
    writePosition(8)
    v
  }
  override def getFloat(): Float = {
    prepareReadBuffer(4)
    val v = _buffer.getFloat(_position)
    writePosition(4)
    v
  }
  override def getDouble(): Double = {
    prepareReadBuffer(8)
    val v = _buffer.getDouble(_position)
    writePosition(8)
    v
  }
  override def getIntLE(): Int = {
    prepareReadBuffer(4)
    val v = _buffer.getIntLE(_position)
    writePosition(4)
    v
  }
  /**
   * Ensures the current buffer can serve `i` more bytes, pulling the next
   * buffer from the sink when the current one is missing or exhausted.
   * Throws [[ByteNodeBufferException]] when the sink is drained.
   */
  protected def prepareReadBuffer(i: Int): Unit = {
    if (_buffer == null || _position + i > _buffer.size) {
      _buffer = sink.next
      _position = 0
      if (_buffer == null) {
        // Fix: the old message interpolated `$size`, which dereferences the
        // now-null _buffer and raised an NPE that masked the real error.
        // Also, a drained sink on the very first read previously left
        // _buffer null silently, NPE-ing later at the call site.
        throw ByteNodeBufferException(s"index out of bounds. readindex=${_position}, read size=$i")
      }
    }
  }
  // Advances both the per-buffer and the global read cursor.
  protected def writePosition(i: Int): Unit = {
    _position += i
    _allReadBufPosition += i
  }
  override def close(): Unit = {
    sink.close()
    _position = 0
    _allReadBufPosition = 0
  }
}
| defvar/knot | knot-data/src/main/scala/knot/data/buffers/DefaultBufferInput.scala | Scala | mit | 2,701 |
package gitbucket.core.service
import gitbucket.core.GitBucketCoreModule
import gitbucket.core.util.{DatabaseConfig, FileUtil}
import gitbucket.core.util.SyntaxSugars._
import io.github.gitbucket.solidbase.Solidbase
import liquibase.database.core.H2Database
import liquibase.database.jvm.JdbcConnection
import gitbucket.core.model._
import gitbucket.core.model.Profile._
import gitbucket.core.model.Profile.profile._
import gitbucket.core.model.Profile.profile.blockingApi._
import org.apache.commons.io.FileUtils
import java.sql.DriverManager
import java.io.File
import scala.util.Random
/**
 * Base trait for GitBucket service specs: provides an isolated, fully migrated
 * H2 database per test plus helpers to create accounts, repositories, issues
 * and pull requests.
 */
trait ServiceSpecBase {
  /** Runs `action` against a fresh H2 database created in a temporary directory. */
  def withTestDB[A](action: (Session) => A): A = {
    FileUtil.withTmpDir(new File(FileUtils.getTempDirectory(), Random.alphanumeric.take(10).mkString)) { dir =>
      val (url, user, pass) = (DatabaseConfig.url(Some(dir.toString)), DatabaseConfig.user, DatabaseConfig.password)
      org.h2.Driver.load()
      // apply all schema migrations before handing out a session
      using(DriverManager.getConnection(url, user, pass)) { conn =>
        val solidbase = new Solidbase()
        val db = new H2Database()
        db.setConnection(new JdbcConnection(conn)) // TODO Remove setConnection in the future
        solidbase.migrate(conn, Thread.currentThread.getContextClassLoader, db, GitBucketCoreModule)
      }
      Database.forURL(url, user, pass).withSession { session =>
        action(session)
      }
    }
  }
  /** Creates a non-admin account whose user/full name, password and mail derive from `name`. */
  def generateNewAccount(name: String)(implicit s: Session): Account = {
    AccountService.createAccount(name, name, name, s"${name}@example.com", false, None, None)
    user(name)
  }
  // assumes the account exists; .get throws otherwise
  def user(name: String)(implicit s: Session): Account = AccountService.getAccountByUserName(name).get
  // one anonymous service instance mixing in every service trait under test
  lazy val dummyService = new RepositoryService with AccountService with IssuesService with PullRequestService
  with CommitsService with CommitStatusService with LabelsService with MilestonesService with PrioritiesService() {}
  /** Ensures `userName` exists and registers a repository owned by it; returns the account. */
  def generateNewUserWithDBRepository(userName: String, repositoryName: String)(implicit s: Session): Account = {
    val ac = AccountService.getAccountByUserName(userName).getOrElse(generateNewAccount(userName))
    dummyService.insertRepository(repositoryName, userName, None, false)
    ac
  }
  /** Inserts a bare pull-request issue into the repository and returns its issue id. */
  def generateNewIssue(userName: String, repositoryName: String, loginUser: String = "root")(
    implicit s: Session
  ): Int = {
    dummyService.insertIssue(
      owner = userName,
      repository = repositoryName,
      loginUser = loginUser,
      title = "issue title",
      content = None,
      assignedUserName = None,
      milestoneId = None,
      priorityId = None,
      isPullRequest = true
    )
  }
  /**
   * Creates a pull request from `request` into `base`, both given as
   * "user/repository/branch" strings; `loginUser` defaults to the requesting user.
   */
  def generateNewPullRequest(base: String, request: String, loginUser: String = null)(
    implicit s: Session
  ): (Issue, PullRequest) = {
    val Array(baseUserName, baseRepositoryName, baesBranch) = base.split("/")
    val Array(requestUserName, requestRepositoryName, requestBranch) = request.split("/")
    val issueId = generateNewIssue(baseUserName, baseRepositoryName, Option(loginUser).getOrElse(requestUserName))
    dummyService.createPullRequest(
      originUserName = baseUserName,
      originRepositoryName = baseRepositoryName,
      issueId = issueId,
      originBranch = baesBranch,
      requestUserName = requestUserName,
      requestRepositoryName = requestRepositoryName,
      requestBranch = requestBranch,
      commitIdFrom = baesBranch,
      commitIdTo = requestBranch
    )
    dummyService.getPullRequest(baseUserName, baseRepositoryName, issueId).get
  }
}
| x-way/gitbucket | src/test/scala/gitbucket/core/service/ServiceSpecBase.scala | Scala | apache-2.0 | 3,508 |
// Copyright: 2010 - 2018 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.model
import org.ensime.api
import org.ensime.api._
import org.ensime.core.{ FqnToSymbol, RichPresentationCompiler }
import org.ensime.indexer.MethodName
import org.ensime.indexer.graph._
import org.ensime.util.ensimefile._
import org.ensime.util.fileobject._
import org.ensime.vfs._
import scala.collection.mutable
import scala.reflect.internal.util.{ NoPosition, Position }
/**
 * Builders that translate presentation-compiler symbols and types into the
 * ENSIME wire-protocol model types (TypeInfo, SymbolInfo, CompletionInfo, ...).
 * Mixed into the compiler, so members should be called on the compiler thread.
 */
trait ModelBuilders {
  self: RichPresentationCompiler with FqnToSymbol =>
  // Resolves a source position for `sym`, falling back to its companion symbol
  // when the symbol itself yields nothing.
  def locateSymbolPos(sym: Symbol, needPos: PosNeeded): Option[SourcePosition] =
    _locateSymbolPos(sym, needPos).orElse({
      sym.companionSymbol match {
        case NoSymbol => None
        case s: Symbol => _locateSymbolPos(s, needPos)
      }
    })
  // Position lookup for a single symbol: use the compiler's own position when
  // available, otherwise (only when explicitly requested) fall back to the
  // search index and link back into Scala sources where possible.
  def _locateSymbolPos(sym: Symbol,
                       needPos: PosNeeded): Option[SourcePosition] =
    if (sym == NoSymbol || needPos == PosNeededNo)
      None
    else if (sym.pos != NoPosition) {
      if (needPos == PosNeededYes || needPos == PosNeededAvail) {
        OffsetSourcePositionHelper.fromPosition(sym.pos)
      } else
        Some(EmptySourcePosition())
    } else {
      // only perform lookups if actively requested - this is comparatively expensive
      if (needPos == PosNeededYes) {
        val fqn = toFqn(sym).fqnString
        logger.debug(s"$sym ==> $fqn")
        val hit = search.findUnique(fqn)
        logger.debug(s"search: $fqn = $hit")
        hit.flatMap(LineSourcePositionHelper.fromFqnSymbol(_)(vfs)).flatMap {
          sourcePos =>
            if (sourcePos.file.isScala)
              askLinkPos(sym, sourcePos.file)
                .flatMap(pos => OffsetSourcePositionHelper.fromPosition(pos))
            else
              Some(sourcePos)
        }
      } else
        None
    }
  // When inspecting a type, transform a raw list of TypeMembers to a sorted
  // list of InterfaceInfo objects, each with its own list of sorted member infos.
  def prepareSortedInterfaceInfo(
    members: Iterable[Member],
    parents: Iterable[Type]
  ): Iterable[InterfaceInfo] = {
    // ...filtering out non-visible and non-type members
    val visMembers: Iterable[TypeMember] = members.flatMap {
      case m @ TypeMember(sym, tpe, true, _, _) => List(m)
      case _ => List.empty
    }
    val parentMap = parents.map(_.typeSymbol -> List[TypeMember]()).toMap
    val membersMap = visMembers.groupBy {
      case TypeMember(sym, _, _, _, _) => sym.owner
    }
    // Create a list of pairs [(typeSym, membersOfSym)]
    val membersByOwner = (parentMap ++ membersMap).toList.sortWith {
      // Sort the pairs on the subtype relation
      case ((s1, _), (s2, _)) => s1.tpe <:< s2.tpe
    }
    membersByOwner.map {
      case (ownerSym, members) =>
        // If all the members in this interface were
        // provided by the same view, remember that
        // view for later display to user.
        val byView = members.groupBy(_.viaView)
        val viaView = if (byView.size == 1) {
          byView.keys.headOption.filter(_ != NoSymbol)
        } else { None }
        // Do one top level sort by name on members, before
        // subdividing into kinds of members.
        val sortedMembers = members.toList.sortWith { (a, b) =>
          a.sym.nameString <= b.sym.nameString
        }
        // Convert type members into NamedTypeMemberInfos
        // and divide into different kinds..
        val nestedTypes = new mutable.ArrayBuffer[NamedTypeMemberInfo]()
        val constructors = new mutable.ArrayBuffer[NamedTypeMemberInfo]()
        val fields = new mutable.ArrayBuffer[NamedTypeMemberInfo]()
        val methods = new mutable.ArrayBuffer[NamedTypeMemberInfo]()
        for (tm <- sortedMembers) {
          val info = NamedTypeMemberInfo(tm)
          val decl = info.declAs
          if (decl == DeclaredAs.Method) {
            if (info.name == "this") {
              constructors += info
            } else {
              methods += info
            }
          } else if (decl == DeclaredAs.Field) {
            fields += info
          } else if (decl == DeclaredAs.Class || decl == DeclaredAs.Trait ||
                     decl == DeclaredAs.Interface || decl == DeclaredAs.Object) {
            nestedTypes += info
          }
        }
        // present nested types first, then fields, constructors, methods
        val sortedInfos = nestedTypes ++ fields ++ constructors ++ methods
        new InterfaceInfo(TypeInfo(ownerSym.tpe, PosNeededAvail, sortedInfos),
                          viaView.map(_.name.toString))
    }
  }
  // Factory for protocol TypeInfo values from compiler Types.
  object TypeInfo {
    /**
     * Check if we should dealias this type for display purposes. Types in the scala package
     * and types that have a package called `Predef` in their prefix get dealiased.
     */
    def shouldDealiasType(tpe: Type): Boolean = {
      val prefixFullName = tpe match {
        case t: AliasTypeRef => t.pre.typeSymbol.fullName
        case _ => tpe.prefix.typeSymbol.fullName
      }
      prefixFullName == "scala" || prefixFullName.contains("Predef")
    }
    // use needPos=PosNeededYes sparingly as it potentially causes lots of I/O
    def apply(typ: Type,
              needPos: PosNeeded = PosNeededNo,
              members: Iterable[EntityInfo] = List.empty): TypeInfo = {
      // normalize existential and singleton types before inspection
      val tpe = typ match {
        case et: ExistentialType => et.underlying
        case s: SingleType => s.widen
        case t => t
      }
      val shouldDealias = shouldDealiasType(tpe)
      def basicTypeInfo(tpe: Type): BasicTypeInfo = {
        val typeSym =
          if (shouldDealias) tpe.typeSymbol else tpe.typeSymbolDirect
        val symbolToLocate =
          if (typeSym.isModuleClass) typeSym.sourceModule else typeSym
        val symPos = locateSymbolPos(symbolToLocate, needPos)
        val typeArgs = if (shouldDealias) tpe.dealias.typeArgs else tpe.typeArgs
        api.BasicTypeInfo(
          shortName(tpe, shouldDealias = shouldDealias).underlying,
          declaredAs(typeSym),
          fullName(tpe, shouldDealias = shouldDealias).underlying,
          typeArgs.map(TypeInfo(_)),
          members,
          symPos,
          Nil
        )
      }
      tpe match {
        case arrow if isArrowType(arrow, shouldDealias) =>
          ArrowTypeInfoBuilder(tpe)
        case tpe: NullaryMethodType => basicTypeInfo(tpe.resultType)
        case tpe: Type => basicTypeInfo(tpe)
        case _ => nullInfo
      }
    }
    // placeholder used when no meaningful type is available
    def nullInfo = BasicTypeInfo("NA", DeclaredAs.Nil, "NA")
  }
  // Converts a parameter list into a (name, type) section, flagging implicits.
  object ParamSectionInfoBuilder {
    def apply(params: Iterable[Symbol]): ParamSectionInfo =
      new ParamSectionInfo(
        params.map { s =>
          (s.nameString, TypeInfo(s.tpe))
        },
        params.exists(_.isImplicit)
      )
  }
  // Factory for protocol SymbolInfo values from compiler Symbols.
  object SymbolInfo {
    def apply(sym: Symbol): SymbolInfo = {
      val tpe = askOption(sym.tpe) match {
        case None => NoType
        case Some(t) => t
      }
      val nameString = sym.nameString
      // type-like symbols display their fully qualified name; terms keep the short name
      val (name, localName) =
        if (sym.isClass || sym.isTrait || sym.isModule ||
            sym.isModuleClass || sym.isPackageClass) {
          (fullName(tpe).underlying, nameString)
        } else {
          (nameString, nameString)
        }
      new SymbolInfo(
        name,
        localName,
        locateSymbolPos(sym, PosNeededYes),
        TypeInfo(tpe, PosNeededAvail)
      )
    }
  }
  // Builds completion candidates, deciding whether a member may be offered infix.
  object CompletionInfoBuilder {
    def fromSymbol(sym: Symbol, relevance: Int): CompletionInfo =
      fromSymbolAndType(sym, sym.tpe, relevance)
    def fromSymbolAndType(sym: Symbol,
                          tpe: Type,
                          relevance: Int): CompletionInfo = {
      val typeInfo = TypeInfo(tpe)
      CompletionInfo(
        Some(typeInfo),
        sym.nameString,
        relevance,
        None,
        isInfix(sym.nameString, typeInfo)
      )
    }
    // Only single-argument methods (symbolic or short alphanumeric names) qualify for infix.
    private def isInfix(symName: String, typeInfo: TypeInfo): Boolean = {
      def isEligibleForInfix(symName: String,
                             paramSections: Iterable[ParamSectionInfo]) = {
        import InfixChecker._
        if (hasWrongParametersSet(paramSections)) false
        else if (isSymbolString(symName)) true
        else !(isSymbolNameTooLong(symName) || isExcluded(symName))
      }
      typeInfo match {
        case ArrowTypeInfo(_, _, _, paramSections, _) =>
          isEligibleForInfix(symName, paramSections)
        case _ => false
      }
    }
    /*
     * The boolean logic in this object is reversed in order to do as few checks as possible
     */
    private object InfixChecker {
      val MAX_NAME_LENGTH_FOR_INFIX = 3
      val PARAMETER_SETS_NUM_FOR_INFIX = 1
      val PARAMETER_SETS_NUM_FOR_INFIX_WITH_IMPLICIT = 2
      val PARAMETER_LIST_SIZE_FOR_INFIX = 1
      val EXCLUDED = Set("map")
      def isSymbolString(s: String) = s.forall(isSymbol)
      private def isSymbol(c: Char) = !Character.isLetterOrDigit(c)
      def hasWrongParametersSet(paramSections: Iterable[ParamSectionInfo]) =
        isWrongParametersSetNumber(paramSections) ||
          isFirstParameterSetImplicit(paramSections) ||
          hasWrongArity(paramSections)
      // one parameter set, or two where the second is implicit
      private def isWrongParametersSetNumber(
        paramSections: Iterable[ParamSectionInfo]
      ) =
        !(paramSections.size == PARAMETER_SETS_NUM_FOR_INFIX
          || (paramSections.size == PARAMETER_SETS_NUM_FOR_INFIX_WITH_IMPLICIT
            && paramSections.tail.head.isImplicit))
      private def isFirstParameterSetImplicit(
        paramSections: Iterable[ParamSectionInfo]
      ) = paramSections.head.isImplicit
      private def hasWrongArity(paramSections: Iterable[ParamSectionInfo]) =
        paramSections.head.params.size != PARAMETER_LIST_SIZE_FOR_INFIX
      def isExcluded(symbolName: String) = EXCLUDED.contains(symbolName)
      def isSymbolNameTooLong(symbolName: String) =
        symbolName.length > MAX_NAME_LENGTH_FOR_INFIX
    }
  }
  // Converts a single TypeMember to its protocol representation, including
  // a JVM method descriptor when the member is a method.
  object NamedTypeMemberInfo {
    def apply(m: TypeMember): NamedTypeMemberInfo = {
      val decl = declaredAs(m.sym)
      val pos =
        if (m.sym.pos == NoPosition) None else Some(EmptySourcePosition())
      val descriptor = toFqn(m.sym) match {
        case MethodName(_, _, desc) => Some(desc.descriptorString)
        case _ => None
      }
      new NamedTypeMemberInfo(m.sym.nameString,
                              TypeInfo(m.tpe),
                              pos,
                              descriptor,
                              decl)
    }
  }
  // Builds ArrowTypeInfo for function-like types: FunctionN applications,
  // by-name parameters, and method/polymorphic types.
  object ArrowTypeInfoBuilder {
    def apply(tpe: Type): ArrowTypeInfo =
      tpe match {
        case args: ArgsTypeRef
            if args.typeSymbol.fullName.startsWith("scala.Function") =>
          // FunctionN: last type argument is the result, the rest are parameters
          val tparams = args.args
          val result = TypeInfo(tparams.last)
          val params =
            if (tparams.isEmpty) Nil
            else
              tparams.init.zipWithIndex.map {
                case (tpe, idx) => ("_" + idx, TypeInfo(tpe))
              }
          ArrowTypeInfo(shortName(tpe).underlying,
                        fullName(tpe).underlying,
                        result,
                        ParamSectionInfo(params, isImplicit = false) :: Nil,
                        Nil)
        case TypeRef(_, definitions.ByNameParamClass, args) =>
          // by-name parameter (=> A): zero-argument arrow to A
          val result = TypeInfo(args.head)
          ArrowTypeInfo(shortName(tpe).underlying,
                        fullName(tpe).underlying,
                        result,
                        Nil,
                        Nil)
        case _: MethodType | _: PolyType =>
          new ArrowTypeInfo(
            shortName(tpe).underlying,
            fullName(tpe).underlying,
            TypeInfo(tpe.finalResultType),
            tpe.paramss.map(ParamSectionInfoBuilder.apply),
            tpe.typeParams.map(tp => TypeInfo.apply(tp.tpe))
          )
        case _ => nullInfo()
      }
    def nullInfo() =
      new ArrowTypeInfo("NA", "NA", TypeInfo.nullInfo, List.empty, Nil)
  }
}
object LineSourcePositionHelper {
  /**
   * Builds a line-based source position from an indexed [[FqnSymbol]].
   * Yields None when the symbol has no associated source file; an unknown
   * line number defaults to 0.
   */
  def fromFqnSymbol(
    sym: FqnSymbol
  )(implicit vfs: EnsimeVFS): Option[LineSourcePosition] =
    sym.sourceFileObject.map { fo =>
      val file = EnsimeFile(fo.uriString)
      new LineSourcePosition(file, sym.line.getOrElse(0))
    }
}
object OffsetSourcePositionHelper {
  /**
   * Converts a compiler [[Position]] into an offset-based source position;
   * yields None for the NoPosition sentinel.
   */
  def fromPosition(p: Position): Option[OffsetSourcePosition] =
    if (p == NoPosition) None
    else Some(new OffsetSourcePosition(EnsimeFile(p.source.file.path), p.point))
}
object BasicTypeInfo {
  /** Convenience constructor for a type with no args, members, position or nested info. */
  def apply(name: String, declAs: DeclaredAs, fullName: String): BasicTypeInfo =
    api.BasicTypeInfo(name, declAs, fullName, Nil, Nil, None, Nil)
  /** Extractor matching only such "plain" instances (all optional parts empty). */
  def unapply(bti: BasicTypeInfo): Option[(String, DeclaredAs, String)] =
    bti match {
      case api.BasicTypeInfo(a, b, c, Nil, Nil, None, Nil) => Some((a, b, c))
      case _ => None
    }
}
| yyadavalli/ensime-server | core/src/main/scala/org/ensime/model/ModelBuilders.scala | Scala | gpl-3.0 | 13,317 |
package org.jetbrains.plugins.scala
package refactoring.introduceVariable
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.TextRange
import com.intellij.psi.PsiElement
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody
import org.jetbrains.plugins.scala.lang.refactoring.introduceVariable.ScalaIntroduceVariableHandler
import org.jetbrains.plugins.scala.lang.refactoring.util._
/**
* @author Alexander Podkhalyuzin
* @date 05.04.2009
*/
/**
 * Helpers for introduce-variable refactoring tests: build the appropriate
 * validator (expression or type) for a text selection in a Scala file.
 */
object IntroduceVariableTestUtil {
  // tuple projection helpers used by tests
  def extract1[T, U](x: (T, U)): T = x._1
  def extract2[T, U](x: (T, U)): U = x._2
  // Finds the enclosing container (file / block / template body) of the
  // selection; a single occurrence may use a strict parent.
  def getContainerOne(startOffset: Int, endOffset: Int, file: ScalaFile, occLength: Int) = {
    val commonParentOne = PsiTreeUtil.findCommonParent(file.findElementAt(startOffset), file.findElementAt(endOffset - 1))
    ScalaPsiUtil.getParentOfType(commonParentOne, occLength == 1, classOf[ScalaFile], classOf[ScBlock],
      classOf[ScTemplateBody])
  }
  /** Dispatches to the expression or type validator depending on what is selected. */
  def getValidator(project: Project, editor: Editor, file: ScalaFile, startOffset: Int, endOffset: Int): ScalaValidator = {
    PsiTreeUtil.getParentOfType(file.findElementAt(startOffset), classOf[ScExpression], classOf[ScTypeElement]) match {
      case x: ScExpression => getVariableValidator(project, editor, file, startOffset, endOffset)
      case x: ScTypeElement => getTypeValidator(project, editor, file, startOffset, endOffset)
      case _ => null
    }
  }
  /** Builds a validator for introducing a variable from the selected expression. */
  def getVariableValidator(project: Project, editor: Editor, file: ScalaFile, startOffset: Int, endOffset: Int): ScalaVariableValidator = {
    val (expr: ScExpression, _) = ScalaRefactoringUtil.getExpression(project, editor, file, startOffset, endOffset).get
    val fileEncloser = ScalaRefactoringUtil.fileEncloser(startOffset, file)
    val occurrences: Array[TextRange] = ScalaRefactoringUtil.getOccurrenceRanges(ScalaRefactoringUtil.unparExpr(expr), fileEncloser)
    val container: PsiElement = ScalaRefactoringUtil.enclosingContainer(ScalaRefactoringUtil.commonParent(file, occurrences: _*))
    val containerOne = getContainerOne(startOffset, endOffset, file, occurrences.length)
    new ScalaVariableValidator(new ScalaIntroduceVariableHandler, project, expr, occurrences.isEmpty, container, containerOne)
  }
  /** Builds a validator for introducing a type alias from the selected type element. */
  def getTypeValidator(project: Project, editor: Editor, file: ScalaFile, startOffset: Int, endOffset: Int): ScalaTypeValidator = {
    val typeElement = ScalaRefactoringUtil.getTypeElement(project, editor, file, startOffset, endOffset).get
    val fileEncloser = ScalaRefactoringUtil.fileEncloser(startOffset, file)
    val occurrences = ScalaRefactoringUtil.getTypeElementOccurrences(typeElement, fileEncloser)
    val container = ScalaRefactoringUtil.enclosingContainer(PsiTreeUtil.findCommonParent(occurrences: _*))
    val containerOne = getContainerOne(startOffset, endOffset, file, occurrences.length)
    new ScalaTypeValidator(new ScalaIntroduceVariableHandler, project, typeElement, occurrences.isEmpty, container, containerOne)
  }
} | LPTK/intellij-scala | test/org/jetbrains/plugins/scala/refactoring/introduceVariable/IntroduceVariableTestUtil.scala | Scala | apache-2.0 | 3,342 |
package benchmark
import tiscaf._
/**
 * Minimal tiscaf benchmark server: answers every request on port 8910 with
 * the plain-text body "Ok". A previously running instance is stopped via the
 * HStop port (8911) before startup.
 */
object ServerOk extends HServer {
  def main(args: Array[String]): Unit = {
    // Best-effort shutdown of a previous instance; ignore failures (e.g.
    // nothing listening on the stop port). Fix: replaced the deprecated
    // postfix operator call (`... stop`) with ordinary selection.
    try { new HStop("localhost", 8911).stop } catch { case _: Exception => }
    Thread.sleep(600) // give the old instance time to release the port
    start
  }
  protected def ports = Set(8910)
  protected lazy val apps = List(theApp)
  override protected def tcpNoDelay = true // true for benchmarking only!!
  override def interruptTimeoutMillis = 100
  override def error(msg: String, t: Throwable): Unit = filterSomeErrors(t)
  // Suppress the noisy errors clients cause by dropping connections during a
  // benchmark; anything unexpected is still printed.
  private def filterSomeErrors(err: Throwable): Unit = err.getMessage match {
    case "Broken pipe" =>
    case "Connection reset by peer" =>
    case _ => err match {
      case _: java.nio.channels.ClosedSelectorException =>
      case e => e.printStackTrace
    }
  }
  // App tuned for raw throughput: no chunking, buffering, gzip or session tracking.
  object theApp extends HApp {
    override def keepAlive = true
    override def chunked = false
    override def buffered = false
    override def gzip = false
    override def tracking = HTracking.NotAllowed
    def resolve(req: HReqData) = Some(Let)
  }
  // Handler that writes the constant "Ok" body.
  object Let extends HSimpleLet {
    private val bytes = "Ok".getBytes("UTF-8")
    def act(tk: HTalk): Unit = {
      // Fix: the original set Content-Length twice (bytes.size, then
      // bytes.length — the same value); set it once.
      tk.setContentLength(bytes.length) // required explicitly when not buffered
        .setContentType("text/plain; charset=UTF-8")
        .write(bytes)
      ()
    }
  }
}
| gnieh/tiscaf | core/src/test/scala/benchmark/ServerOk.scala | Scala | lgpl-3.0 | 1,425 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters the dataset and returns a sample of code snippets that match specific criteria, giving a quick overview of the dataset's contents without deeper analysis.