code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
// Compiler "pos" test: the file must simply type-check.
// It verifies that a member inherited from a package-private trait
// (protected[foo] A) stays accessible through a public subclass (B)
// when used outside package foo.
package foo {
  // Accessible only within package foo.
  protected[foo] trait A {
    def a: Unit = {}
  }
  // Public class re-exposing A's members outside the package.
  class B extends A
}
// C extends B from outside foo; it inherits `a` even though A itself
// is not accessible here.
trait C extends foo.B
object Test {
  def test: Unit = {
    val c = new C {}
    c.a // must resolve: `a` is reachable via public B despite A's restricted visibility
  }
}
| lampepfl/dotty | tests/pos/trait-access.scala | Scala | apache-2.0 | 180 |
/*
* Copyright 2015 Geeoz Software
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pawl
import java.util.ResourceBundle
/** Utility trait providing an implicit conversion from a
  * (bundle base name, key) pair to the corresponding resource-bundle value.
  */
trait Bundle {
  /** Resolve a localized value from a resource bundle.
    * @param cortege pair of (resource bundle base name, message key)
    * @return localized value for the key, looked up against `Lang`
    */
  implicit def value(cortege: (_ <: String, _ <: String)): String = {
    val bundleName: String = cortege._1
    val messageKey: String = cortege._2
    ResourceBundle.getBundle(bundleName, Lang).getString(messageKey)
  }
}
| geeoz/pawl | pawl-scalatest/src/main/scala/pawl/Bundle.scala | Scala | apache-2.0 | 1,027 |
/*
* Copyright (C) 2005, The OpenURP Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.prac.innovation.model
import org.beangle.commons.collection.Collections
import org.beangle.data.model.LongId
import scala.collection.mutable
/** Initial review record for an innovation project: the reviewing group,
  * an overall score, and individual review detail entries.
  */
class InitReview extends LongId {
  // Project under review.
  var project: InnovProject = _
  // Overall review score; None until the review has been scored.
  var score: Option[Float] = None
  // Review group performing this initial review, if assigned.
  var group: Option[InitReviewGroup] = None
  // Per-item detail rows of this review.
  var details: mutable.Buffer[InitReviewDetail] = Collections.newBuffer[InitReviewDetail]
}
| openurp/api | prac/src/main/scala/org/openurp/prac/innovation/model/InitReview.scala | Scala | lgpl-3.0 | 1,125 |
package com.catinthedark.lib.network
import scala.collection.mutable
/** Message bus dispatching payloads received from a [[Transport]] to the
  * subscribers registered for the payload's runtime class.
  */
class MessageBus(val transport: Transport) extends IMessageBus {
  // Registered subscribers; each one is keyed by a canonical class name.
  val subscribers = new mutable.ListBuffer[Subscriber[_]]

  // Route every incoming wrapper to the subscribers whose declared class
  // name matches the payload's runtime class.
  transport.setReceiver { wrapper =>
    val incomingType = wrapper.data.getClass.getCanonicalName
    for (sub <- subscribers if sub.className == incomingType)
      sub.send(wrapper.data, wrapper.sender)
  }

  /** Send a message through the underlying transport. */
  override def send(message: Message): Unit = transport.send(message)

  /** Register a callback for messages whose runtime class is `clazz`. */
  override def subscribe[T](clazz: Class[T], callback: (T, String) => Unit): Unit =
    subscribers += Subscriber(clazz.getCanonicalName, callback)
}
/** Binds a fully-qualified class name to a callback invoked for matching payloads.
  * @param className canonical name of the payload type this subscriber accepts
  * @param callback  handler receiving the typed payload and the sender id
  */
case class Subscriber[T](className: String, callback: (T, String) => Unit) {
  /** Forward a payload and its sender to the callback, casting to the expected type. */
  def send(data: Any, sender: String): Unit = callback(data.asInstanceOf[T], sender)
}
package reopp.common.benchmarks
import _root_.z3.scala.{Z3Config, Z3AST, Z3Context}
import reopp.common.guardedcommands._
import reopp.common.guardedcommands.dataconnectors._
import scala.math.pow
import _root_.choco.kernel.model.variables.integer.IntegerExpressionVariable
import _root_.choco.Choco
import reopp.common._
import Utils._
import reopp.common.guardedcommands.IntPred
import z3.Z3
import reopp.common.guardedcommands.dataconnectors.ConstraintGen._
import reopp.common.guardedcommands.Neg
import reopp.common.guardedcommands.Pred
/**
* Created with IntelliJ IDEA.
* User: jose
* Date: 21/06/12
* Time: 10:25
* To change this template use File | Settings | File Templates.
*/
// Empty marker class mirroring the benchmark object's name.
class AllApprovalData

/** Benchmark: builds a binary tree of mergers fed by data writers, routes the
  * merged stream through three filters ("approve" / "deny" / "neither"), and
  * times the constraint solver selected via the command-line arguments.
  *
  * args(0): tree height (default 8); args(1): solver flag matched by prefix;
  * args(2): "i..." restricts the run to initialization only.
  */
object AllApprovalData extends App {
  Warmup.go
  // Tree height; 2^n writers are generated.
  val n = if (!args.isEmpty) Integer.parseInt(args(0))
  else 8
  // Solver selection flags; NOTE(review): matched by string prefix, so order
  // of the checks matters ("cs" also starts with "c", "zs" with "z").
  val satfull = if (args.size > 1) args(1) startsWith "s"
  else false
  val chocosat = if (args.size > 1) args(1) startsWith "cs"
  else false
  val choco = if (args.size > 1) (args(1) startsWith "c") && !chocosat
  else false
  val z3sat = if (args.size > 1) args(1) startsWith "zs"
  else false
  val z3 = if (args.size > 1) (args(1) startsWith "z") && !z3sat
  else false
  val quicksat = if (args.size > 1) args(1) startsWith "q"
  else false
  val lazyy = if (args.size > 1) args(1) startsWith "l"
  else false
  // When set, only the problem initialization is performed (no solving).
  val justInit = if (args.size > 2) args(2) startsWith "i"
  else false

  /** Create `n` writers, each emitting a single 5-tuple of derived values. */
  def genClients(n:Int): Iterable[GCWriter] = {
    var res = List[GCWriter]()
    for (i <- n to 1 by -1) {
      res ::=
        new GCWriter("w"+i,List((i,(i*3 % 16)+5,(i*4 % 16)+5,(i*5 % 16)+5,(i*6 % 16)+5))) // tuple
      //        new GCWriter("w"+i,0,List(join(i,(i*3 % 16)+5,(i*4 % 16)+5,(i*5 % 16)+5)))
      //      println("new writer: "+(i,(i*3 % 16)+5,(i*4 % 16)+5,(i*5 % 16)+5)+ " -- "+
      //        join(i,(i*3 % 16)+5,(i*4 % 16)+5,(i*5 % 16)+5))
    }
    res
  }

  /** Build a binary merger tree of the given height rooted at channel "x",
    * and wire 2^height writers to its leaves.
    */
  def genMergers(height:Int): Formula= {
    val size = pow(2,height)
    var srcs = List("x")
    var res = Formula()
    // Expand one level at a time: each source splits into two child sources.
    for (level <- 1 to height) {
      var newsrcs = List[String]()
      for (x <- srcs) {
        res ++= merger(x+"1",x+"2",x)
        newsrcs :::= List(x+"1",x+"2")
      }
      srcs = newsrcs
    }
    //    println("size / n.of srcs: "+size+"/"+srcs.size)
    //    println("clients: "+genClients(size.toInt).map(_.x))
    // Attach each writer to one remaining leaf source.
    for (wr <- genClients(size.toInt)) {
      srcs match {
        case hd::tl =>
          res ++= (wr.getConstraints ++ sync(wr.x,hd))
          srcs = tl
        case Nil => {}
      }
    }
    res
  }

  // NOTE(review): the tuple element types below are erased at runtime, so any
  // Tuple5 would match the first case — only safe because writers emit
  // exactly (Int,Int,Int,Int,Int). ("unexpeced" typo is in the runtime
  // message string, left as-is.)
  val approve = Predicate("Approve") {
    case x:(Int,Int,Int,Int,Int) => (x._2*2 + x._3*2 + x._4*3 + x._5*5) >= 140
    case x => throw new RuntimeException("unexpeced type "+x)
  }
  val deny = Predicate("Deny") {
    case x:(Int,Int,Int,Int,Int) => (x._2*2 + x._3*2 + x._4*3 + x._5*5) <= 90
    case x => throw new RuntimeException("unexpeced type "+x)
  }

  // Full problem: merger tree + three mutually exclusive outcome filters.
  val problem = genMergers(n) ++
    filter("x","app-ok",approve) ++
    filter("x","den-ok",deny) ++
    genfilter("x","neither-ok", v => Neg(Pred(v,approve)) and
      Neg(Pred(v,deny)))
  //    flow("x") ++
  //    flow("app-ok")
  //    writer("x",List(19))

  if (justInit) problem.justInit = true
  else if (quicksat) {
    val time = System.currentTimeMillis()
    val res = problem.quickDataSolveSAT4J
    val spent = System.currentTimeMillis() - time
    print(spent)
  }
  else if (z3sat) {
    // NOTE(review): this local `z3` context shadows the boolean flag `z3` above.
    val z3 = new Z3Context(new Z3Config("MODEL" -> true))
    val time = System.currentTimeMillis()
    val res = problem.quickDataSolveZ3(z3)
    val spent = System.currentTimeMillis() - time
    print(spent)
  }
  else if (lazyy) {
    val time = System.currentTimeMillis()
    val res = problem.solveChocoPredAbstVarOrdered
    val spent = System.currentTimeMillis() - time
    print(spent)
  }
  else if (satfull || chocosat || choco || z3)
    print(0)

  /// EXPERIMENTS:
  // Default branch: run and report several solvers in sequence.
  else {
    //    println(" # THE PROBLEM:\\n"+problem.commands.mkString(" - ","\\n - ","\\n"))
    var time: Long = 0
    var res: OptionSol[Solution[_]] = null
    var spent: Long = 0

    //// QUICK-SAT ////
    time = System.currentTimeMillis()
    res = problem.quickDataSolveSAT4J
    spent = System.currentTimeMillis() - time
    //    if (res.isDefined) println("quick-sat - solved in "+spent+" ms:\\n"+res.get.pretty)
    //    else println("quick-sat - no solution (in "+spent+" ms)")
    println("quick-sat - "+spent)

    // THESE ARE NOT IMPLEMENTED: only for
    //    //// SAT-FULL ////
    //    time = System.currentTimeMillis()
    //    res = problem.solveIterative
    //    spent = System.currentTimeMillis() - time
    ////    if (res.isDefined) println("SAT-full - solved in "+spent+" ms:\\n"+res.get.pretty)
    ////    else println("SAT-full - no solution (in "+spent+" ms)")
    //    println("SAT-full - "+spent)
    //
    //    //// SATC-FULL ////
    //    time = System.currentTimeMillis()
    //    res = problem.solveChocoSat
    //    spent = System.currentTimeMillis() - time
    ////    if (res.isDefined) println("SATC-full - solved in "+spent+" ms:\\n"+res.get.pretty)
    ////    else println("SATC-full - no solution (in "+spent+" ms)")
    //    println("SATC-full - "+spent)
    //
    //    //// CHOCO ////
    //    time = System.currentTimeMillis()
    //    res = problem.solveChoco
    //    spent = System.currentTimeMillis() - time
    ////    if (res.isDefined) println("Choco - solved in "+spent+" ms:\\n"+res.get.pretty)
    ////    else println("Choco - no solution (in "+spent+" ms)")
    //    println("Choco - "+spent)
    //
    //    /// Z3 ////
    //    val z3 = new Z3Context(new Z3Config("MODEL" -> true))
    //    time = System.currentTimeMillis()
    //    res = Z3.solvez3(Z3.gc2z3(problem,z3),z3)
    //    spent = System.currentTimeMillis() - time
    ////    if (res.isDefined) println("Z3 - solved in "+spent+" ms:\\n"+res.get.pretty)
    ////    else println("Z3 - no solution (in "+spent+" ms)")
    //    println("Z3 - "+spent)

    //// QUICK-SAT-Z3 ////
    val zz3 = new Z3Context(new Z3Config("MODEL" -> true))
    time = System.currentTimeMillis()
    res = problem.quickDataSolveZ3(zz3)
    spent = System.currentTimeMillis() - time
    //    if (res.isDefined) println("quick-z3 - solved in "+spent+" ms:\\n"+res.get.pretty)
    //    else println("quick-z3 - no solution (in "+spent+" ms)")
    if (res.isDefined) println("ok/accept/neither: "+
      res.get.getDataOn(mkDataVar("app-ok"))+"/"+
      res.get.getDataOn(mkDataVar("den-ok"))+"/"+
      res.get.getDataOn(mkDataVar("neither-ok"))+"/"
    )
    println("quick-z3 - "+spent)

    // LAZY-SAT ////
    time = System.currentTimeMillis()
    res = problem.solveChocoPredAbstVarOrdered
    spent = System.currentTimeMillis() - time
    //    if (res.isDefined) println("lazy-sat - solved in "+spent+" ms:\\n"+res)
    //    else println("lazy-sat - no solution (in "+spent+" ms)")
    if (res.isDefined) println("ok/accept/neither: "+
      res.get.getDataOn(mkDataVar("app-ok"))+"/"+
      res.get.getDataOn(mkDataVar("den-ok"))+"/"+
      res.get.getDataOn(mkDataVar("neither-ok"))+"/"
    )
    println("lazy-sat - "+spent)

    // ChocoDyn ////
    time = System.currentTimeMillis()
    res = problem.solveChocoDyn
    spent = System.currentTimeMillis() - time
    //    if (res.isDefined) println("lazy-sat - solved in "+spent+" ms:\\n"+res)
    //    else println("lazy-sat - no solution (in "+spent+" ms)")
    if (res.isDefined) println("ok/accept/neither: "+
      res.get.getDataOn(mkDataVar("app-ok"))+"/"+
      res.get.getDataOn(mkDataVar("den-ok"))+"/"+
      res.get.getDataOn(mkDataVar("neither-ok"))+"/"
    )
    println("choco dyn tables - "+spent)

    // ChocoDyn ////
    time = System.currentTimeMillis()
    res = problem.solveXZ3
    spent = System.currentTimeMillis() - time
    //    if (res.isDefined) println("lazy-sat - solved in "+spent+" ms:\\n"+res)
    //    else println("lazy-sat - no solution (in "+spent+" ms)")
    if (res.isDefined) println("ok/accept/neither: "+
      res.get.getDataOn(mkDataVar("app-ok"))+"/"+
      res.get.getDataOn(mkDataVar("den-ok"))+"/"+
      res.get.getDataOn(mkDataVar("neither-ok"))+"/"
    )
    println("X-Z3 - "+spent)
  }
}
| joseproenca/ip-constraints | code/src/main/scala/reopp/common/benchmarks/AllApprovalData.scala | Scala | mit | 8,256 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.integration
import org.apache.kafka.common.config.ConfigException
import org.junit.{After, Before, Test}
import scala.util.Random
import scala.collection.JavaConverters._
import scala.collection.Seq
import org.apache.log4j.{Level, Logger}
import java.util.Properties
import java.util.concurrent.ExecutionException
import kafka.server.{KafkaConfig, KafkaServer}
import kafka.utils.{CoreUtils, TestUtils}
import kafka.utils.TestUtils._
import kafka.zk.ZooKeeperTestHarness
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.TimeoutException
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.kafka.clients.admin.{Admin, AdminClient, AdminClientConfig}
import org.junit.Assert._
import org.scalatest.Assertions.intercept
/** Integration tests for unclean leader election on a two-broker cluster.
  *
  * Each test creates a single-partition topic replicated on both brokers,
  * kills the follower, produces a message that only the leader sees, then
  * kills the leader and restarts the stale follower. Depending on whether
  * unclean leader election is enabled (globally or via topic override), the
  * stale follower either becomes leader (losing the unreplicated message) or
  * the partition stays leaderless until the old leader returns.
  *
  * Change from review: `.map` used purely for side effects (shutting down
  * servers, removing metrics) replaced by `.foreach`, consistent with the
  * rest of the class.
  */
class UncleanLeaderElectionTest extends ZooKeeperTestHarness {
  val brokerId1 = 0
  val brokerId2 = 1

  // controlled shutdown is needed for these tests, but we can trim the retry count and backoff interval to
  // reduce test execution time
  val enableControlledShutdown = true

  var configProps1: Properties = null
  var configProps2: Properties = null

  var configs: Seq[KafkaConfig] = Seq.empty[KafkaConfig]
  var servers: Seq[KafkaServer] = Seq.empty[KafkaServer]

  // Randomized topic name avoids collisions across test runs sharing ZK state.
  val random = new Random()
  val topic = "topic" + random.nextLong
  val partitionId = 0

  val kafkaApisLogger = Logger.getLogger(classOf[kafka.server.KafkaApis])
  val networkProcessorLogger = Logger.getLogger(classOf[kafka.network.Processor])

  @Before
  override def setUp(): Unit = {
    super.setUp()

    configProps1 = createBrokerConfig(brokerId1, zkConnect)
    configProps2 = createBrokerConfig(brokerId2, zkConnect)

    for (configProps <- List(configProps1, configProps2)) {
      configProps.put("controlled.shutdown.enable", enableControlledShutdown.toString)
      configProps.put("controlled.shutdown.max.retries", "1")
      configProps.put("controlled.shutdown.retry.backoff.ms", "1000")
    }

    // temporarily set loggers to a higher level so that tests run quietly
    kafkaApisLogger.setLevel(Level.FATAL)
    networkProcessorLogger.setLevel(Level.FATAL)
  }

  @After
  override def tearDown(): Unit = {
    servers.foreach(server => shutdownServer(server))
    servers.foreach(server => CoreUtils.delete(server.config.logDirs))

    // restore log levels
    kafkaApisLogger.setLevel(Level.ERROR)
    networkProcessorLogger.setLevel(Level.ERROR)

    super.tearDown()
  }

  /** Start one broker per config and record both the configs and servers. */
  private def startBrokers(cluster: Seq[Properties]): Unit = {
    for (props <- cluster) {
      val config = KafkaConfig.fromProps(props)
      val server = createServer(config)
      configs ++= List(config)
      servers ++= List(server)
    }
  }

  @Test
  def testUncleanLeaderElectionEnabled(): Unit = {
    // enable unclean leader election
    configProps1.put("unclean.leader.election.enable", "true")
    configProps2.put("unclean.leader.election.enable", "true")
    startBrokers(Seq(configProps1, configProps2))

    // create topic with 1 partition, 2 replicas, one on each broker
    TestUtils.createTopic(zkClient, topic, Map(partitionId -> Seq(brokerId1, brokerId2)), servers)

    verifyUncleanLeaderElectionEnabled
  }

  @Test
  def testUncleanLeaderElectionDisabled(): Unit = {
    // unclean leader election is disabled by default
    startBrokers(Seq(configProps1, configProps2))

    // create topic with 1 partition, 2 replicas, one on each broker
    TestUtils.createTopic(zkClient, topic, Map(partitionId -> Seq(brokerId1, brokerId2)), servers)

    verifyUncleanLeaderElectionDisabled
  }

  @Test
  def testUncleanLeaderElectionEnabledByTopicOverride(): Unit = {
    // disable unclean leader election globally, but enable for our specific test topic
    configProps1.put("unclean.leader.election.enable", "false")
    configProps2.put("unclean.leader.election.enable", "false")
    startBrokers(Seq(configProps1, configProps2))

    // create topic with 1 partition, 2 replicas, one on each broker, and unclean leader election enabled
    val topicProps = new Properties()
    topicProps.put("unclean.leader.election.enable", "true")
    TestUtils.createTopic(zkClient, topic, Map(partitionId -> Seq(brokerId1, brokerId2)), servers, topicProps)

    verifyUncleanLeaderElectionEnabled
  }

  @Test
  def testUncleanLeaderElectionDisabledByTopicOverride(): Unit = {
    // enable unclean leader election globally, but disable for our specific test topic
    configProps1.put("unclean.leader.election.enable", "true")
    configProps2.put("unclean.leader.election.enable", "true")
    startBrokers(Seq(configProps1, configProps2))

    // create topic with 1 partition, 2 replicas, one on each broker, and unclean leader election disabled
    val topicProps = new Properties()
    topicProps.put("unclean.leader.election.enable", "false")
    TestUtils.createTopic(zkClient, topic, Map(partitionId -> Seq(brokerId1, brokerId2)), servers, topicProps)

    verifyUncleanLeaderElectionDisabled
  }

  @Test
  def testUncleanLeaderElectionInvalidTopicOverride(): Unit = {
    startBrokers(Seq(configProps1))

    // create topic with an invalid value for unclean leader election
    val topicProps = new Properties()
    topicProps.put("unclean.leader.election.enable", "invalid")

    intercept[ConfigException] {
      TestUtils.createTopic(zkClient, topic, Map(partitionId -> Seq(brokerId1)), servers, topicProps)
    }
  }

  /** Asserts that a stale follower is uncleanly elected leader (losing the
    * message that was never replicated to it).
    */
  def verifyUncleanLeaderElectionEnabled(): Unit = {
    // wait until leader is elected
    val leaderId = waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId)
    debug("Leader for " + topic + " is elected to be: %s".format(leaderId))
    assertTrue("Leader id is set to expected value for topic: " + topic, leaderId == brokerId1 || leaderId == brokerId2)

    // the non-leader broker is the follower
    val followerId = if (leaderId == brokerId1) brokerId2 else brokerId1
    debug("Follower for " + topic + " is: %s".format(followerId))

    produceMessage(servers, topic, "first")
    waitUntilMetadataIsPropagated(servers, topic, partitionId)
    assertEquals(List("first"), consumeAllMessages(topic, 1))

    // shutdown follower server
    servers.filter(server => server.config.brokerId == followerId).foreach(server => shutdownServer(server))

    produceMessage(servers, topic, "second")
    assertEquals(List("first", "second"), consumeAllMessages(topic, 2))

    //remove any previous unclean election metric
    servers.foreach(_.kafkaController.controllerContext.stats.removeMetric("UncleanLeaderElectionsPerSec"))

    // shutdown leader and then restart follower
    servers.filter(_.config.brokerId == leaderId).foreach(shutdownServer)
    val followerServer = servers.find(_.config.brokerId == followerId).get
    followerServer.startup()

    // wait until new leader is (uncleanly) elected
    waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId, newLeaderOpt = Some(followerId))
    assertEquals(1, followerServer.kafkaController.controllerContext.stats.uncleanLeaderElectionRate.count())

    produceMessage(servers, topic, "third")

    // second message was lost due to unclean election
    assertEquals(List("first", "third"), consumeAllMessages(topic, 2))
  }

  /** Asserts that no leader is elected while only the stale follower is alive,
    * and that a clean transition happens once the follower rejoins the ISR.
    */
  def verifyUncleanLeaderElectionDisabled(): Unit = {
    // wait until leader is elected
    val leaderId = waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId)
    debug("Leader for " + topic + " is elected to be: %s".format(leaderId))
    assertTrue("Leader id is set to expected value for topic: " + topic, leaderId == brokerId1 || leaderId == brokerId2)

    // the non-leader broker is the follower
    val followerId = if (leaderId == brokerId1) brokerId2 else brokerId1
    debug("Follower for " + topic + " is: %s".format(followerId))

    produceMessage(servers, topic, "first")
    waitUntilMetadataIsPropagated(servers, topic, partitionId)
    assertEquals(List("first"), consumeAllMessages(topic, 1))

    // shutdown follower server
    servers.filter(server => server.config.brokerId == followerId).foreach(server => shutdownServer(server))

    produceMessage(servers, topic, "second")
    assertEquals(List("first", "second"), consumeAllMessages(topic, 2))

    //remove any previous unclean election metric
    servers.foreach(server => server.kafkaController.controllerContext.stats.removeMetric("UncleanLeaderElectionsPerSec"))

    // shutdown leader and then restart follower
    servers.filter(server => server.config.brokerId == leaderId).foreach(server => shutdownServer(server))
    val followerServer = servers.find(_.config.brokerId == followerId).get
    followerServer.startup()

    // verify that unclean election to non-ISR follower does not occur
    waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId, newLeaderOpt = Some(-1))
    assertEquals(0, followerServer.kafkaController.controllerContext.stats.uncleanLeaderElectionRate.count())

    // message production and consumption should both fail while leader is down
    try {
      produceMessage(servers, topic, "third", deliveryTimeoutMs = 1000, requestTimeoutMs = 1000)
      fail("Message produced while leader is down should fail, but it succeeded")
    } catch {
      case e: ExecutionException if e.getCause.isInstanceOf[TimeoutException] => // expected
    }
    assertEquals(List.empty[String], consumeAllMessages(topic, 0))

    // restart leader temporarily to send a successfully replicated message
    servers.filter(server => server.config.brokerId == leaderId).foreach(server => server.startup())
    waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId, newLeaderOpt = Some(leaderId))

    produceMessage(servers, topic, "third")

    //make sure follower server joins the ISR
    TestUtils.waitUntilTrue(() => {
      val partitionInfoOpt = followerServer.metadataCache.getPartitionInfo(topic, partitionId)
      partitionInfoOpt.isDefined && partitionInfoOpt.get.isr.contains(followerId)
    }, "Inconsistent metadata after first server startup")

    servers.filter(server => server.config.brokerId == leaderId).foreach(server => shutdownServer(server))

    // verify clean leader transition to ISR follower
    waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId, newLeaderOpt = Some(followerId))

    // verify messages can be consumed from ISR follower that was just promoted to leader
    assertEquals(List("first", "second", "third"), consumeAllMessages(topic, 3))
  }

  private def shutdownServer(server: KafkaServer) = {
    server.shutdown()
    server.awaitShutdown()
  }

  /** Consume `numMessages` records from offset 0 of the test partition,
    * using a fresh consumer with manual assignment (the group coordinator
    * may be down when this is called).
    */
  private def consumeAllMessages(topic: String, numMessages: Int): Seq[String] = {
    val brokerList = TestUtils.bootstrapServers(servers, ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT))
    // Don't rely on coordinator as it may be down when this method is called
    val consumer = TestUtils.createConsumer(brokerList,
      groupId = "group" + random.nextLong,
      enableAutoCommit = false,
      valueDeserializer = new StringDeserializer)
    try {
      val tp = new TopicPartition(topic, partitionId)
      consumer.assign(Seq(tp).asJava)
      consumer.seek(tp, 0)
      TestUtils.consumeRecords(consumer, numMessages).map(_.value)
    } finally consumer.close()
  }

  @Test
  def testTopicUncleanLeaderElectionEnable(): Unit = {
    // unclean leader election is disabled by default
    startBrokers(Seq(configProps1, configProps2))

    // create topic with 1 partition, 2 replicas, one on each broker
    adminZkClient.createTopicWithAssignment(topic, config = new Properties(), Map(partitionId -> Seq(brokerId1, brokerId2)))

    // wait until leader is elected
    val leaderId = waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId)

    // the non-leader broker is the follower
    val followerId = if (leaderId == brokerId1) brokerId2 else brokerId1

    produceMessage(servers, topic, "first")
    waitUntilMetadataIsPropagated(servers, topic, partitionId)
    assertEquals(List("first"), consumeAllMessages(topic, 1))

    // shutdown follower server
    servers.filter(server => server.config.brokerId == followerId).foreach(server => shutdownServer(server))

    produceMessage(servers, topic, "second")
    assertEquals(List("first", "second"), consumeAllMessages(topic, 2))

    //remove any previous unclean election metric
    servers.foreach(server => server.kafkaController.controllerContext.stats.removeMetric("UncleanLeaderElectionsPerSec"))

    // shutdown leader and then restart follower
    servers.filter(server => server.config.brokerId == leaderId).foreach(server => shutdownServer(server))
    val followerServer = servers.find(_.config.brokerId == followerId).get
    followerServer.startup()

    assertEquals(0, followerServer.kafkaController.controllerContext.stats.uncleanLeaderElectionRate.count())

    // message production and consumption should both fail while leader is down
    try {
      produceMessage(servers, topic, "third", deliveryTimeoutMs = 1000, requestTimeoutMs = 1000)
      fail("Message produced while leader is down should fail, but it succeeded")
    } catch {
      case e: ExecutionException if e.getCause.isInstanceOf[TimeoutException] => // expected
    }
    assertEquals(List.empty[String], consumeAllMessages(topic, 0))

    // Enable unclean leader election for topic
    val adminClient = createAdminClient()
    val newProps = new Properties
    newProps.put(KafkaConfig.UncleanLeaderElectionEnableProp, "true")
    TestUtils.alterTopicConfigs(adminClient, topic, newProps).all.get
    adminClient.close()

    // wait until new leader is (uncleanly) elected
    waitUntilLeaderIsElectedOrChanged(zkClient, topic, partitionId, newLeaderOpt = Some(followerId))
    assertEquals(1, followerServer.kafkaController.controllerContext.stats.uncleanLeaderElectionRate.count())

    produceMessage(servers, topic, "third")

    // second message was lost due to unclean election
    assertEquals(List("first", "third"), consumeAllMessages(topic, 2))
  }

  /** Build an admin client pointed at the running brokers, with a short
    * metadata max age so config changes are observed quickly.
    */
  private def createAdminClient(): Admin = {
    val config = new Properties
    val bootstrapServers = TestUtils.bootstrapServers(servers, new ListenerName("PLAINTEXT"))
    config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers)
    config.put(AdminClientConfig.METADATA_MAX_AGE_CONFIG, "10")
    AdminClient.create(config)
  }
}
| noslowerdna/kafka | core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala | Scala | apache-2.0 | 15,274 |
package org.scalamu.core.compilation
/**
* A scalac phase, introduced by a plugin.
*
* @param name internal name of the scalac phase
*/
sealed abstract class PluginPhase(val name: String)

/** Phase added by the scoverage plugin, which instruments code for coverage. */
case object ScoverageInstrumentationPhase extends PluginPhase("scoverage-instrumentation")

/** Phase added by the scalamu plugin, which applies the mutating transform. */
case object ScalamuMutationPhase extends PluginPhase("mutating-transform")
| sugakandrey/scalamu | core/src/main/scala/org/scalamu/core/compilation/skippablePhase.scala | Scala | gpl-3.0 | 369 |
package com.tutorial.stateful
import akka.actor.SupervisorStrategy.{Escalate, Restart, Stop}
import akka.actor._
import scala.collection.JavaConverters._
/**
* Created by renienj on 8/30/15.
*/
/** Supervising actor that spawns the daughter actors as children and
  * defines how their failures are handled.
  */
class Father extends Actor{
  // One-for-one: a directive applies only to the failing child.
  override final val supervisorStrategy = OneForOneStrategy(){
    case _: ActorInitializationException => Stop
    case _: ActorKilledException => Stop
    case _: Exception => Restart
    case _ => Escalate // non-Exception Throwables are escalated to this actor's parent
  }

  @scala.throws[Exception](classOf[Exception])
  override def preStart(): Unit = {
    //Daughters are under father
    val numberOfDaughters: Int = 2
    // Daughter names come from configuration
    // (akka.apartment.familyMembers.daughtersNames).
    val daughtersNames = context.system.settings.config.getStringList(
      "akka.apartment.familyMembers.daughtersNames").asScala
    // Spawn the first `numberOfDaughters` daughters as children of this actor.
    daughtersNames take numberOfDaughters foreach { dName =>
      context.actorOf(Props[Daughter], dName)
    }
  }

  // All messages are ignored; Father exists only to create and supervise children.
  override def receive: Receive = {
    case _ =>
  }
}
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// Licence: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.core.debug
import scala.collection.JavaConverters._
import java.io.File
import java.util.concurrent.ConcurrentHashMap
import org.ensime.api.{ EnsimeConfig, LineSourcePosition }
import org.ensime.config._
import org.ensime.util.file.RichFile
import org.scaladebugger.api.profiles.traits.info.LocationInfoProfile
import scala.collection.mutable
/**
* Represents a utility to map local source files provided by Ensime to
* JDI locations.
*
* @param config The Ensime configuration used to load source files
* @param pathMap Represents a cache of files indexed by short file paths such
* as file.scala rather than org/ensime/file.scala
*/
/**
 * Represents a utility to map local source files provided by Ensime to
 * JDI locations.
 *
 * @param config The Ensime configuration used to load source files
 * @param pathMap Represents a cache of files indexed by short file paths such
 *                as file.scala rather than org/ensime/file.scala
 */
class SourceMap(
  private val config: EnsimeConfig,
  private val pathMap: mutable.Map[String, File] = new ConcurrentHashMap[String, File]().asScala
) {
  /** Contains a collection of root paths where source files are located */
  private lazy val roots: Seq[String] = retrieveRoots

  /** Contains a set of local Scala source files */
  private lazy val sources: Set[File] = retrieveSources

  /** Contains a mapping of filename (file.scala) to local file */
  private lazy val sourceMap: Map[String, Set[File]] = sources.groupBy(_.getName)

  /**
   * Creates a new LineSourcePosition instance from the given location.
   *
   * @param location The location to use when constructing the new
   *                 LineSourcePosition
   * @return Some LineSourcePosition if matching source file is found,
   *         otherwise None
   */
  def newLineSourcePosition(
    location: LocationInfoProfile
  ): Option[LineSourcePosition] = {
    findFileByLocation(location).map(f =>
      LineSourcePosition(f, location.lineNumber))
  }

  /**
   * Finds the local source file mapping to the given location.
   *
   * @param location The location whose source file to find
   * @return Some file representing the local source, otherwise None
   */
  def findFileByLocation(location: LocationInfoProfile): Option[File] = {
    val path = location.trySourcePath.toOption
    path.flatMap(sourceForFilePath)
  }

  /**
   * Retrieves all current Scala sources available through Ensime with the
   * given file name.
   *
   * @param fileName The name of the file whose matches to retrieve
   * @return The set of sources whose file name match the given name
   */
  def sourcesForFileName(fileName: String): Set[File] =
    sourceMap.getOrElse(fileName, Set())

  /**
   * Retrieves the current Scala source available through Ensime with the
   * given file path.
   *
   * Fix from review: the original computed the linear scan over `sources`
   * unconditionally, even on a cache hit, so the cache never avoided any
   * work. The scan now only runs on a cache miss, and successful lookups
   * are memoized. Absence is deliberately not cached, matching the
   * original behavior.
   *
   * @param filePath The path of the file whose match to retrieve
   * @return Some source whose file path matches the given path, otherwise None
   */
  def sourceForFilePath(filePath: String): Option[File] = {
    pathMap.get(filePath).orElse {
      val result = sources.find(_.getAbsolutePath.endsWith(filePath))
      result.foreach(pathMap.put(filePath, _))
      result
    }
  }

  /**
   * Retrieves current Scala sources available through Ensime.
   *
   * @return The set of Scala source files
   */
  def canonicalSources: Set[File] = sources

  /**
   * Parses the canonical path of the provided file, removing the root path
   * and leaving the relative source path for use by breakpoints.
   *
   * @param file The file whose path to parse
   * @return The relative source path
   */
  def parsePath(file: File): String = {
    parsePath(file.getCanonicalPath)
  }

  /**
   * Parses the file path, removing the root path and leaving the relative
   * source path for use by breakpoints.
   *
   * @param filePath The absolute file path
   * @return The relative source path
   */
  def parsePath(filePath: String): String = {
    parsePath(roots, filePath)
  }

  /**
   * Parses a source path, removing the matching root path from the source path.
   *
   * @param rootPaths The root paths to remove from the source path
   * @param sourcePath The source path to strip of the root path
   * @return The stripped source path
   */
  private def parsePath(rootPaths: Seq[String], sourcePath: String): String = {
    rootPaths.find(sourcePath.startsWith).map(p => sourcePath.replace(p, ""))
      .getOrElse(sourcePath).stripPrefix(java.io.File.separator)
  }

  /**
   * Retrieves a collection of file paths representing the root locations of
   * source files managed by Ensime.
   *
   * @return The distinct root paths as strings
   */
  protected def retrieveRoots: Seq[String] = (
    config.compileClasspath.map(_.getCanonicalPath).toSeq ++
    config.referenceSourceRoots.map(_.getCanonicalPath) ++
    config.subprojects.flatMap(_.sourceRoots).map(_.getCanonicalPath)
  ).distinct

  /**
   * Retrieves a set of local files representing available Scala source files
   * managed by Ensime.
   *
   * @return The set of local files
   */
  protected def retrieveSources: Set[File] = config.scalaSourceFiles.map(_.canon)
}
| d1egoaz/ensime-sbt | src/sbt-test/sbt-ensime/ensime-server/core/src/main/scala/org/ensime/core/debug/SourceMap.scala | Scala | apache-2.0 | 5,272 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.stream.sql
import org.apache.flink.api.scala._
import org.apache.flink.table.api.scala._
import org.apache.flink.table.planner.utils.TableTestBase
import org.junit.Test
/**
 * Plan tests for table scans in streaming SQL, covering both registered
 * table sources and tables backed by a DataStream.
 */
class TableScanTest extends TableTestBase {

  // Streaming-specific test harness used to register tables and verify plans.
  private val util = streamTestUtil()

  @Test
  def testTableSourceScan(): Unit = {
    // Register a table source with three typed columns, then check the plan
    // produced for a full scan of it.
    util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
    util.verifyPlan("SELECT * FROM MyTable")
  }

  @Test
  def testDataStreamScan(): Unit = {
    // Same check, but for a table backed by a registered DataStream.
    util.addDataStream[(Int, Long, String)]("DataStreamTable", 'a, 'b, 'c)
    util.verifyPlan("SELECT * FROM DataStreamTable")
  }
}
| fhueske/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/stream/sql/TableScanTest.scala | Scala | apache-2.0 | 1,450 |
package org.zachary.aws_repl
import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.services.ec2.AmazonEC2Client
import com.amazonaws.services.ec2.model._
import scala.collection.JavaConverters._
/**
 * Convenience wrapper around [[AmazonEC2Client]] adding higher-level helpers
 * for AMI creation, instance tagging and instance lookup/printing, intended
 * for interactive (REPL) use.
 *
 * @param awscp provider of the AWS credentials used by every request
 * @param cc low-level client configuration (proxy, timeouts, retries, ...)
 */
class ExtendedEC2Client(awscp: AWSCredentialsProvider, cc: ClientConfiguration) extends AmazonEC2Client(awscp, cc) {

  /**
   * Creates an AMI from an existing instance.
   *
   * @param instanceId the instance to image
   * @param imageName name of the new AMI
   * @param description description stored on the AMI
   * @param noReboot when true (default) the instance is not rebooted before
   *                 imaging
   * @return the id of the newly created AMI
   */
  def createImage(instanceId: String, imageName: String, description: String, noReboot: Boolean = true): String = {
    val request: CreateImageRequest = new CreateImageRequest
    request.setInstanceId(instanceId)
    request.setName(imageName)
    request.setDescription(description)
    request.setNoReboot(noReboot)
    val image: CreateImageResult = createImage(request)
    image.getImageId
  }

  /**
   * Adds the given key/value tags to a single instance.
   *
   * @param instanceId the instance to tag
   * @param tags tag keys mapped to tag values
   */
  def createTags(instanceId: String, tags: Map[String, String]): Unit = {
    val request: CreateTagsRequest = new CreateTagsRequest
    request.setResources(List(instanceId).asJava)
    request.setTags(tags.map(t => {
      new Tag(t._1, t._2)
    }).toList.asJava)
    createTags(request)
  }

  /**
   * Removes the given key/value tags from a single instance.
   * Mirrors [[createTags]] but issues a delete request instead.
   *
   * @param instanceId the instance whose tags to remove
   * @param tags tag keys mapped to tag values
   */
  def deleteTags(instanceId: String, tags: Map[String, String]): Unit = {
    val request: DeleteTagsRequest = new DeleteTagsRequest
    request.setResources(List(instanceId).asJava)
    request.setTags(tags.map(t => {
      new Tag(t._1, t._2)
    }).toList.asJava)
    deleteTags(request)
  }

  /**
   * Tags every instance whose "Name" tag matches `instanceName` (the filter
   * value may contain wildcards, as in the example below) with a single
   * key/value pair, logging each tagged instance id to stdout.
   *
   * Example: ec2.createTagsOnInstancesByName("foo-node-*", "projectid", "some.value")
   */
  def createTagsOnInstancesByName(instanceName: String, tagName: String, tagValue: String): Unit = {
    val filters: Filter = new Filter("tag:Name", List(s"$instanceName").asJava)
    val request: DescribeInstancesRequest = new DescribeInstancesRequest
    request.setFilters(List(filters).asJava)
    val instances: DescribeInstancesResult = describeInstances(request)
    instances.getReservations.iterator().asScala.foreach(reservation => {
      reservation.getInstances.asScala.foreach(instance => {
        createTags(instance.getInstanceId, Map(tagName -> tagValue))
        println(s"Setting tag $tagName for ${instance.getInstanceId}")
      })
    })
  }

  /** Deregisters (deletes) the AMI with the given id. */
  def deregisterImage(amiId: String): Unit = {
    deregisterImage(new DeregisterImageRequest(amiId))
  }

  /** Collection of instance summaries with various textual renderings. */
  case class InstancesResult(instanceValues: List[InstanceValues]) {
    // One aligned row per instance (id, state, private IP, public IP).
    def print: String = {
      instanceValues.map(_.toString).mkString("\\n")
    }
    // Private/public IP pairs, one instance per line.
    def printIPs: String = {
      instanceValues.map(_.toIPs).mkString("\\n")
    }
    def printPrivateIPs: String = {
      instanceValues.map(_.privateIPAddress).sortBy(identity).mkString("\\n")
    }
    def printPublicIPs: String = {
      instanceValues.map(_.publicIPAddress).sortBy(identity).mkString("\\n")
    }
  }

  /** Summary of a single instance, formatted as fixed-width columns. */
  case class InstanceValues(
    instanceId: String,
    state: String,
    privateIPAddress: String,
    publicIPAddress: String
  ) {
    override def toString: String = {
      f"$instanceId%-12s$state%-10s$privateIPAddress%-15s$publicIPAddress"
    }
    def toIPs: String = {
      f"$privateIPAddress%-15s$publicIPAddress"
    }
  }

  /**
   * Finds all instances whose "Name" tag matches `tagName` and returns their
   * summaries sorted by private IP address.
   */
  def findInstanceValuesByTagName(tagName: String): InstancesResult = {
    val filters: Filter = new Filter("tag:Name", List(tagName).asJava)
    val request = new DescribeInstancesRequest
    request.setFilters(List(filters).asJava)
    InstancesResult(describeInstances(request).getReservations.iterator().asScala.flatMap(reservation => {
      reservation.getInstances.asScala.map(i => {
        InstanceValues(i.getInstanceId, i.getState.getName, i.getPrivateIpAddress, i.getPublicIpAddress)
      })
    }).toList.sortBy(_.privateIPAddress))
  }

  // Falls back to "No Name" when the instance has no "Name" tag.
  private def getInstanceName(instance: Instance): String =
    instance.getTags.asScala.find(_.getKey == "Name").map(_.getValue).getOrElse("No Name")

  /**
   * Prints a CSV-style listing of every instance in the account, sorted by
   * name. Commas inside the "owners" tag are replaced with semicolons so the
   * output stays one-instance-per-row.
   */
  def printAllInstances(): Unit = {
    val request = new DescribeInstancesRequest
    println("\\n")
    println("Name,InstanceId,InstanceType,State,PublicIP,Owners,Team,LaunchTime")
    describeInstances(request).getReservations.iterator().asScala.map(_.getInstances.asScala).flatten.toList
      .sortBy(getInstanceName) foreach (i => {
      val name = getInstanceName(i)
      val state = i.getState.getName
      val owners = i.getTags.asScala.find(_.getKey == "owners").map(_.getValue.replace(",", ";")).getOrElse("No owner")
      val team = i.getTags.asScala.find(_.getKey == "Team").map(_.getValue).getOrElse("No team")
      println(
        s"$name,${i.getInstanceId},${i.getInstanceType},$state,${i.getPublicIpAddress},$owners,$team," +
          s"${i.getLaunchTime}")
    })
    println("\\n")
  }
}
| zacharyp/aws_repl | lib/src/main/scala/org/zachary/aws_repl/ExtendedEC2Client.scala | Scala | mit | 4,564 |
package spire.math
package poly
import java.math.{ RoundingMode, MathContext }
import spire.std.bigInt._
import spire.std.bigDecimal._
/**
 * A trait that can be used to retrieve the (possibly approximated) real
 * roots of the polynomial `poly`.
 */
trait Roots[A] { self =>

  /** The polynomial the roots belong to. */
  def poly: Polynomial[A]

  /** Returns the number of real roots of `poly`. */
  def count: Int

  /**
   * Returns the `i`-th real root of `poly`, or throws an
   * `IndexOutOfBoundsException` if there is no `i`-th real root.
   */
  def get(i: Int): A
}
object Roots {

  /** Isolates the real roots of `poly` into disjoint rational intervals. */
  final def isolateRoots[A](poly: Polynomial[A])(implicit isolator: RootIsolator[A]): Vector[Interval[Rational]] =
    isolator.isolateRoots(poly)

  /**
   * Returns a polynomial with the same roots as `poly`, but only integer coefficients.
   */
  final def removeFractions(poly: Polynomial[Rational]): Polynomial[BigInt] = {
    val coeffs = poly.coeffsArray
    // `factors` accumulates the least common multiple of all coefficient
    // denominators: lcm(acc, d) = acc * (d / gcd(acc, d)).
    val factors = coeffs.foldLeft(BigInt(1)) { (acc, coeff) =>
      val d = coeff.denominator
      acc * (d / acc.gcd(d))
    }
    // Multiplying every coefficient by the common denominator turns them all
    // into integers without changing the roots.
    val zCoeffs = coeffs.map(n => n.numerator * (factors / n.denominator))
    Polynomial.dense(zCoeffs)
  }

  /**
   * Returns a polynomial with the same roots as `poly`, but only integer coefficients.
   */
  final def removeDecimal(poly: Polynomial[BigDecimal]): Polynomial[BigInt] = {
    if (poly == Polynomial.zero[BigDecimal]) {
      Polynomial.zero[BigInt]
    } else {
      // Strip trailing zeros first so each coefficient's scale is minimal.
      val terms = poly.terms.map { case Term(c, e) =>
        Term(c.bigDecimal.stripTrailingZeros, e)
      }
      // Shift all coefficients left by the largest scale; a common power-of-10
      // factor leaves the roots unchanged.
      val maxScale = terms.map(_.coeff.scale).max
      Polynomial(terms.map { case Term(c, e) =>
        val c0 = BigInt(c.movePointRight(maxScale).unscaledValue)
        Term(c0, e)
      })
    }
  }

  /**
   * Returns an upper bit bound on the roots of the polynomial `p`.
   *
   * @throws ArithmeticException if the bound does not fit into an Int
   */
  final def upperBound(p: Polynomial[BigInt]): Int = {
    val lgLastCoeff = p.maxOrderTermCoeff.abs.bitLength
    val n = p.degree
    var maxBound = Double.NegativeInfinity
    p.foreachNonZero { (k, coeff) =>
      if (k != n) {
        val i = n - k
        // Bit-length ratio of this coefficient against the leading one,
        // scaled by its distance from the leading term.
        val bound = ((coeff.abs.bitLength - lgLastCoeff - 1) / i) + 2
        maxBound = max(maxBound, bound.toDouble)
      }
    }
    if (maxBound.isValidInt) {
      maxBound.toInt
    } else {
      throw new ArithmeticException("bound too large")
    }
  }

  /**
   * Returns a lower bit bound on the roots of the polynomial `p`.
   * The roots of the reciprocal polynomial are the inverses of the roots
   * of `p`, so its upper bound, negated, bounds `p`'s roots from below.
   */
  def lowerBound(p: Polynomial[BigInt]): Int =
    -upperBound(p.reciprocal)
}
/**
 * Real roots of a BigDecimal polynomial, approximated to a fixed absolute
 * `scale` (number of digits after the decimal point).
 */
private[poly] class BigDecimalSimpleRoots(
  val poly: Polynomial[BigDecimal],
  scale: Int
) extends Roots[BigDecimal] {
  // Root isolation works on an integer-coefficient polynomial that shares
  // the same roots as `poly`.
  private val zpoly: Polynomial[BigInt] = Roots.removeDecimal(poly)
  private val isolated: Vector[Interval[Rational]] = Roots.isolateRoots(zpoly)

  def count: Int = isolated.size

  def get(i: Int): BigDecimal = if (i < 0 || i >= count) {
    throw new IndexOutOfBoundsException(i.toString)
  } else {
    isolated(i) match {
      case Point(value) =>
        // Rational root: convert directly at the requested scale.
        value.toBigDecimal(scale, RoundingMode.HALF_EVEN)
      case Bounded(lb, ub, _) =>
        // Irrational root: refine the isolating interval until the
        // approximation reaches the requested scale.
        new BigDecimal(
          BigDecimalRootRefinement(poly, lb, ub, scale).approximateValue,
          MathContext.UNLIMITED
        )
      case _ =>
        throw new RuntimeException("invalid isolated root interval")
    }
  }
}
/**
 * Real roots of a BigDecimal polynomial, approximated to the relative
 * precision of the given [[MathContext]].
 */
private[poly] class BigDecimalRelativeRoots(
  val poly: Polynomial[BigDecimal],
  mc: MathContext
) extends Roots[BigDecimal] {
  // Root isolation works on an integer-coefficient polynomial that shares
  // the same roots as `poly`.
  private val zpoly: Polynomial[BigInt] = Roots.removeDecimal(poly)
  private val isolated: Vector[Interval[Rational]] = Roots.isolateRoots(zpoly)

  def count: Int = isolated.size

  def get(i: Int): BigDecimal = if (i < 0 || i >= count) {
    throw new IndexOutOfBoundsException(i.toString)
  } else {
    isolated(i) match {
      case Point(value) =>
        value.toBigDecimal(mc)
      case Bounded(lb, ub, _) =>
        // Represent the i-th root exactly as an Algebraic, then round it to
        // the requested MathContext.
        Algebraic.unsafeRoot(zpoly, i, lb, ub).toBigDecimal(mc)
      case _ =>
        throw new RuntimeException("invalid isolated root interval")
    }
  }
}
// FIXME: This is pretty hacky. We should implement proper exact real roots:
// http://arxiv.org/pdf/1011.0344v2.pdf
// http://arxiv.org/pdf/1104.1362v3.pdf
/**
 * Real roots of a [[Real]] polynomial, approximated through a rational
 * polynomial with the same roots and rounded to `Real.digits` digits.
 */
private[poly] class FixedRealRoots(
  val poly: Polynomial[Real]
) extends Roots[Real] {
  // Root isolation works on an integer-coefficient polynomial that shares
  // the same roots as `poly`.
  private val zpoly: Polynomial[BigInt] = Roots.removeFractions(poly.map(_.toRational))
  private val isolated: Vector[Interval[Rational]] = Roots.isolateRoots(zpoly)

  def count: Int = isolated.size

  def get(i: Int): Real = if (i < 0 || i >= count) {
    throw new IndexOutOfBoundsException(i.toString)
  } else {
    isolated(i) match {
      case Point(value) =>
        Real(value)
      case Bounded(lb, ub, _) =>
        // Approximate the algebraic root at Real's working precision.
        Real(Algebraic.unsafeRoot(zpoly, i, lb, ub)
          .toBigDecimal(new MathContext(Real.digits, RoundingMode.HALF_EVEN)))
      case _ =>
        throw new RuntimeException("invalid isolated root interval")
    }
  }
}
/**
 * Real roots of a [[Number]] polynomial, delegating to a relative-precision
 * BigDecimal implementation at the default MathContext.
 */
private[poly] class NumberRoots(
  val poly: Polynomial[Number]
) extends Roots[Number] {
  private val delegate =
    new BigDecimalRelativeRoots(poly.map(_.toBigDecimal), BigDecimal.defaultMathContext)

  def count: Int = delegate.count

  def get(i: Int): Number = Number(delegate.get(i))
}
| woparry/spire | core/src/main/scala/spire/math/poly/Roots.scala | Scala | mit | 5,237 |
// IntelliJ type-inference testdata. The /*start*/.../*end*/ markers delimit
// the expression under test and the trailing `//Int` comment encodes the
// expected inferred type — do not rename, reformat or remove any of them.
package one.two.three
import _root_.scala.collection.mutable.HashSet
package object scala {
  // Structural type deliberately shadowing the imported HashSet; part of the
  // name-resolution scenario being tested.
  type HashSet = {
    def close(): Int
  }
  def using: HashSet = null
  /*start*/using.close()/*end*/
}
//Int
package org.jetbrains.plugins.scala
package lang
package completion3
import com.intellij.codeInsight.completion.{CodeCompletionHandlerBase, CompletionType}
import com.intellij.codeInsight.lookup.impl.LookupImpl
import com.intellij.codeInsight.lookup.{Lookup, LookupElement, LookupElementPresentation, LookupManager}
import com.intellij.psi.statistics.StatisticsManager
import com.intellij.psi.statistics.impl.StatisticsManagerImpl
import org.jetbrains.plugins.scala.base.ScalaLightCodeInsightFixtureTestAdapter
import org.jetbrains.plugins.scala.base.ScalaLightCodeInsightFixtureTestAdapter.normalize
import org.junit.Assert.{assertEquals, fail}
import scala.collection.JavaConverters
/**
* @author Alexander Podkhalyuzin
*/
abstract class ScalaCodeInsightTestBase extends ScalaLightCodeInsightFixtureTestAdapter {

  import CompletionType.BASIC
  import Lookup.REPLACE_SELECT_CHAR
  import ScalaCodeInsightTestBase._

  /** Enables completion statistics for the lifetime of the test fixture. */
  protected override def setUp(): Unit = {
    super.setUp()
    StatisticsManager.getInstance match {
      case impl: StatisticsManagerImpl => impl.enableStatistics(getTestRootDisposable)
    }
  }

  override def getTestDataPath: String =
    s"${super.getTestDataPath}completion3/"

  /** The currently shown lookup popup, if any. */
  protected final def activeLookup: Option[LookupImpl] =
    LookupManager.getActiveLookup(getEditor) match {
      case impl: LookupImpl => Some(impl)
      case _ => None
    }

  /**
   * All items of the active lookup matching `predicate`;
   * empty when no lookup is shown.
   */
  protected final def lookups(predicate: LookupElement => Boolean): Seq[LookupElement] =
    activeLookup match {
      case Some(lookup) => lookupItems(lookup).filter(predicate)
      case _ => Seq.empty
    }

  /**
   * Invokes completion on `fileText`, selects the item with lookup string
   * `item` using `char`, and checks the editor content against `resultText`.
   */
  protected def doCompletionTest(fileText: String,
                                 resultText: String,
                                 item: String,
                                 char: Char = REPLACE_SELECT_CHAR,
                                 time: Int = DEFAULT_TIME,
                                 completionType: CompletionType = BASIC): Unit =
    doCompletionTest(fileText, resultText, char, time, completionType) {
      hasLookupString(_, item)
    }

  /**
   * Like the overload above, but selects the first lookup item matching an
   * arbitrary `predicate`. Fails the test when no item matches.
   */
  protected final def doCompletionTest(fileText: String,
                                       resultText: String,
                                       char: Char,
                                       time: Int,
                                       completionType: CompletionType)
                                      (predicate: LookupElement => Boolean): Unit = {
    configureTest(fileText, completionType, time)

    val maybePair = for {
      lookup <- activeLookup
      item <- lookupItems(lookup).find(predicate)
    } yield (lookup, item)

    maybePair match {
      case Some((lookup, item)) =>
        lookup.finishLookup(char, item)
        checkResultByText(resultText)
      case _ => fail("Lookups not found")
    }
  }

  /**
   * Invokes completion and asserts that exactly `count` lookup items have
   * the lookup string `item`.
   */
  protected def doMultipleCompletionTest(fileText: String,
                                         count: Int,
                                         item: String,
                                         completionType: CompletionType = BASIC,
                                         time: Int = DEFAULT_TIME): Unit =
    doMultipleCompletionTest(fileText, completionType, time, count) {
      hasLookupString(_, item)
    }

  /** Predicate-based variant of the count assertion above. */
  protected final def doMultipleCompletionTest(fileText: String,
                                               completionType: CompletionType,
                                               time: Int,
                                               count: Int)
                                              (predicate: LookupElement => Boolean): Unit = {
    configureTest(fileText, completionType, time)
    assertEquals(count, lookups(predicate).size)
  }

  /** Asserts that no lookup item with the lookup string `item` is offered. */
  protected def checkNoCompletion(fileText: String,
                                  item: String,
                                  completionType: CompletionType = BASIC,
                                  time: Int = DEFAULT_TIME): Unit =
    doMultipleCompletionTest(fileText, 0, item, completionType, time)

  /** Asserts that no lookup item matching `predicate` is offered. */
  protected final def checkNoCompletion(fileText: String,
                                        completionType: CompletionType,
                                        time: Int)
                                       (predicate: LookupElement => Boolean): Unit =
    doMultipleCompletionTest(fileText, completionType, time, 0)(predicate)

  /** Loads `fileText` into the editor and invokes completion `time` times. */
  protected final def configureTest(fileText: String,
                                    completionType: CompletionType = BASIC,
                                    time: Int = DEFAULT_TIME): Unit = {
    configureFromFileText(fileText)
    new CodeCompletionHandlerBase(completionType, false, false, true)
      .invokeCompletion(getProject, getEditor, time, false, false)
  }

  protected def checkResultByText(expectedFileText: String, ignoreTrailingSpaces: Boolean = true): Unit =
    getFixture.checkResult(normalize(expectedFileText), ignoreTrailingSpaces)
}
object ScalaCodeInsightTestBase {

  // Default number of completion invocations per test.
  val DEFAULT_TIME: Int = 1

  /** Matches a lookup element by its raw lookup string. */
  def hasLookupString(lookup: LookupElement, lookupString: String): Boolean =
    lookup.getLookupString == lookupString

  /**
   * Matches a lookup element by its rendered presentation: item text,
   * italic flag and tail text must all agree (tail text defaults to absent).
   */
  def hasItemText(lookup: LookupElement,
                  lookupString: String,
                  itemText: String,
                  itemTextItalic: Boolean = false,
                  tailText: String = null): Boolean =
    hasLookupString(lookup, lookupString) && {
      val presentation = new LookupElementPresentation
      lookup.renderElement(presentation)

      presentation.getItemText == itemText &&
        presentation.isItemTextItalic == itemTextItalic &&
        presentation.getTailText == tailText
    }

  // Java list of lookup items converted to a Scala collection.
  private def lookupItems(lookup: LookupImpl) = {
    import JavaConverters._
    lookup.getItems.asScala
  }
}
} | jastice/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/completion3/ScalaCodeInsightTestBase.scala | Scala | apache-2.0 | 5,752 |
package at.logic.gapt.cutintro
import at.logic.gapt.expr._
/**
 * Computes the unified literals, i.e. the set of literals that are used to construct the cut formula
 */
object gStarUnify {

  /**
   * Computes the unified literals, i.e. the set of literals that are used to construct the cut formula
   * @param seHs The given schematic Pi2-grammar
   * @param nameOfExistentialVariable Name of the existential variable of the cut-formula
   * @param nameOfUniversalVariable Name of the universal variable of the cut-formula
   * @return Set of unified literals
   */
  def apply(
    seHs: Pi2SeHs,
    nameOfExistentialVariable: Var,
    nameOfUniversalVariable: Var
  ): Set[Formula] = {

    // To compute the unified literals, we have to consider all unification pairs.
    // These are pairs of literals (P,Q) occurring in the reduced representation such that:
    // The universal eigenvariable alpha may occurs in P but none of the existential eigenvariables beta_1,...,beta_m.
    // Some of the existential eigenvariables beta_1,...,beta_m may occur in Q but not the universal eigenvariable alpha.
    // Unification pairs are unifiable if there are terms s,t such that a replacement of terms in P by s and a replacement
    // of terms in Q by s turn the result into dual literals. Therefore, we can assume that exactly one literal is negated.
    // Note that the set itself indicates whether the literal is negated or not. The negation has already been dropped.
    val ( alpha, beta, neutral ) = seHs.literalsInTheDNTAs
    val ( alphaPos, alphaNeg ) = seHs.sortAndAtomize( alpha )
    val ( betaPos, betaNeg ) = seHs.sortAndAtomize( beta )
    val ( neutralPos, neutralNeg ) = seHs.sortAndAtomize( neutral )

    // Set that eventually becomes the return value, i.e. the set of unified literals.
    // Every combination pairs a positive with a negative partition so that the
    // resulting pair consists of one literal and one negated literal.
    ( for {
      ( alphas, betas ) <- Seq(
        alphaPos -> betaNeg, alphaPos -> neutralNeg,
        alphaNeg -> betaPos, alphaNeg -> neutralPos,
        neutralNeg -> betaPos, neutralPos -> betaNeg
      )
      posAt <- alphas
      negAt <- betas
      lit <- unifyLiterals( seHs, posAt, negAt, nameOfExistentialVariable, nameOfUniversalVariable )
    } yield lit ).toSet
  }

  /**
   * Checks whether the literals (only the names without the arguments) are dual to each other and calls
   * the unify function
   * @param seHs The given schematic Pi2-grammar
   * @param posAt First element of the unification pair
   * @param negAt Second element of the unification pair
   * @param nameOfExistentialVariable Name of the existential variable of the cut-formula
   * @param nameOfUniversalVariable Name of the universal variable of the cut-formula
   * @return Option type that might contain an unified literal, i.e. a literal in which neither the universal
   *         nor one of the existential eigenvariables occurs, but maybe nameOfExistentialVariable or nameOfUniversalVariable
   */
  private def unifyLiterals(
    seHs: Pi2SeHs,
    posAt: Formula,
    negAt: Formula,
    nameOfExistentialVariable: Var,
    nameOfUniversalVariable: Var
  ): Option[Formula] = {

    // nameOfPos and nameOfNeg are the names of the corresponding atoms that have to be equal. Otherwise, there is no unified literal.
    // In the case that the names are equal, we call the unify function with the arguments argsP and argsN of the corresponding literals.
    val Apps( nameOfPos, argsP ): Formula = posAt
    val Apps( nameOfNeg, argsN ): Formula = negAt

    val unifiedLiteral: Option[Formula] = nameOfPos match {
      // Same predicate symbol and same arity: try to unify argument-wise.
      case t if ( ( nameOfNeg == t ) && ( argsP.length == argsN.length ) ) => {
        val unifiedArgs = unify(
          seHs,
          argsP.zip( argsN ),
          nameOfExistentialVariable,
          nameOfUniversalVariable
        )
        // Rebuild the literal only when every argument pair was unified.
        val theUnifiedLiteral = unifiedArgs match {
          case Some( s ) => {
            if ( s.length == argsP.length ) {
              Some( Apps( nameOfPos, s ).asInstanceOf[Formula] )
            } else {
              None
            }
          }
          case _ => None
        }
        theUnifiedLiteral
      }
      case _ => None
    }

    unifiedLiteral
  }

  /**
   * Compares a zipped list of arguments and decides whether a pair of this list is unifiable corresponding to a
   * grammar seHs (see productionRules), whether we have to call the unify function on the subterms of the pair, or whether
   * the pair is not unifiable, i.e. whether to stop the whole function and return None
   * @param seHs The given schematic Pi2-grammar
   * @param zippedArgs Two lists of terms (Expr) that will be compared pairwise
   * @param nameOfExistentialVariable Name of the existential variable of the cut-formula
   * @param nameOfUniversalVariable Name of the universal variable of the cut-formula
   * @return An option type that might contain a list of terms (Expr) of the same length of zippedArgs in which neither the universal
   *         nor one of the existential eigenvariables occurs, but maybe nameOfExistentialVariable or nameOfUniversalVariable
   */
  private def unify(
    seHs: Pi2SeHs,
    zippedArgs: List[( Expr, Expr )],
    nameOfExistentialVariable: Var,
    nameOfUniversalVariable: Var
  ): Option[Seq[Expr]] = {

    var unifiedTerms: Option[Seq[Expr]] = None

    // A run through all pairs. A single non-unifiable pair aborts the whole
    // unification via the nonlocal `return None`.
    zippedArgs.foreach( t => {
      unifiedTerms = unifiedTerms match {
        case Some( old ) => unifyPair( seHs, t, nameOfExistentialVariable, nameOfUniversalVariable ) match {
          case Some( update ) => Option( old :+ update )
          case None => return None
        }
        case None => unifyPair( seHs, t, nameOfExistentialVariable, nameOfUniversalVariable ) match {
          case Some( update ) => Option( Seq( update ) )
          case None => return None
        }
      }
    } )

    unifiedTerms
  }

  /**
   * Tries to unify a single pair of terms: first against the production rules
   * for the universal and existential cut variables, then structurally by
   * recursing into equal outermost function symbols.
   */
  private def unifyPair(
    seHs: Pi2SeHs,
    termPair: ( Expr, Expr ),
    nameOfExistentialVariable: Var,
    nameOfUniversalVariable: Var
  ): Option[Expr] = {

    // If there are substitutions tL and tR for the universal variable of the cut formula then we can
    // replace tL or tR with nameOfUniversalVariable, i.e. we extend the current list of arguments with
    // nameOfUniversalVariable and stop the loop for the current pair of terms
    unifyPairAccordingTo( seHs.productionRulesXS, termPair, nameOfUniversalVariable ) match {
      case Some( update ) => return Option( update )
      case None =>
    }

    // If there are substitutions tL and tR for the existential variable of the cut formula then we can
    // replace tL or tR with nameOfExistentialVariable, i.e. we extend the current list of arguments with
    // nameOfExistentialVariable and stop the loop for the current pair of terms
    unifyPairAccordingTo( seHs.productionRulesYS, termPair, nameOfExistentialVariable ) match {
      case Some( update ) => return Option( update )
      case None =>
    }

    // Since we could not unify the pair so far, we have to check whether the outermost function of the terms
    // is equal, whether the terms are equal, whether the terms are eigenvariables, or whether the pair is
    // not unifiable
    val ( tL, tR ) = termPair
    val Apps( nameOfArgL, argsOfArgL ) = tL
    val Apps( nameOfArgR, argsOfArgR ) = tR

    // If the terms are equal, we have to check whether the terms contain eigenvariables and replace them
    if ( ( nameOfArgL == nameOfArgR ) && ( argsOfArgL.length == argsOfArgR.length ) ) {

      if ( tL.syntaxEquals( seHs.universalEigenvariable ) ) return Option( nameOfUniversalVariable )
      seHs.existentialEigenvariables.foreach( existentialEigenvariable => if ( tL.syntaxEquals( existentialEigenvariable ) ) {
        return Option( nameOfExistentialVariable )
      } )

      // Constant (zero-ary) symbol: already eigenvariable-free, keep as is.
      if ( argsOfArgL.length == 0 ) return Some( tL )

      // Same function symbol: recurse into the argument lists.
      unify(
        seHs,
        argsOfArgL.zip( argsOfArgR ),
        nameOfExistentialVariable,
        nameOfUniversalVariable
      ) match {
        case Some( r ) => {
          if ( argsOfArgL.length == r.length ) return Some( Apps( nameOfArgL, r ) )
        }
        case None =>
      }
    }

    None
  }

  /** Maps the pair to `name` when it occurs literally among the production rules. */
  private def unifyPairAccordingTo(
    productionRules: List[( Expr, Expr )],
    termPair: ( Expr, Expr ),
    name: Var
  ): Option[Expr] =
    if ( productionRules contains termPair ) Some( name ) else None
}
| gebner/gapt | core/src/main/scala/at/logic/gapt/cutintro/gStarUnify.scala | Scala | gpl-3.0 | 8,588 |
package pl.touk.nussknacker.engine.flink.util.transformer.aggregate
import org.apache.flink.streaming.api.windowing.triggers.{Trigger, TriggerResult}
import org.apache.flink.streaming.api.windowing.windows.Window
object triggers {

  //NOTE: composing triggers is tricky. We may want to e.g. ignore TriggerResult from delegate, but still we invoke e.g. onEventTime, because we want to handle triggers
  /**
   * Base class forwarding every [[Trigger]] callback to `delegate`, so that
   * subclasses only override the callbacks whose verdict they want to change.
   */
  abstract class DelegatingTrigger[T, W <: Window](delegate: Trigger[_ >: T, W]) extends Trigger[T, W] {

    override def onElement(element: T, timestamp: Long, window: W, ctx: Trigger.TriggerContext): TriggerResult = delegate.onElement(element, timestamp, window, ctx)

    override def onProcessingTime(time: Long, window: W, ctx: Trigger.TriggerContext): TriggerResult = delegate.onProcessingTime(time, window, ctx)

    override def onEventTime(time: Long, window: W, ctx: Trigger.TriggerContext): TriggerResult = delegate.onEventTime(time, window, ctx)

    override def clear(window: W, ctx: Trigger.TriggerContext): Unit = delegate.clear(window, ctx)

    override def canMerge: Boolean = delegate.canMerge

    override def onMerge(window: W, ctx: Trigger.OnMergeContext): Unit = delegate.onMerge(window, ctx)
  }

  //Window won't be emitted on end, but after each event. This would be useful e.g. when we want to have
  //daily (i.e. for current day) aggregate for each incoming event, but we're not interested in daily summary on each midnight
  case class FireOnEachEvent[T, W <: Window](delegate: Trigger[_ >: T, W]) extends DelegatingTrigger[T, W](delegate) {

    override def onElement(element: T, timestamp: Long, window: W, ctx: Trigger.TriggerContext): TriggerResult = {
      val result = super.onElement(element, timestamp, window, ctx)
      // Upgrade the delegate's verdict so that every element fires the
      // window, preserving any purge the delegate requested.
      result match {
        case TriggerResult.CONTINUE => TriggerResult.FIRE
        case TriggerResult.PURGE => TriggerResult.FIRE_AND_PURGE
        case fire => fire
      }
    }

    override def onProcessingTime(time: Long, window: W, ctx: Trigger.TriggerContext): TriggerResult = {
      // Delegate is still invoked for its internal bookkeeping, but its
      // verdict is ignored: timers never fire this window.
      super.onProcessingTime(time, window, ctx)
      TriggerResult.CONTINUE
    }

    override def onEventTime(time: Long, window: W, ctx: Trigger.TriggerContext): TriggerResult = {
      // Same as onProcessingTime: invoke the delegate, suppress its verdict.
      super.onEventTime(time, window, ctx)
      TriggerResult.CONTINUE
    }
  }

  /**
   * Fires and purges the window as soon as an element satisfying
   * `endFunction` arrives; all other callbacks defer to `delegate`.
   */
  case class ClosingEndEventTrigger[T, W <: Window](delegate: Trigger[_ >: T, W],
                                                    endFunction: T => Boolean) extends DelegatingTrigger[T, W](delegate) {

    override def onElement(element: T, timestamp: Long, window: W, ctx: Trigger.TriggerContext): TriggerResult = {
      if (endFunction(element)) {
        TriggerResult.FIRE_AND_PURGE
      } else super.onElement(element, timestamp, window, ctx)
    }
  }
}
| TouK/nussknacker | engine/flink/components/base/src/main/scala/pl/touk/nussknacker/engine/flink/util/transformer/aggregate/triggers.scala | Scala | apache-2.0 | 2,782 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package java.util.concurrent.atomic
/**
 * Scala.js implementation of `java.util.concurrent.atomic.AtomicReferenceArray`.
 * JavaScript is single-threaded, so plain reads and writes already provide
 * the atomicity the Java API promises.
 */
class AtomicReferenceArray[E <: AnyRef](
    length: Int) extends Serializable {

  /** Copy constructor: the backing array receives a copy of `array`. */
  def this(array: Array[E]) = {
    this(array.length)
    System.arraycopy(array, 0, elements, 0, array.length)
  }

  // Backing storage; erased element type, cast back on read.
  private val elements: Array[AnyRef] = new Array[AnyRef](length)

  final def length(): Int =
    elements.length

  final def get(i: Int): E =
    elements(i).asInstanceOf[E]

  final def set(i: Int, newValue: E): Unit =
    elements(i) = newValue

  // No weaker ordering exists here; identical to a plain set.
  final def lazySet(i: Int, newValue: E): Unit =
    set(i, newValue)

  final def getAndSet(i: Int, newValue: E): E = {
    val previous = get(i)
    set(i, newValue)
    previous
  }

  /** Reference (eq) comparison, as specified by the Java API. */
  final def compareAndSet(i: Int, expect: E, update: E): Boolean = {
    if (get(i) eq expect) {
      set(i, update)
      true
    } else {
      false
    }
  }

  final def weakCompareAndSet(i: Int, expect: E, update: E): Boolean =
    compareAndSet(i, expect, update)

  override def toString(): String =
    elements.mkString("[", ", ", "]")
}
| SebsLittleHelpers/scala-js | javalib/src/main/scala/java/util/concurrent/atomic/AtomicReferenceArray.scala | Scala | apache-2.0 | 1,248 |
/*
* Copyright 2010 Michael Fortin <mike@brzy.org>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*/
package org.brzy.webapp.action
import collection.mutable.Buffer
import java.util.regex.Pattern
/**
 * A representation of a RESTful like path. This is used to compare paths of
 * actions to see if they're eligible for execution on the action.
 *
 * Path segments of the form `{name}` match any value; segments of the form
 * `{name:regex}` match only values accepted by the given regex.
 *
 * @author Michael Fortin
 * @param ctlrBase The path of the controller
 * @param actionBase The path of the action
 */
case class Path(ctlrBase: String, actionBase: String) extends Ordered[Path] {

  /**
   * The combined controller and action paths, normalized to start with a
   * single slash and contain no double slashes.
   */
  protected val path = {
    val joined =
      if (actionBase.equals(""))
        ctlrBase
      else if (actionBase.startsWith("/"))
        ctlrBase + actionBase
      else
        ctlrBase + "/" + actionBase

    val absolute =
      if (joined.startsWith("/"))
        joined
      else
        "/" + joined

    absolute.replaceAll("//", "/")
  }

  /** The path split into segments, for comparison with request URIs. */
  protected val pathTokens = path.replaceAll("//", "/").split("/")

  /** The whole path compiled into a single anchored regular expression. */
  protected val pathMatcher = {
    val regexTokens = path.split("""\\{|\\}""").map { token =>
      if (!token.contains("/") && token.contains(":"))
        "(" + token.split(":")(1) + ")"
      else if (!token.contains("/"))
        """(.*?)"""
      else
        token
    }
    ("^" + regexTokens.mkString + "$").r
  }

  /**
   * Check to see of the request uri matches the path expression of this action.
   *
   * @param contextPath The path to compare to this actions path.
   * @return true if it matches
   */
  def isMatch(contextPath: String) = {
    val requestTokens: Array[String] = contextPath.replaceAll("//", "/").split("/")
    if (pathTokens.size != requestTokens.size)
      false
    else
      pathTokens.zip(requestTokens).forall { case (patternToken, urlToken) =>
        if (isPattern(patternToken))
          toPattern(patternToken).findFirstIn(urlToken).isDefined
        else
          patternToken == urlToken
      }
  }

  /** True when the segment is a `{...}` url path variable. */
  protected def isPattern(a: String) = a.startsWith("{") && a.endsWith("}")

  /** Regex for a `{...}` segment: the part after `:` if present, else a wildcard. */
  protected def toPattern(a: String) = {
    val body = a.substring(1, a.length() - 1)
    if (body.contains(":"))
      body.substring(body.indexOf(":") + 1, body.length()).r
    else
      "(.*?)".r
  }

  /** Extracts the values of all path variables from a concrete request path. */
  def extractParameterValues(contextPath: String) = {
    pathMatcher.findFirstMatchIn(contextPath) match {
      case Some(matcher) =>
        (1 to matcher.groupCount).map(i => matcher.group(i)).toArray
      case None =>
        Array[String]()
    }
  }

  /**
   * The names of all `{...}` path variables, with any `:regex` suffix removed.
   */
  val parameterNames = {
    val names = Buffer[String]()
    """\\{(.*?)\\}""".r.findAllMatchIn(path).foreach { m =>
      val id = m.group(1)
      if (id.contains(":"))
        names += id.substring(0, id.indexOf(":"))
      else
        names += id
    }
    names.toArray
  }

  def compare(that: Path) = path.compareTo(that.path)
}
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt
package internal
package parser
import java.io.File
import scala.io.Source
/**
 * Checks that every sample build file under `/new-format` is split into at
 * least one statement by the default expression splitter.
 */
object NewFormatSpec extends AbstractSpec {

  implicit val splitter: SplitExpressions.SplitExpression = EvaluateConfigurations.splitExpressions

  test("New Format should handle lines") {
    val rootPath = getClass.getResource("/new-format").getPath
    println(s"Reading files from: $rootPath")
    val allFiles = new File(rootPath).listFiles.toList
    allFiles foreach { path =>
      println(s"$path")
      // Close the Source after reading: the previous version leaked one file
      // handle per sample file by never closing it.
      val source = Source.fromFile(path)
      val lines =
        try source.getLines().toList
        finally source.close()
      val (_, statements) = splitter(path, lines)
      assert(statements.nonEmpty, s"""
                                     |***should contains statements***
                                     |$lines """.stripMargin)
    }
  }
}
| sbt/sbt | main/src/test/scala/sbt/internal/parser/NewFormatSpec.scala | Scala | apache-2.0 | 883 |
package structures
package laws
package discipline
import org.scalacheck.Arbitrary
import org.scalacheck.Prop._
/**
 * Law-checking rule set for [[Contravariant]] functors: the exponential laws
 * plus contravariant identity and composition, as ScalaCheck properties.
 */
trait ContravariantDiscipline[F[_]] extends ExponentialDiscipline[F] {

  def laws: ContravariantLaws[F]

  def contravariant[A, B, C](implicit
    arbFA: Arbitrary[F[A]],
    arbA: Arbitrary[A],
    arbB: Arbitrary[B],
    arbC: Arbitrary[C],
    eqFA: Equal[F[A]],
    eqFC: Equal[F[C]]
  ): RuleSet = new DefaultRuleSet(
    name = "contravariant",
    // Contravariant strengthens Exponential, so those laws must hold too.
    parent = Some(exponential[A, B, C]),
    props =
      "contravariant identity" -> forAll { (fa: F[A]) =>
        laws.contravariantIdentity(fa).isEqual
      },
      "contravariant composition" -> forAll { (fa: F[A], f: B => A, g: C => B) =>
        laws.contravariantComposition(fa, f, g).isEqual
      }
  )
}
object ContravariantDiscipline {
def apply[F[_]: Contravariant]: ContravariantDiscipline[F] = new ContravariantDiscipline[F] {
def laws = ContravariantLaws[F]
}
}
| mpilquist/Structures | discipline/src/main/scala/structures/laws/discipline/ContravariantDiscipline.scala | Scala | bsd-3-clause | 956 |
package play.json.extra
import org.scalatest.FunSuite
import play.api.libs.json.{JsSuccess, Json}
final case class DefaultValueTestClass(@key("ok-field") field:String="p", lp:List[String]=Nil,
l1:Option[Option[Int]])
class DefaultValueTest extends FunSuite {
test("json default serialize/deserialize") {
import play.json.extra.implicits.optionWithNull
implicit def fmt1 = Jsonx.formatCaseClassUseDefaults[DefaultValueTestClass]
val t1=DefaultValueTestClass(lp=Nil, l1=None)
// println(Json.stringify(Json.toJson(t1)))
assert(Json.stringify(Json.toJson(t1)) === """{"ok-field":"p","lp":[]}""")
assert(t1 === Json.parse("{}").as[DefaultValueTestClass])
}
}
| aparo/play-json-extra | play-json-extra/jvm/src/test/scala/play/json/extra/DefaultValueTest.scala | Scala | apache-2.0 | 723 |
package com.twitter.scalding
/**
* This class is used to assist with testing a TypedPipe
*/
object TypedPipeChecker {
/*
* Takes a List and a transform function.
* The resulting TypedPipe form the transform will be run through asserts
*/
def checkOutputTransform[T, U, R](input: List[T])(transform: TypedPipe[T] => TypedPipe[U])(assertions: List[U] => R): R =
assertions(inMemoryToList(transform(TypedPipe.from(input))))
/*
* Execute a TypedPipe in memory, convert the resulting Iterator to
* a list and run it through a function that makes arbitrary
* assertions on it.
*/
def checkOutput[T, R](output: TypedPipe[T])(assertions: List[T] => R): R =
assertions(inMemoryToList(output))
/**
* Execute a TypedPipe in memory and return the result as a List
*/
def inMemoryToList[T](output: TypedPipe[T]): List[T] =
output
.toIterableExecution
.waitFor(Config.unitTestDefault, Local(strictSources = true))
.get
.toList
}
| rubanm/scalding | scalding-core/src/main/scala/com/twitter/scalding/TypedPipeChecker.scala | Scala | apache-2.0 | 996 |
package tests.bloomfilter
import bloomfilter.CanGetDataFrom.CanGetDataFromArrayChar
import org.scalatest.{FreeSpec, Matchers}
class CanGetDataFromSpec extends FreeSpec with Matchers {
"CanGetDataFromArrayChar" in {
CanGetDataFromArrayChar.getByte(Array[Char]('a'), 0) shouldEqual 97.toByte
CanGetDataFromArrayChar.getByte(Array[Char]('a'), 1) shouldEqual 0.toByte
CanGetDataFromArrayChar.getByte(Array[Char]('a', 'b'), 0) shouldEqual 97.toByte
CanGetDataFromArrayChar.getByte(Array[Char]('a', 'b'), 1) shouldEqual 0.toByte
CanGetDataFromArrayChar.getByte(Array[Char]('a', 'b'), 2) shouldEqual 98.toByte
CanGetDataFromArrayChar.getByte(Array[Char]('a', 'b'), 3) shouldEqual 0.toByte
CanGetDataFromArrayChar.getLong(Array[Char]('a', 'b', 'c', 'd'), 0) shouldEqual
(0.toLong << 56) |
(('d'.toByte & 0xffL) << 48) |
((0 & 0xffL) << 40) |
(('c'.toByte & 0xffL) << 32) |
((0 & 0xffL) << 24) |
(('b' & 0xffL) << 16) |
((0 & 0xffL) << 8) |
'a' & 0xffL
}
}
| alexandrnikitin/bloom-filter-scala | tests/src/test/scala/tests/bloomfilter/CanGetDataFromSpec.scala | Scala | mit | 1,077 |
/**
* Copyright 2015 Otto (GmbH & Co KG)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.schedoscope.test
import java.io.File
import org.apache.hadoop.fs.Path
import org.schedoscope.Schedoscope
import org.schedoscope.dsl.storageformats.Avro
import org.schedoscope.dsl.{FieldLike, View}
import scala.collection.mutable.ListBuffer
/**
* This trait enables a view to be loaded with the results of it's
* transformation during tests.
*/
trait LoadableView extends WritableView {
var sortedBy: Option[FieldLike[_]] = None
var dependencyCheckDisabled = false
var transformationValidationDisabled = false
val inputFixtures = ListBuffer.empty[View with WritableView]
val localResources = ListBuffer.empty[(String, String)]
/**
* Fills this view with data from hive, potentially sorted by a column
*
* @param orderedBy the optional FieldLike for the column to sort by
*/
def populate(orderedBy: Option[FieldLike[_]]) {
val db = resources.database
rowData.clear()
rowData.appendAll(db.selectView(this, orderedBy))
}
/**
* Adds dependencies for this view
*/
def basedOn(d: View with WritableView*) {
d.foreach { el =>
el.resources = resources
el.createViewTable()
}
//check
inputFixtures ++= d
}
/**
* Compares the dependencies of the tested view
* and the added dependencies to the test
*
* @return true if dependencies match
*/
def checkDependencies(): Boolean = {
if (inputFixtures.isEmpty && dependencies.isEmpty) {
return true
}
val dependencyNames = dependencies
.map(v => v.dbName + "." + v.tableName)
.distinct
.toList
val depNames = inputFixtures.map(v => v.dbName + "." + v.tableName)
.distinct
.toList
if (depNames.length == dependencyNames.length) {
depNames
.map(dependencyNames.contains(_))
.reduce(_ && _)
} else {
false
}
}
def loadLocalResources(): Unit = {
localResources.foreach {
case (prop, file) =>
val fs = resources.fileSystem
val src = new Path(file)
val target = new Path(s"${resources.remoteTestDirectory}/${src.getName}")
if (fs.exists(target))
fs.delete(target, false)
fs.copyFromLocalFile(src, target)
configureTransformation(prop, target.toString.replaceAll("^file:/", "file:///"))
}
}
/**
* Configures the associated transformation with the given property (as
* key value pair)
*/
def withConfiguration(k: String, v: Any) {
configureTransformation(k, v)
}
/**
* TODO:
* Configures the associated transformation with the given property (as
* key value pair)
*/
def withConfiguration(conf: Map[String, Any]) {
conf.foreach {
case (k, v) =>
configureTransformation(k, v)
}
}
/**
* Configures the associated transformation with the given property (as
* multiple key value pairs)
*/
def withConfiguration(c: (String, Any)*) {
c.foreach(e => this.configureTransformation(e._1, e._2))
}
/**
* Register a local resource which will be added to the configuration and
* loaded into the hdfs during tests.
*/
def withResource(res: (String, String)*) {
localResources ++= res
}
/**
* Disable the matching of the dependencies of the view
* and views passed to basedOn()
*/
def disableDependencyCheck(): Unit = {
dependencyCheckDisabled = true
}
/**
* Disable potential checks of the transformation logic
*/
def disableTransformationValidation(): Unit = {
transformationValidationDisabled = true
}
/**
*
* @param fieldLike field to desc
*/
def sortRowsBy(fieldLike: FieldLike[_]): Unit = {
sortedBy = Some(fieldLike)
}
}
/**
* This trait implements most of the schedoscope test DSL. it extends View
* with methods to generate test data, execute local hive and assertions
*/
trait test extends LoadableView with AccessRowData {
/**
* Execute the hive query in test on previously specified test fixtures
*/
def `then`() {
`then`(null,
disableDependencyCheck = false,
disableTransformationValidation = false,
disableLineageValidation = true)
}
/**
* Execute the hive query in test on previously specified test fixtures.
*
* @param sortedBy sort the table by field
* @param disableDependencyCheck disable dependency checks
* @param disableTransformationValidation disable transformation validation
* @param disableLineageValidation disable lineage validation
*/
def `then`(sortedBy: FieldLike[_] = null,
disableDependencyCheck: Boolean = false,
disableTransformationValidation: Boolean = false,
disableLineageValidation: Boolean = Schedoscope.settings.disableLineageValidation) {
TestUtils.loadView(this, sortedBy, disableDependencyCheck, disableTransformationValidation,
disableLineageValidation)
}
override def rowId() = {
WritableView.rowIdPattern.format(rowIdx)
}
override def numRows() = {
rowData.size
}
override def tablePath = storageFormat match {
case Avro(testPath, _) => new File(getClass.getResource("/" + testPath).getPath).getParentFile.getAbsolutePath
case _ => tablePathBuilder(env, viewDataHdfsRoot)
}
override def avroSchemaPathPrefix = storageFormat match {
case Avro(testPath, _) => new File(getClass.getResource("/").getPath).getAbsolutePath
case _ => avroSchemaPathPrefixBuilder(env)
}
}
/**
* Syntactic sugar for [[ReusableHiveSchema]] tests
*/
trait OutputSchema extends LoadableView | utzwestermann/schedoscope | schedoscope-core/src/main/scala/org/schedoscope/test/LoadableView.scala | Scala | apache-2.0 | 6,248 |
package net.ellisw.quvault.lift
import scala.xml.Node
object QuAnswerMode extends Enumeration {
val Blank, Answer, Edit, Solution = Value
}
class QuRenderSpec(
val xml: Node,
val answers: Map[String, String], // REFACTOR: this value isn't currently used -- but we should use it instead of QuRequestVars.answers in the QuRenderer* classes
val scores: scala.collection.Map[String, ScoreData],
val mode: QuAnswerMode.Value,
val bShowValidations: Boolean,
val bShowScores: Boolean,
val bShowSolutions: Boolean
)
| ellis/2009QuVault | quvault/server-lift/src/main/scala/net/ellisw/quvault/lift/QuRenderSpec.scala | Scala | gpl-3.0 | 528 |
package com.github.vooolll.services
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import scala.concurrent.ExecutionContext
trait AppResources {
implicit val actorSystem: ActorSystem
implicit val materializer: ActorMaterializer
implicit val executionContext: ExecutionContext
}
class FacebookAppResources extends AppResources {
override implicit val actorSystem: ActorSystem = ActorSystem()
override implicit val executionContext: ExecutionContext =
actorSystem.dispatcher
override implicit val materializer: ActorMaterializer = ActorMaterializer()
}
object FacebookAppResources {
def apply(): FacebookAppResources = new FacebookAppResources()
}
| vooolll/facebook4s | src/main/scala/com/github/vooolll/services/AppResources.scala | Scala | apache-2.0 | 689 |
package ua.t3hnar.plugins.cmdsupport.lang
import com.intellij.lang.Language
object CmdLanguage extends Language("Cmd",
"application/x-batch",
"application/x-bat",
"application/x-cmd",
"text/x-script.bat",
"text/x-script.cmd") | t3hnar/CmdSupport | src/ua/t3hnar/plugins/cmdsupport/lang/CmdLanguage.scala | Scala | apache-2.0 | 238 |
package kamkor.actor
import akka.actor.{ Actor, Props, UnboundedStash, ActorLogging }
import scala.concurrent.duration.DurationInt
class Consumer(val processingTimeMillis: Int) extends Actor with UnboundedStash with ActorLogging {
import context.dispatcher
def receive: Receive = {
case data: Array[Int] => {
context.become(processing, discardOld = false)
context.system.scheduler.scheduleOnce(processingTimeMillis.millis, self, "endProcessing")
}
}
def processing: Receive = {
case data: Array[Int] => stash()
case "endProcessing" => {
log.debug("endProcessing") // for unit test
unstashAll()
context.unbecome()
}
}
}
object Consumer {
def props(processingTimeMillis: Int): Props = Props(new Consumer(processingTimeMillis))
}
| TracyLu/akka-cluster-load-balancing | src/main/scala/kamkor/actor/Consumer.scala | Scala | mit | 798 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.graph.scala.examples
import java.lang.Long
import org.apache.flink.api.common.functions.MapFunction
import org.apache.flink.api.scala._
import org.apache.flink.graph.Edge
import org.apache.flink.graph.examples.data.ConnectedComponentsDefaultData
import org.apache.flink.graph.library.GSAConnectedComponents
import org.apache.flink.graph.scala._
import org.apache.flink.types.NullValue
/**
* This example shows how to use Gelly's library methods.
* You can find all available library methods in [[org.apache.flink.graph.library]].
*
* In particular, this example uses the
* [[GSAConnectedComponents]]
* library method to compute the connected components of the input graph.
*
* The input file is a plain text file and must be formatted as follows:
* Edges are represented by tuples of srcVertexId, trgVertexId which are
* separated by tabs. Edges themselves are separated by newlines.
* For example: <code>1\t2\n1\t3\n</code> defines two edges,
* 1-2 and 1-3.
*
* Usage {{
* ConnectedComponents <edge path> <result path> <number of iterations>
* }}
* If no parameters are provided, the program is run with default data from
* [[ConnectedComponentsDefaultData]]
*/
object ConnectedComponents {
def main(args: Array[String]) {
if (!parseParameters(args)) {
return
}
val env = ExecutionEnvironment.getExecutionEnvironment
val edges: DataSet[Edge[Long, NullValue]] = getEdgesDataSet(env)
val graph = Graph.fromDataSet[Long, Long, NullValue](edges, new InitVertices, env)
val components = graph.run(new GSAConnectedComponents[Long, Long, NullValue](maxIterations))
// emit result
if (fileOutput) {
components.writeAsCsv(outputPath, "\n", ",")
env.execute("Connected Components Example")
} else {
components.print()
}
}
private final class InitVertices extends MapFunction[Long, Long] {
override def map(id: Long) = id
}
// ***********************************************************************
// UTIL METHODS
// ***********************************************************************
private var fileOutput = false
private var edgesInputPath: String = null
private var outputPath: String = null
private var maxIterations = ConnectedComponentsDefaultData.MAX_ITERATIONS
private def parseParameters(args: Array[String]): Boolean = {
if(args.length > 0) {
if(args.length != 3) {
System.err.println("Usage ConnectedComponents <edge path> <output path> " +
"<num iterations>")
}
fileOutput = true
edgesInputPath = args(0)
outputPath = args(1)
maxIterations = 2
} else {
System.out.println("Executing ConnectedComponents example with default parameters" +
" and built-in default data.")
System.out.println(" Provide parameters to read input data from files.")
System.out.println(" See the documentation for the correct format of input files.")
System.out.println("Usage ConnectedComponents <edge path> <output path> " +
"<num iterations>")
}
true
}
private def getEdgesDataSet(env: ExecutionEnvironment): DataSet[Edge[Long, NullValue]] = {
if (fileOutput) {
env.readCsvFile[(Long, Long)](edgesInputPath,
lineDelimiter = "\n",
fieldDelimiter = "\t")
.map(edge => new Edge[Long, NullValue](edge._1, edge._2, NullValue.getInstance))
} else {
val edgeData = ConnectedComponentsDefaultData.DEFAULT_EDGES map {
case Array(x, y) => (x.asInstanceOf[Long], y.asInstanceOf[Long])
}
env.fromCollection(edgeData).map(
edge => new Edge[Long, NullValue](edge._1, edge._2, NullValue.getInstance))
}
}
}
| hequn8128/flink | flink-libraries/flink-gelly-examples/src/main/scala/org/apache/flink/graph/scala/examples/ConnectedComponents.scala | Scala | apache-2.0 | 4,607 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.executor
import java.io.{File, NotSerializableException}
import java.lang.Thread.UncaughtExceptionHandler
import java.lang.management.ManagementFactory
import java.net.{URI, URL}
import java.nio.ByteBuffer
import java.util.Properties
import java.util.concurrent._
import javax.annotation.concurrent.GuardedBy
import scala.collection.JavaConverters._
import scala.collection.mutable.{ArrayBuffer, HashMap, Map}
import scala.util.control.NonFatal
import com.google.common.util.concurrent.ThreadFactoryBuilder
import org.apache.spark._
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.memory.TaskMemoryManager
import org.apache.spark.rpc.RpcTimeout
import org.apache.spark.scheduler.{DirectTaskResult, IndirectTaskResult, Task, TaskDescription}
import org.apache.spark.shuffle.FetchFailedException
import org.apache.spark.storage.{StorageLevel, TaskResultBlockId}
import org.apache.spark.util._
import org.apache.spark.util.io.ChunkedByteBuffer
/**
* Spark executor, backed by a threadpool to run tasks.
*
* This can be used with Mesos, YARN, and the standalone scheduler.
* An internal RPC interface is used for communication with the driver,
* except in the case of Mesos fine-grained mode.
*/
private[spark] class Executor(
executorId: String,
executorHostname: String,
env: SparkEnv,
userClassPath: Seq[URL] = Nil,
isLocal: Boolean = false,
uncaughtExceptionHandler: UncaughtExceptionHandler = new SparkUncaughtExceptionHandler)
extends Logging {
logInfo(s"Starting executor ID $executorId on host $executorHostname")
// Application dependencies (added through SparkContext) that we've fetched so far on this node.
// Each map holds the master's timestamp for the version of that file or JAR we got.
private val currentFiles: HashMap[String, Long] = new HashMap[String, Long]()
private val currentJars: HashMap[String, Long] = new HashMap[String, Long]()
private val EMPTY_BYTE_BUFFER = ByteBuffer.wrap(new Array[Byte](0))
private val conf = env.conf
// No ip or host:port - just hostname
Utils.checkHost(executorHostname)
// must not have port specified.
assert (0 == Utils.parseHostPort(executorHostname)._2)
// Make sure the local hostname we report matches the cluster scheduler's name for this host
Utils.setCustomHostname(executorHostname)
if (!isLocal) {
// Setup an uncaught exception handler for non-local mode.
// Make any thread terminations due to uncaught exceptions kill the entire
// executor process to avoid surprising stalls.
Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler)
}
// Start worker thread pool
private val threadPool = {
val threadFactory = new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat("Executor task launch worker-%d")
.setThreadFactory(new ThreadFactory {
override def newThread(r: Runnable): Thread =
// Use UninterruptibleThread to run tasks so that we can allow running codes without being
// interrupted by `Thread.interrupt()`. Some issues, such as KAFKA-1894, HADOOP-10622,
// will hang forever if some methods are interrupted.
new UninterruptibleThread(r, "unused") // thread name will be set by ThreadFactoryBuilder
})
.build()
Executors.newCachedThreadPool(threadFactory).asInstanceOf[ThreadPoolExecutor]
}
private val executorSource = new ExecutorSource(threadPool, executorId)
// Pool used for threads that supervise task killing / cancellation
private val taskReaperPool = ThreadUtils.newDaemonCachedThreadPool("Task reaper")
// For tasks which are in the process of being killed, this map holds the most recently created
// TaskReaper. All accesses to this map should be synchronized on the map itself (this isn't
// a ConcurrentHashMap because we use the synchronization for purposes other than simply guarding
// the integrity of the map's internal state). The purpose of this map is to prevent the creation
// of a separate TaskReaper for every killTask() of a given task. Instead, this map allows us to
// track whether an existing TaskReaper fulfills the role of a TaskReaper that we would otherwise
// create. The map key is a task id.
private val taskReaperForTask: HashMap[Long, TaskReaper] = HashMap[Long, TaskReaper]()
if (!isLocal) {
env.metricsSystem.registerSource(executorSource)
env.blockManager.initialize(conf.getAppId)
}
// Whether to load classes in user jars before those in Spark jars
private val userClassPathFirst = conf.getBoolean("spark.executor.userClassPathFirst", false)
// Whether to monitor killed / interrupted tasks
private val taskReaperEnabled = conf.getBoolean("spark.task.reaper.enabled", false)
// Create our ClassLoader
// do this after SparkEnv creation so can access the SecurityManager
private val urlClassLoader = createClassLoader()
private val replClassLoader = addReplClassLoaderIfNeeded(urlClassLoader)
// Set the classloader for serializer
env.serializer.setDefaultClassLoader(replClassLoader)
// Max size of direct result. If task result is bigger than this, we use the block manager
// to send the result back.
private val maxDirectResultSize = Math.min(
conf.getSizeAsBytes("spark.task.maxDirectResultSize", 1L << 20),
RpcUtils.maxMessageSizeBytes(conf))
// Limit of bytes for total size of results (default is 1GB)
private val maxResultSize = Utils.getMaxResultSize(conf)
// Maintains the list of running tasks.
private val runningTasks = new ConcurrentHashMap[Long, TaskRunner]
// Executor for the heartbeat task.
private val heartbeater = ThreadUtils.newDaemonSingleThreadScheduledExecutor("driver-heartbeater")
// must be initialized before running startDriverHeartbeat()
private val heartbeatReceiverRef =
RpcUtils.makeDriverRef(HeartbeatReceiver.ENDPOINT_NAME, conf, env.rpcEnv)
/**
* When an executor is unable to send heartbeats to the driver more than `HEARTBEAT_MAX_FAILURES`
* times, it should kill itself. The default value is 60. It means we will retry to send
* heartbeats about 10 minutes because the heartbeat interval is 10s.
*/
private val HEARTBEAT_MAX_FAILURES = conf.getInt("spark.executor.heartbeat.maxFailures", 60)
/**
* Count the failure times of heartbeat. It should only be accessed in the heartbeat thread. Each
* successful heartbeat will reset it to 0.
*/
private var heartbeatFailures = 0
startDriverHeartbeater()
private[executor] def numRunningTasks: Int = runningTasks.size()
def launchTask(context: ExecutorBackend, taskDescription: TaskDescription): Unit = {
val tr = new TaskRunner(context, taskDescription)
runningTasks.put(taskDescription.taskId, tr)
threadPool.execute(tr)
}
def killTask(taskId: Long, interruptThread: Boolean, reason: String): Unit = {
val taskRunner = runningTasks.get(taskId)
if (taskRunner != null) {
if (taskReaperEnabled) {
val maybeNewTaskReaper: Option[TaskReaper] = taskReaperForTask.synchronized {
val shouldCreateReaper = taskReaperForTask.get(taskId) match {
case None => true
case Some(existingReaper) => interruptThread && !existingReaper.interruptThread
}
if (shouldCreateReaper) {
val taskReaper = new TaskReaper(
taskRunner, interruptThread = interruptThread, reason = reason)
taskReaperForTask(taskId) = taskReaper
Some(taskReaper)
} else {
None
}
}
// Execute the TaskReaper from outside of the synchronized block.
maybeNewTaskReaper.foreach(taskReaperPool.execute)
} else {
taskRunner.kill(interruptThread = interruptThread, reason = reason)
}
}
}
/**
* Function to kill the running tasks in an executor.
* This can be called by executor back-ends to kill the
* tasks instead of taking the JVM down.
* @param interruptThread whether to interrupt the task thread
*/
def killAllTasks(interruptThread: Boolean, reason: String) : Unit = {
runningTasks.keys().asScala.foreach(t =>
killTask(t, interruptThread = interruptThread, reason = reason))
}
def stop(): Unit = {
env.metricsSystem.report()
heartbeater.shutdown()
heartbeater.awaitTermination(10, TimeUnit.SECONDS)
threadPool.shutdown()
if (!isLocal) {
env.stop()
}
}
/** Returns the total amount of time this JVM process has spent in garbage collection. */
private def computeTotalGcTime(): Long = {
ManagementFactory.getGarbageCollectorMXBeans.asScala.map(_.getCollectionTime).sum
}
class TaskRunner(
execBackend: ExecutorBackend,
private val taskDescription: TaskDescription)
extends Runnable {
val taskId = taskDescription.taskId
val threadName = s"Executor task launch worker for task $taskId"
private val taskName = taskDescription.name
/** If specified, this task has been killed and this option contains the reason. */
@volatile private var reasonIfKilled: Option[String] = None
@volatile private var threadId: Long = -1
def getThreadId: Long = threadId
/** Whether this task has been finished. */
@GuardedBy("TaskRunner.this")
private var finished = false
def isFinished: Boolean = synchronized { finished }
/** How much the JVM process has spent in GC when the task starts to run. */
@volatile var startGCTime: Long = _
/**
* The task to run. This will be set in run() by deserializing the task binary coming
* from the driver. Once it is set, it will never be changed.
*/
@volatile var task: Task[Any] = _
def kill(interruptThread: Boolean, reason: String): Unit = {
logInfo(s"Executor is trying to kill $taskName (TID $taskId), reason: $reason")
reasonIfKilled = Some(reason)
if (task != null) {
synchronized {
if (!finished) {
task.kill(interruptThread, reason)
}
}
}
}
/**
* Set the finished flag to true and clear the current thread's interrupt status
*/
private def setTaskFinishedAndClearInterruptStatus(): Unit = synchronized {
this.finished = true
// SPARK-14234 - Reset the interrupted status of the thread to avoid the
// ClosedByInterruptException during execBackend.statusUpdate which causes
// Executor to crash
Thread.interrupted()
// Notify any waiting TaskReapers. Generally there will only be one reaper per task but there
// is a rare corner-case where one task can have two reapers in case cancel(interrupt=False)
// is followed by cancel(interrupt=True). Thus we use notifyAll() to avoid a lost wakeup:
notifyAll()
}
override def run(): Unit = {
threadId = Thread.currentThread.getId
Thread.currentThread.setName(threadName)
val threadMXBean = ManagementFactory.getThreadMXBean
val taskMemoryManager = new TaskMemoryManager(env.memoryManager, taskId)
val deserializeStartTime = System.currentTimeMillis()
val deserializeStartCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
threadMXBean.getCurrentThreadCpuTime
} else 0L
Thread.currentThread.setContextClassLoader(replClassLoader)
val ser = env.closureSerializer.newInstance()
logInfo(s"Running $taskName (TID $taskId)")
execBackend.statusUpdate(taskId, TaskState.RUNNING, EMPTY_BYTE_BUFFER)
var taskStart: Long = 0
var taskStartCpu: Long = 0
startGCTime = computeTotalGcTime()
try {
// Must be set before updateDependencies() is called, in case fetching dependencies
// requires access to properties contained within (e.g. for access control).
Executor.taskDeserializationProps.set(taskDescription.properties)
updateDependencies(taskDescription.addedFiles, taskDescription.addedJars)
task = ser.deserialize[Task[Any]](
taskDescription.serializedTask, Thread.currentThread.getContextClassLoader)
task.localProperties = taskDescription.properties
task.setTaskMemoryManager(taskMemoryManager)
// If this task has been killed before we deserialized it, let's quit now. Otherwise,
// continue executing the task.
val killReason = reasonIfKilled
if (killReason.isDefined) {
// Throw an exception rather than returning, because returning within a try{} block
// causes a NonLocalReturnControl exception to be thrown. The NonLocalReturnControl
// exception will be caught by the catch block, leading to an incorrect ExceptionFailure
// for the task.
throw new TaskKilledException(killReason.get)
}
// The purpose of updating the epoch here is to invalidate executor map output status cache
// in case FetchFailures have occurred. In local mode `env.mapOutputTracker` will be
// MapOutputTrackerMaster and its cache invalidation is not based on epoch numbers so
// we don't need to make any special calls here.
if (!isLocal) {
logDebug("Task " + taskId + "'s epoch is " + task.epoch)
env.mapOutputTracker.asInstanceOf[MapOutputTrackerWorker].updateEpoch(task.epoch)
}
// Run the actual task and measure its runtime.
taskStart = System.currentTimeMillis()
taskStartCpu = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
threadMXBean.getCurrentThreadCpuTime
} else 0L
var threwException = true
val value = try {
val res = task.run(
taskAttemptId = taskId,
attemptNumber = taskDescription.attemptNumber,
metricsSystem = env.metricsSystem)
threwException = false
res
} finally {
val releasedLocks = env.blockManager.releaseAllLocksForTask(taskId)
val freedMemory = taskMemoryManager.cleanUpAllAllocatedMemory()
if (freedMemory > 0 && !threwException) {
val errMsg = s"Managed memory leak detected; size = $freedMemory bytes, TID = $taskId"
if (conf.getBoolean("spark.unsafe.exceptionOnMemoryLeak", false)) {
throw new SparkException(errMsg)
} else {
logWarning(errMsg)
}
}
if (releasedLocks.nonEmpty && !threwException) {
val errMsg =
s"${releasedLocks.size} block locks were not released by TID = $taskId:\\n" +
releasedLocks.mkString("[", ", ", "]")
if (conf.getBoolean("spark.storage.exceptionOnPinLeak", false)) {
throw new SparkException(errMsg)
} else {
logInfo(errMsg)
}
}
}
task.context.fetchFailed.foreach { fetchFailure =>
// uh-oh. it appears the user code has caught the fetch-failure without throwing any
// other exceptions. Its *possible* this is what the user meant to do (though highly
// unlikely). So we will log an error and keep going.
logError(s"TID ${taskId} completed successfully though internally it encountered " +
s"unrecoverable fetch failures! Most likely this means user code is incorrectly " +
s"swallowing Spark's internal ${classOf[FetchFailedException]}", fetchFailure)
}
val taskFinish = System.currentTimeMillis()
val taskFinishCpu = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
threadMXBean.getCurrentThreadCpuTime
} else 0L
// If the task has been killed, let's fail it.
task.context.killTaskIfInterrupted()
val resultSer = env.serializer.newInstance()
val beforeSerialization = System.currentTimeMillis()
val valueBytes = resultSer.serialize(value)
val afterSerialization = System.currentTimeMillis()
// Deserialization happens in two parts: first, we deserialize a Task object, which
// includes the Partition. Second, Task.run() deserializes the RDD and function to be run.
task.metrics.setExecutorDeserializeTime(
(taskStart - deserializeStartTime) + task.executorDeserializeTime)
task.metrics.setExecutorDeserializeCpuTime(
(taskStartCpu - deserializeStartCpuTime) + task.executorDeserializeCpuTime)
// We need to subtract Task.run()'s deserialization time to avoid double-counting
task.metrics.setExecutorRunTime((taskFinish - taskStart) - task.executorDeserializeTime)
task.metrics.setExecutorCpuTime(
(taskFinishCpu - taskStartCpu) - task.executorDeserializeCpuTime)
task.metrics.setJvmGCTime(computeTotalGcTime() - startGCTime)
task.metrics.setResultSerializationTime(afterSerialization - beforeSerialization)
// Note: accumulator updates must be collected after TaskMetrics is updated
val accumUpdates = task.collectAccumulatorUpdates()
// TODO: do not serialize value twice
val directResult = new DirectTaskResult(valueBytes, accumUpdates)
val serializedDirectResult = ser.serialize(directResult)
val resultSize = serializedDirectResult.limit
// directSend = sending directly back to the driver
val serializedResult: ByteBuffer = {
if (maxResultSize > 0 && resultSize > maxResultSize) {
logWarning(s"Finished $taskName (TID $taskId). Result is larger than maxResultSize " +
s"(${Utils.bytesToString(resultSize)} > ${Utils.bytesToString(maxResultSize)}), " +
s"dropping it.")
ser.serialize(new IndirectTaskResult[Any](TaskResultBlockId(taskId), resultSize))
} else if (resultSize > maxDirectResultSize) {
val blockId = TaskResultBlockId(taskId)
env.blockManager.putBytes(
blockId,
new ChunkedByteBuffer(serializedDirectResult.duplicate()),
StorageLevel.MEMORY_AND_DISK_SER)
logInfo(
s"Finished $taskName (TID $taskId). $resultSize bytes result sent via BlockManager)")
ser.serialize(new IndirectTaskResult[Any](blockId, resultSize))
} else {
logInfo(s"Finished $taskName (TID $taskId). $resultSize bytes result sent to driver")
serializedDirectResult
}
}
setTaskFinishedAndClearInterruptStatus()
execBackend.statusUpdate(taskId, TaskState.FINISHED, serializedResult)
} catch {
case t: Throwable if hasFetchFailure && !Utils.isFatalError(t) =>
val reason = task.context.fetchFailed.get.toTaskFailedReason
if (!t.isInstanceOf[FetchFailedException]) {
// there was a fetch failure in the task, but some user code wrapped that exception
// and threw something else. Regardless, we treat it as a fetch failure.
val fetchFailedCls = classOf[FetchFailedException].getName
logWarning(s"TID ${taskId} encountered a ${fetchFailedCls} and " +
s"failed, but the ${fetchFailedCls} was hidden by another " +
s"exception. Spark is handling this like a fetch failure and ignoring the " +
s"other exception: $t")
}
setTaskFinishedAndClearInterruptStatus()
execBackend.statusUpdate(taskId, TaskState.FAILED, ser.serialize(reason))
case t: TaskKilledException =>
logInfo(s"Executor killed $taskName (TID $taskId), reason: ${t.reason}")
setTaskFinishedAndClearInterruptStatus()
execBackend.statusUpdate(taskId, TaskState.KILLED, ser.serialize(TaskKilled(t.reason)))
case _: InterruptedException | NonFatal(_) if
task != null && task.reasonIfKilled.isDefined =>
val killReason = task.reasonIfKilled.getOrElse("unknown reason")
logInfo(s"Executor interrupted and killed $taskName (TID $taskId), reason: $killReason")
setTaskFinishedAndClearInterruptStatus()
execBackend.statusUpdate(
taskId, TaskState.KILLED, ser.serialize(TaskKilled(killReason)))
case CausedBy(cDE: CommitDeniedException) =>
val reason = cDE.toTaskFailedReason
setTaskFinishedAndClearInterruptStatus()
execBackend.statusUpdate(taskId, TaskState.FAILED, ser.serialize(reason))
case t: Throwable =>
// Attempt to exit cleanly by informing the driver of our failure.
// If anything goes wrong (or this was a fatal exception), we will delegate to
// the default uncaught exception handler, which will terminate the Executor.
logError(s"Exception in $taskName (TID $taskId)", t)
// Collect latest accumulator values to report back to the driver
val accums: Seq[AccumulatorV2[_, _]] =
if (task != null) {
task.metrics.setExecutorRunTime(System.currentTimeMillis() - taskStart)
task.metrics.setJvmGCTime(computeTotalGcTime() - startGCTime)
task.collectAccumulatorUpdates(taskFailed = true)
} else {
Seq.empty
}
val accUpdates = accums.map(acc => acc.toInfo(Some(acc.value), None))
val serializedTaskEndReason = {
try {
ser.serialize(new ExceptionFailure(t, accUpdates).withAccums(accums))
} catch {
case _: NotSerializableException =>
// t is not serializable so just send the stacktrace
ser.serialize(new ExceptionFailure(t, accUpdates, false).withAccums(accums))
}
}
setTaskFinishedAndClearInterruptStatus()
execBackend.statusUpdate(taskId, TaskState.FAILED, serializedTaskEndReason)
// Don't forcibly exit unless the exception was inherently fatal, to avoid
// stopping other tasks unnecessarily.
if (Utils.isFatalError(t)) {
uncaughtExceptionHandler.uncaughtException(Thread.currentThread(), t)
}
} finally {
runningTasks.remove(taskId)
}
}
/** True when a task is in flight and its context has recorded a fetch failure. */
private def hasFetchFailure: Boolean = {
  val ctx = if (task == null) null else task.context
  ctx != null && ctx.fetchFailed.isDefined
}
}
/**
 * Supervises the killing / cancellation of a task by sending the interrupted flag, optionally
 * sending a Thread.interrupt(), and monitoring the task until it finishes.
 *
 * Spark's current task cancellation / task killing mechanism is "best effort" because some tasks
 * may not be interruptable or may not respond to their "killed" flags being set. If a significant
 * fraction of a cluster's task slots are occupied by tasks that have been marked as killed but
 * remain running then this can lead to a situation where new jobs and tasks are starved of
 * resources that are being used by these zombie tasks.
 *
 * The TaskReaper was introduced in SPARK-18761 as a mechanism to monitor and clean up zombie
 * tasks. For backwards-compatibility / backportability this component is disabled by default
 * and must be explicitly enabled by setting `spark.task.reaper.enabled=true`.
 *
 * A TaskReaper is created for a particular task when that task is killed / cancelled. Typically
 * a task will have only one TaskReaper, but it's possible for a task to have up to two reapers
 * in case kill is called twice with different values for the `interrupt` parameter.
 *
 * Once created, a TaskReaper will run until its supervised task has finished running. If the
 * TaskReaper has not been configured to kill the JVM after a timeout (i.e. if
 * `spark.task.reaper.killTimeout < 0`) then this implies that the TaskReaper may run indefinitely
 * if the supervised task never exits.
 *
 * @param taskRunner the runner of the task being supervised
 * @param interruptThread whether a Thread.interrupt() is sent in addition to the killed flag
 * @param reason human-readable reason for the kill, propagated to the runner and the logs
 */
private class TaskReaper(
    taskRunner: TaskRunner,
    val interruptThread: Boolean,
    val reason: String)
  extends Runnable {

  // Task id captured once for logging and for cleaning up taskReaperForTask.
  private[this] val taskId: Long = taskRunner.taskId

  // How long each wait() on the runner lasts between liveness checks.
  private[this] val killPollingIntervalMs: Long =
    conf.getTimeAsMs("spark.task.reaper.pollingInterval", "10s")

  // When > 0, a still-running task after this many ms triggers JVM termination (non-local mode).
  private[this] val killTimeoutMs: Long = conf.getTimeAsMs("spark.task.reaper.killTimeout", "-1")

  // Whether to log a thread dump of the zombie task on each polling round.
  private[this] val takeThreadDump: Boolean =
    conf.getBoolean("spark.task.reaper.threadDump", true)

  override def run(): Unit = {
    val startTimeMs = System.currentTimeMillis()
    def elapsedTimeMs = System.currentTimeMillis() - startTimeMs
    def timeoutExceeded(): Boolean = killTimeoutMs > 0 && elapsedTimeMs > killTimeoutMs
    try {
      // Only attempt to kill the task once. If interruptThread = false then a second kill
      // attempt would be a no-op and if interruptThread = true then it may not be safe or
      // effective to interrupt multiple times:
      taskRunner.kill(interruptThread = interruptThread, reason = reason)
      // Monitor the killed task until it exits. The synchronization logic here is complicated
      // because we don't want to synchronize on the taskRunner while possibly taking a thread
      // dump, but we also need to be careful to avoid races between checking whether the task
      // has finished and wait()ing for it to finish.
      var finished: Boolean = false
      while (!finished && !timeoutExceeded()) {
        taskRunner.synchronized {
          // We need to synchronize on the TaskRunner while checking whether the task has
          // finished in order to avoid a race where the task is marked as finished right after
          // we check and before we call wait().
          if (taskRunner.isFinished) {
            finished = true
          } else {
            taskRunner.wait(killPollingIntervalMs)
          }
        }
        // Re-check outside the lock: the wait() may have ended by timeout or notification.
        if (taskRunner.isFinished) {
          finished = true
        } else {
          logWarning(s"Killed task $taskId is still running after $elapsedTimeMs ms")
          if (takeThreadDump) {
            try {
              Utils.getThreadDumpForThread(taskRunner.getThreadId).foreach { thread =>
                if (thread.threadName == taskRunner.threadName) {
                  logWarning(s"Thread dump from task $taskId:\\n${thread.stackTrace}")
                }
              }
            } catch {
              case NonFatal(e) =>
                logWarning("Exception thrown while obtaining thread dump: ", e)
            }
          }
        }
      }
      if (!taskRunner.isFinished && timeoutExceeded()) {
        if (isLocal) {
          logError(s"Killed task $taskId could not be stopped within $killTimeoutMs ms; " +
            "not killing JVM because we are running in local mode.")
        } else {
          // In non-local-mode, the exception thrown here will bubble up to the uncaught exception
          // handler and cause the executor JVM to exit.
          throw new SparkException(
            s"Killing executor JVM because killed task $taskId could not be stopped within " +
            s"$killTimeoutMs ms.")
        }
      }
    } finally {
      // Clean up entries in the taskReaperForTask map.
      taskReaperForTask.synchronized {
        taskReaperForTask.get(taskId).foreach { taskReaperInMap =>
          if (taskReaperInMap eq this) {
            taskReaperForTask.remove(taskId)
          } else {
            // This must have been a TaskReaper where interruptThread == false where a subsequent
            // killTask() call for the same task had interruptThread == true and overwrote the
            // map entry.
          }
        }
      }
    }
  }
}
/**
 * Create a ClassLoader for use in tasks, adding any JARs specified by the user or any classes
 * created by the interpreter to the search path.
 */
private def createClassLoader(): MutableURLClassLoader = {
  // Seed the jar registry with the explicit user class path, all stamped with "now".
  val timestamp = System.currentTimeMillis()
  for (entry <- userClassPath) {
    currentJars(entry.getPath().split("/").last) = timestamp
  }
  val parentLoader = Utils.getContextOrSparkClassLoader
  // For each of the jars in the jarSet, add them to the class loader.
  // We assume each of the files has already been fetched.
  val jarUrls = currentJars.keySet.map { uri =>
    new File(uri.split("/").last).toURI.toURL
  }
  val allUrls = userClassPath.toArray ++ jarUrls
  if (userClassPathFirst) {
    new ChildFirstURLClassLoader(allUrls, parentLoader)
  } else {
    new MutableURLClassLoader(allUrls, parentLoader)
  }
}
/**
 * If the REPL is in use, add another ClassLoader that will read
 * new classes defined by the REPL as the user types code.
 *
 * The REPL class loader is instantiated reflectively because the repl module is
 * not a compile-time dependency of this one; if it cannot be found the executor
 * exits, since REPL-defined classes could never be loaded.
 */
private def addReplClassLoaderIfNeeded(parent: ClassLoader): ClassLoader = {
  val classUri = conf.get("spark.repl.class.uri", null)
  if (classUri != null) {
    logInfo("Using REPL class URI: " + classUri)
    try {
      // Boxed Boolean so it can be passed through the reflective constructor call.
      val _userClassPathFirst: java.lang.Boolean = userClassPathFirst
      val klass = Utils.classForName("org.apache.spark.repl.ExecutorClassLoader")
        .asInstanceOf[Class[_ <: ClassLoader]]
      val constructor = klass.getConstructor(classOf[SparkConf], classOf[SparkEnv],
        classOf[String], classOf[ClassLoader], classOf[Boolean])
      constructor.newInstance(conf, env, classUri, parent, _userClassPathFirst)
    } catch {
      case _: ClassNotFoundException =>
        logError("Could not find org.apache.spark.repl.ExecutorClassLoader on classpath!")
        System.exit(1)
        null
    }
  } else {
    // No REPL configured: keep the plain parent loader.
    parent
  }
}
/**
 * Download any missing dependencies if we receive a new set of files and JARs from the
 * SparkContext. Also adds any new JARs we fetched to the class loader.
 *
 * Both maps go from name/URI to the timestamp at which it was published; a dependency
 * is re-fetched only when its timestamp is newer than the locally recorded one.
 * The whole body is synchronized so concurrent task launches do not race on
 * currentFiles/currentJars or on the class loader.
 */
private def updateDependencies(newFiles: Map[String, Long], newJars: Map[String, Long]) {
  lazy val hadoopConf = SparkHadoopUtil.get.newConfiguration(conf)
  synchronized {
    // Fetch missing dependencies
    for ((name, timestamp) <- newFiles if currentFiles.getOrElse(name, -1L) < timestamp) {
      logInfo("Fetching " + name + " with timestamp " + timestamp)
      // Fetch file with useCache mode, close cache for local mode.
      Utils.fetchFile(name, new File(SparkFiles.getRootDirectory()), conf,
        env.securityManager, hadoopConf, timestamp, useCache = !isLocal)
      currentFiles(name) = timestamp
    }
    for ((name, timestamp) <- newJars) {
      val localName = new URI(name).getPath.split("/").last
      // Look up by full URI first, then by local basename, to catch jars recorded either way.
      val currentTimeStamp = currentJars.get(name)
        .orElse(currentJars.get(localName))
        .getOrElse(-1L)
      if (currentTimeStamp < timestamp) {
        logInfo("Fetching " + name + " with timestamp " + timestamp)
        // Fetch file with useCache mode, close cache for local mode.
        Utils.fetchFile(name, new File(SparkFiles.getRootDirectory()), conf,
          env.securityManager, hadoopConf, timestamp, useCache = !isLocal)
        currentJars(name) = timestamp
        // Add it to our class loader
        val url = new File(SparkFiles.getRootDirectory(), localName).toURI.toURL
        if (!urlClassLoader.getURLs().contains(url)) {
          logInfo("Adding " + url + " to class loader")
          urlClassLoader.addURL(url)
        }
      }
    }
  }
}
/**
 * Reports heartbeat and metrics for active tasks to the driver.
 *
 * Collects per-task accumulator updates (with GC time refreshed), sends them in a
 * single Heartbeat RPC, and re-registers the block manager if the driver asks.
 * After HEARTBEAT_MAX_FAILURES consecutive failed sends, the executor exits.
 */
private def reportHeartBeat(): Unit = {
  // list of (task id, accumUpdates) to send back to the driver
  val accumUpdates = new ArrayBuffer[(Long, Seq[AccumulatorV2[_, _]])]()
  val curGCTime = computeTotalGcTime()
  for (taskRunner <- runningTasks.values().asScala) {
    // taskRunner.task may still be null if the task is not yet deserialized.
    if (taskRunner.task != null) {
      taskRunner.task.metrics.mergeShuffleReadMetrics()
      taskRunner.task.metrics.setJvmGCTime(curGCTime - taskRunner.startGCTime)
      accumUpdates += ((taskRunner.taskId, taskRunner.task.metrics.accumulators()))
    }
  }
  val message = Heartbeat(executorId, accumUpdates.toArray, env.blockManager.blockManagerId)
  try {
    val response = heartbeatReceiverRef.askSync[HeartbeatResponse](
      message, RpcTimeout(conf, "spark.executor.heartbeatInterval", "10s"))
    if (response.reregisterBlockManager) {
      logInfo("Told to re-register on heartbeat")
      env.blockManager.reregister()
    }
    // A successful round-trip resets the consecutive-failure counter.
    heartbeatFailures = 0
  } catch {
    case NonFatal(e) =>
      logWarning("Issue communicating with driver in heartbeater", e)
      heartbeatFailures += 1
      if (heartbeatFailures >= HEARTBEAT_MAX_FAILURES) {
        logError(s"Exit as unable to send heartbeats to driver " +
          s"more than $HEARTBEAT_MAX_FAILURES times")
        System.exit(ExecutorExitCode.HEARTBEAT_FAILURE)
      }
  }
}
/**
 * Schedules a task to report heartbeat and partial metrics for active tasks to driver.
 *
 * The first heartbeat is delayed by a random fraction of the interval so that
 * executors started together do not all heartbeat at the same instant.
 */
private def startDriverHeartbeater(): Unit = {
  val intervalMs = conf.getTimeAsMs("spark.executor.heartbeatInterval", "10s")
  // Wait a random interval so the heartbeats don't end up in sync.
  // Use an explicit numeric conversion (.toInt) instead of asInstanceOf[Int],
  // which only happened to work because primitive casts compile to conversions.
  val initialDelay = intervalMs + (math.random * intervalMs).toInt
  val heartbeatTask = new Runnable() {
    override def run(): Unit = Utils.logUncaughtExceptions(reportHeartBeat())
  }
  heartbeater.scheduleAtFixedRate(heartbeatTask, initialDelay, intervalMs, TimeUnit.MILLISECONDS)
}
}
private[spark] object Executor {
  // This is reserved for internal use by components that need to read task properties before a
  // task is fully deserialized. When possible, the TaskContext.getLocalProperty call should be
  // used instead.
  // Thread-local so each task-runner thread sees only its own task's properties.
  val taskDeserializationProps: ThreadLocal[Properties] = new ThreadLocal[Properties]
}
| VigneshMohan1/spark-branch-2.3 | core/src/main/scala/org/apache/spark/executor/Executor.scala | Scala | apache-2.0 | 34,715 |
package vm.interpreter.impl
import org.apache.bcel.generic.DUP_X2
import sai.vm.OpStack
import vm.Frame
import vm.interpreter.InterpreterBuilder
import vm.interpreter.InstructionInterpreter
/**
 * Interpreter for the JVM DUP_X2 instruction: duplicate the top operand-stack
 * value and insert the copy beneath the two values below it
 * (top-first: v1 v2 v3 ... becomes v1 v2 v3 v1 ...).
 */
private[interpreter] object DupX2Interpreter extends InterpreterBuilder[DUP_X2] {
  override def apply(i: DUP_X2): InstructionInterpreter = {
    case frame @ Frame(_, _, stack, _, _, _) =>
      // @unchecked: a well-formed frame always has at least three stack entries here.
      val rearranged = (stack: @unchecked) match {
        case OpStack(top :: second :: third :: remaining) =>
          OpStack(top :: second :: third :: top :: remaining)
      }
      frame.copy(stack = rearranged)
  }
}
| oliverhaase/sai | src/sai/vm/interpreter/impl/DupX2Interpreter.scala | Scala | mit | 585 |
package play.modules.reactivemongo
import javax.inject.Inject
import scala.util.{ Failure, Success }
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{ Await, Future }
import akka.actor.ActorSystem
import play.api.inject.ApplicationLifecycle
import play.api.{ Configuration, Logger }
import reactivemongo.api.{
DefaultDB,
DB,
MongoConnection,
MongoConnectionOptions,
MongoDriver,
ScramSha1Authentication
}
import reactivemongo.api.gridfs.GridFS
import reactivemongo.core.nodeset.Authenticate
import play.modules.reactivemongo.json.JSONSerializationPack
/**
 * MongoDB API exposed to the application.
 */
trait ReactiveMongoApi {

  /** The underlying MongoDB driver instance. */
  def driver: MongoDriver

  /** The connection resolved from the configured Mongo URI. */
  def connection: MongoConnection

  /** The default database the application is configured against. */
  def db: DefaultDB

  /** A GridFS facade (JSON serialization pack) backed by [[db]]. */
  def gridFS: GridFS[JSONSerializationPack.type]
}
/**
 * Default implementation of ReactiveMongoApi.
 *
 * All members are lazy so the driver/connection/db chain is initialized on first
 * use; resolving [[db]] also registers a stop hook that closes the connection and
 * driver on application shutdown.
 */
final class DefaultReactiveMongoApi @Inject() (
  actorSystem: ActorSystem,
  configuration: Configuration,
  applicationLifecycle: ApplicationLifecycle) extends ReactiveMongoApi {

  import DefaultReactiveMongoApi._

  override lazy val driver = new MongoDriver(Some(configuration.underlying))
  override lazy val connection = driver.connection(parsedUri)
  override lazy val db: DefaultDB = {
    Logger.info("ReactiveMongoApi starting...")
    // Fail fast when the parsed URI carries no database name.
    parsedUri.db.fold[DefaultDB](throw configuration.globalError(
      s"cannot resolve database from URI: $parsedUri")) { dbUri =>
      val db = DB(dbUri, connection)
      registerDriverShutdownHook(connection, driver)
      Logger.info(s"""ReactiveMongoApi successfully started with DB '$dbUri'! Servers:\\n\\t\\t${parsedUri.hosts.map { s => s"[${s._1}:${s._2}]" }.mkString("\\n\\t\\t")}""")
      db
    }
  }
  def gridFS = {
    import play.modules.reactivemongo.json.collection._
    GridFS[JSONSerializationPack.type](db)
  }
  private lazy val parsedUri = parseConf(configuration)

  // On application stop: ask the connection to close (10s budget), wait for it,
  // then close the driver itself.
  private def registerDriverShutdownHook(connection: MongoConnection, mongoDriver: MongoDriver): Unit = applicationLifecycle.addStopHook { () =>
    Future {
      Logger.info("ReactiveMongoApi stopping...")
      val f = connection.askClose()(10.seconds)
      f.onComplete {
        case e => Logger.info(s"ReactiveMongoApi connections stopped. [$e]")
      }
      Await.ready(f, 10.seconds)
      mongoDriver.close()
    }
  }
}
private[reactivemongo] object DefaultReactiveMongoApi {
  // Fallbacks used by the legacy (non-URI) configuration style.
  val DefaultPort = 27017
  val DefaultHost = "localhost:27017"

  /**
   * Builds a ParsedURI from the legacy configuration keys
   * (`mongodb.db`, `mongodb.servers`, `mongodb.options.*`, `mongodb.credentials.*`).
   * Used only when `mongodb.uri` is absent.
   */
  private def parseLegacy(configuration: Configuration): MongoConnection.ParsedURI = {
    val db = configuration.getString("mongodb.db").getOrElse(throw configuration.globalError("Missing configuration key 'mongodb.db'!"))
    val uris = configuration.getStringList("mongodb.servers") match {
      case Some(list) => scala.collection.JavaConversions.collectionAsScalaIterable(list).toList
      case None => List(DefaultHost)
    }
    // Split each "host[:port]" entry; a missing port falls back to DefaultPort,
    // an unparsable or out-of-range port aborts configuration loading.
    val nodes = uris.map { uri =>
      uri.split(':').toList match {
        case host :: port :: Nil => host -> {
          try {
            val p = port.toInt
            if (p > 0 && p < 65536) p
            else throw configuration.globalError(s"Could not parse URI '$uri': invalid port '$port'")
          } catch {
            case _: NumberFormatException => throw configuration.globalError(s"Could not parse URI '$uri': invalid port '$port'")
          }
        }
        case host :: Nil => host -> DefaultPort
        case _ => throw configuration.globalError(s"Could not parse host '$uri'")
      }
    }
    // Accumulate connection options; each key is only applied when present.
    var opts = MongoConnectionOptions()
    configuration.getInt("mongodb.options.nbChannelsPerNode").
      foreach { nb => opts = opts.copy(nbChannelsPerNode = nb) }
    configuration.getString("mongodb.options.authSource").
      foreach { src => opts = opts.copy(authSource = Some(src)) }
    configuration.getInt("mongodb.options.connectTimeoutMS").
      foreach { ms => opts = opts.copy(connectTimeoutMS = ms) }
    configuration.getBoolean("mongodb.options.tcpNoDelay").
      foreach { delay => opts = opts.copy(tcpNoDelay = delay) }
    configuration.getBoolean("mongodb.options.keepAlive").
      foreach { keepAlive => opts = opts.copy(keepAlive = keepAlive) }
    configuration.getBoolean("mongodb.options.ssl.enabled").
      foreach { ssl => opts = opts.copy(sslEnabled = ssl) }
    configuration.getBoolean("mongodb.options.ssl.allowsInvalidCert").
      foreach { allows => opts = opts.copy(sslAllowsInvalidCert = allows) }
    configuration.getString("mongodb.options.authMode").foreach {
      case "scram-sha1" =>
        opts = opts.copy(authMode = ScramSha1Authentication)
      case _ => ()
    }
    // Credentials are only used when both username and password are configured;
    // the auth source defaults to the application database.
    val authenticate: Option[Authenticate] = for {
      username <- configuration.getString("mongodb.credentials.username")
      password <- configuration.getString("mongodb.credentials.password")
    } yield Authenticate(opts.authSource.getOrElse(db), username, password)
    MongoConnection.ParsedURI(
      hosts = nodes,
      options = opts,
      ignoredOptions = Nil,
      db = Some(db),
      authenticate = authenticate)
  }

  /**
   * Resolves the Mongo connection settings: prefers `mongodb.uri` (which must
   * include a database name), otherwise falls back to the legacy keys.
   */
  def parseConf(configuration: Configuration): MongoConnection.ParsedURI =
    configuration.getString("mongodb.uri") match {
      case Some(uri) => MongoConnection.parseURI(uri) match {
        case Success(parsedURI) if parsedURI.db.isDefined =>
          parsedURI
        case Success(_) =>
          throw configuration.globalError(s"Missing database name in mongodb.uri '$uri'")
        case Failure(e) => throw configuration.globalError(s"Invalid mongodb.uri '$uri'", Some(e))
      }
      case _ => parseLegacy(configuration)
    }
}
| duncancrawford/Play-Json-ReactiveMongo | src/main/scala/play/modules/reactivemongo/ReactiveMongoApi.scala | Scala | apache-2.0 | 5,662 |
import javax.ws.rs.{GET, Path, PathParam, Produces}
import javax.xml.bind.annotation.XmlRootElement
import scala.beans.BeanProperty
// JAX-RS resource: GET /item/{symbol} returns an XML-serialized StoreItem
// whose price is fixed at 42.0.
class RestService {
  @GET
  @Path("/item/{symbol}")
  @Produces(Array("application/xml"))
  def item(@PathParam("symbol") symbol: String): StoreItem = StoreItem(symbol, 42.0)
}
// JAXB-serializable item; @BeanProperty generates the getters/setters that
// JAXB binding requires, hence the mutable `var` fields.
@XmlRootElement
case class StoreItem(@BeanProperty var symbol: String,
                     @BeanProperty var price: Double) {
  // No-arg constructor required by JAXB for unmarshalling.
  def this() = this(null, -1)
} | eed3si9n/scalaxb | integration/src/test/scala/RestService.scala | Scala | mit | 471 |
/*
* Copyright 2016 Groupon, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.groupon.sparklint.events
/**
 * Exposes progress counters at event, task, stage and job granularity.
 *
 * @author swhitear
 * @since 9/12/16.
 */
trait EventProgressTrackerLike {
  // Progress over all events.
  val eventProgress: EventProgress
  // Progress over task-level events.
  val taskProgress: EventProgress
  // Progress over stage-level events.
  val stageProgress: EventProgress
  // Progress over job-level events.
  val jobProgress: EventProgress
}
| groupon/sparklint | src/main/scala/com/groupon/sparklint/events/EventProgressTrackerLike.scala | Scala | apache-2.0 | 858 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.test.module.output.es
import com.bwsw.common.JsonSerializer
import com.bwsw.sj.common.engine.core.entities.TStreamEnvelope
import com.bwsw.sj.common.engine.core.environment.OutputEnvironmentManager
import com.bwsw.sj.common.engine.core.output.{Entity, OutputStreamingExecutor}
import com.bwsw.sj.engine.core.output.types.es.{ElasticsearchEntityBuilder, IntegerField}
import com.typesafe.scalalogging.Logger
/**
 * Output executor that maps each integer element of an incoming T-stream
 * envelope to a [[Data]] document, and declares a single integer field for
 * the Elasticsearch output entity.
 *
 * @author Pavel Tomskikh
 */
class Executor(manager: OutputEnvironmentManager) extends OutputStreamingExecutor[Integer](manager) {
  private val logger = Logger(getClass)
  private val serializer = new JsonSerializer(ignoreUnknown = true, enableNullForPrimitives = true)
  // Field name for the output documents, read from the module options JSON;
  // defaults to "value" when the option is absent.
  private val property =
    serializer.deserialize[Map[String, Any]](manager.options)
      .getOrElse(Data.propertyField, "value").asInstanceOf[String]

  // Wraps every integer in the envelope into a Data document under `property`.
  override def onMessage(envelope: TStreamEnvelope[Integer]): List[Data] = {
    logger.debug(s"Got envelope ${envelope.id} with elements ${envelope.data.toList.mkString(", ")}")
    envelope.data.toList.map(i => new Data(property, i))
  }

  // Declares the Elasticsearch schema: one integer field named `property`.
  override def getOutputEntity: Entity[String] = {
    new ElasticsearchEntityBuilder()
      .field(new IntegerField(property))
      .build()
  }
}
| bwsw/sj-platform | tests/pipeline/sj-output-es-test/src/main/scala/com/bwsw/sj/test/module/output/es/Executor.scala | Scala | apache-2.0 | 2,063 |
/*
* Copyright (c) 2013 Aviat Networks.
* This file is part of DocReg+Web. Please refer to the NOTICE.txt file for license details.
*/
package vvv.docreg.agent
import org.jboss.netty.buffer.ChannelBuffer
import net.liftweb.common.Loggable
import akka.actor.{PoisonPill, ActorRef, Actor}
// Base trait for actors speaking the daemon protocol.
trait DaemonAgent extends Actor

// A request addressed to `target`; `replyTo` receives the eventual Reply.
case class RequestPackage(replyTo: ActorRef, target: String, request: Request)

// A decoded reply together with its protocol header (carries the transaction id).
case class ReplyPackage(header: Header, reply: Reply)
/**
 * Actor that encodes outgoing requests, transmits them to a target, and routes
 * incoming replies back to the original requester by transaction id.
 *
 * State: `previousTransaction` generates wrap-around transaction ids, and
 * `outstandingTransactions` maps each in-flight id to the ActorRef awaiting
 * the matching reply.
 */
class DaemonAgentImpl extends DaemonAgent with DaemonProtocol with Loggable
{
  var previousTransaction: Int = 0
  val consumers = List(self)
  var outstandingTransactions: Map[Int, ActorRef] = Map.empty

  // Returns the next transaction id, wrapping back to 0 near Int.MaxValue.
  def nextTransaction(): Int =
  {
    if (previousTransaction < (Int.MaxValue - 1))
    {
      previousTransaction = previousTransaction + 1
    }
    else
    {
      previousTransaction = 0
    }
    previousTransaction
  }

  def receive = {
    // Outgoing request: pick the matching encoder, record the transaction,
    // and transmit the framed message. Unknown request types are dropped.
    case RequestPackage(replyTo, target, request) =>
    {
      val encoding = request match {
        // todo yuck, these should be encode stacks in the protocol.
        case x: NextChangeRequest =>
        {
          val e = new NextChangeRequestEncoder{}
          Some((e.messageType, (buffer: ChannelBuffer) => e.encode(x, buffer)))
        }
        case x: RegisterRequest =>
        {
          val e = new RegisterRequestEncoder{}
          Some((e.messageType, (buffer: ChannelBuffer) => e.encode(x, buffer)))
        }
        case x: SubmitRequest =>
        {
          val e = new SubmitRequestEncoder{}
          Some((e.messageType, (buffer: ChannelBuffer) => e.encode(x, buffer)))
        }
        case x: EditRequest =>
        {
          val e = new EditRequestEncoder{}
          Some((e.messageType, (buffer: ChannelBuffer) => e.encode(x, buffer)))
        }
        case x: UneditRequest =>
        {
          val e = new UneditRequestEncoder{}
          Some((e.messageType, (buffer: ChannelBuffer) => e.encode(x, buffer)))
        }
        case x: SubscribeRequest =>
        {
          val e = new SubscribeRequestEncoder{}
          Some((e.messageType, (buffer: ChannelBuffer) => e.encode(x, buffer)))
        }
        case x: UnsubscribeRequest =>
        {
          val e = new UnsubscribeRequestEncoder{}
          Some((e.messageType, (buffer: ChannelBuffer) => e.encode(x, buffer)))
        }
        case x: ApprovalRequest =>
        {
          val e = new ApprovalRequestEncoder{}
          Some((e.messageType, (buffer: ChannelBuffer) => e.encode(x, buffer)))
        }
        case _ =>
        {
          None
        }
      }
      // Todo, unedit doesn't have a reply, so no outstanding transaction!!!!!!!!
      encoding.foreach{ x =>
        val id = nextTransaction()
        outstandingTransactions += (id -> replyTo)
        transmit(target, DownstreamMessage(
          Header(DaemonProtocol.protocolVersion, x._1, id, 1),
          x._2
        ))
      }
    }
    // Incoming reply: forward to the waiting requester and drop the transaction.
    // Replies for unknown transaction ids are silently ignored.
    case ReplyPackage(Header(_, _, transaction, _), reply) =>
    {
      outstandingTransactions.get(transaction).foreach{ replyTo =>
        outstandingTransactions -= transaction
        replyTo ! reply
      }
    }
    // Shutdown: close the transport and stop this actor.
    case 'Die =>
    {
      logger.info("DaemonAgent killed")
      close()
      self ! PoisonPill
    }
    case _ =>
    {
      // Ignored message.
    }
  }
}
// Cake-pattern component exposing the daemon agent actor reference.
trait DaemonAgentComponent
{
  val daemonAgent: ActorRef
}
//trait DaemonAgentComponentImpl extends DaemonAgentComponent
//{
// val daemonAgent = new DaemonAgentImpl()
//} | scott-abernethy/docreg-web | src/main/scala/vvv/docreg/agent/DaemonAgent.scala | Scala | gpl-3.0 | 3,800 |
package org.scalafmt.sysops
import java.io.File
import java.nio.charset.StandardCharsets
import java.nio.file.Files
import munit.{Assertions, FunSuite, Location}
import org.scalafmt.util.DeleteTree.deleteTree
import scala.util._
class GitOpsTest extends FunSuite {
import GitOpsTest._
val root = AbsoluteFile.userDir
val dirName = "gitTestDir"

// DESNOTE(2017-08-16, pjrt): Create a temporary git directory for each
// test.
private implicit var ops: GitOpsImpl = _
private var path: AbsoluteFile = _
private var initFile: AbsoluteFile = _

// Fresh temp repo per test: init git, then make an initial commit so HEAD exists.
override def beforeEach(context: BeforeEach): Unit = {
  path = AbsoluteFile(Files.createTempDirectory(dirName))
  ops = new GitOpsImpl(path)
  init(ops)
  // initial commit is needed
  initFile = touch("initialfile")
  add(initFile)(ops)
  commit(ops)
}

// Best-effort cleanup of the temp repo; a failure is reported but never fails the test.
override def afterEach(context: AfterEach): Unit = {
  try {
    deleteTree(path.path)
  } catch {
    case e: Throwable =>
      println("Unable to delete test files")
      e.printStackTrace()
  }
}
// Creates an empty temp file directly under `dir`.
private def touch(dir: AbsoluteFile): AbsoluteFile = touch(dir = Some(dir))

// Creates an empty temp file "<name>*.ext" under `dir` (defaults to the repo root).
private def touch(
    name: String = Random.alphanumeric.take(10).mkString,
    dir: Option[AbsoluteFile] = None
): AbsoluteFile = {
  val d = dir.orElse(ops.rootDir).get.jfile
  val f = File.createTempFile(name, ".ext", d)
  f.deleteOnExit()
  AbsoluteFile(f)
}

// Creates a symbolic link "<name>*.ext" pointing at `file`; the temp file is
// deleted first so only the link occupies the path.
def symbolicLinkTo(
    file: AbsoluteFile,
    name: String = Random.alphanumeric.take(10).mkString,
    dir: Option[AbsoluteFile] = None
): AbsoluteFile = {
  val linkFile =
    File.createTempFile(name, ".ext", dir.orElse(ops.rootDir).get.jfile)
  linkFile.delete()
  val link = AbsoluteFile(linkFile)
  Files.createSymbolicLink(link.path, file.path)
  link
}
// Moves `f` into a freshly created "dir_*" directory and git-rm's the old path.
// NOTE(review): Files.move targets `destDir` itself (not destDir/<f.name>), so it
// relies on REPLACE_EXISTING replacing the just-created empty directory with the
// file — confirm this is the intended move semantics.
def mv(f: AbsoluteFile, dir: Option[AbsoluteFile] = None): AbsoluteFile = {
  val destDir = Files.createTempDirectory(
    dir.orElse(ops.rootDir).get.path,
    "dir_"
  )
  val dest = Files.move(
    f.path,
    destDir,
    java.nio.file.StandardCopyOption.REPLACE_EXISTING
  )
  rm(f)
  AbsoluteFile(dest)
}
/** Overwrites `f` with ten fresh random alphanumeric characters (UTF-8). */
def modify(f: AbsoluteFile): Unit =
  f.writeFile(Random.alphanumeric.take(10).mkString)(StandardCharsets.UTF_8)
// Lists tracked files in the working directory, minus the bootstrap commit file.
def ls(implicit ops: GitOpsImpl) =
  // DESNOTE(2017-08-17, pjrt): Filter out the initial file since it will
  // just annoy us in the tests below
  ops
    .lsTree(ops.workingDirectory)
    .filterNot(_ == initFile)

// Creates (and returns) a directory named `dirName` under the repo root.
def mkDir(
    dirName: String = Random.alphanumeric.take(10).mkString
): AbsoluteFile = {
  val file = ops.rootDir.getOrElse(ops.workingDirectory) / dirName
  file.mkdir()
  file
}
// lsTree
test("lsTree should not return files not added to the index") {
  touch()
  assertEquals(ls, Seq.empty)
}

test("#1010: lsTree should return staged files") {
  val f = touch()
  add(f)
  val q = ls
  assertEquals(q.toSet, Set(f), q.mkString + " != " + f.toString())
}

test("lsTree should return committed files") {
  val f = touch()
  add(f)
  commit
  assertEquals(ls.toSet, Set(f))
}

test("lsTree should exclude symbolic links") {
  val f = touch()
  add(f)
  val g = symbolicLinkTo(f)
  add(g)
  commit
  assertEquals(ls.toSet, Set(f))
}

test("lsTree should not return committed files that have been deleted") {
  val f = touch()
  add(f)
  commit
  rm(f)
  assertEquals(ls, Seq.empty)
}

test(
  "lsTree should return files properly when the working directory is under the git root directory"
) {
  val f1 = touch()
  add(f1)
  // A GitOpsImpl rooted at a subdirectory must only see that subdirectory's files.
  val innerDir = mkDir()
  val f2 = touch(innerDir)
  add(f2)
  val innerGitOps = new GitOpsImpl(innerDir)
  assertEquals(ls(innerGitOps).toSet, Set(f2))
}

test("lsTree should return committed files that have been modified") {
  val f = touch()
  add(f)
  commit
  modify(f)
  assertEquals(ls.toSet, Set(f))
}
// Diff of `cwd` paths against branch/ref `br`.
def diff(br: String, cwd: AbsoluteFile*)(implicit
    ops: GitOpsImpl
): Seq[AbsoluteFile] =
  ops.diff(br, cwd: _*)

// Diff against HEAD.
def diff(cwd: AbsoluteFile*)(implicit ops: GitOpsImpl): Seq[AbsoluteFile] =
  diff("HEAD", cwd: _*)

// Working-tree status restricted to `cwd` paths.
def status(cwd: AbsoluteFile*)(implicit ops: GitOpsImpl): Seq[AbsoluteFile] =
  ops.status(cwd: _*)
  // --- diff: changes relative to HEAD or another branch ---

  test("diff should return modified committed files") {
    val f = touch()
    add(f)
    commit
    modify(f)
    assertEquals(diff().toSet, Set(f))
  }
  test("diff should return modified files from specific subdirs") {
    val d1 = mkDir()
    val d2 = mkDir()
    val d3 = mkDir()
    val f0 = touch()
    val f1 = touch(dir = d1)
    val f2 = touch(dir = d2)
    val f3 = touch(dir = d3)
    add(f0, f1, f2, f3)
    // Restricting to d1/d2 excludes the top-level file and d3's file.
    assertEquals(diff(d1, d2).toSet, Set(f1, f2))
    assertEquals(diff().toSet, Set(f0, f1, f2, f3))
    assertEquals(diff(path).toSet, Set(f0, f1, f2, f3))
  }
  test("#1000: diff should not return git deleted files") {
    val f = touch()
    add(f)
    commit
    rm(f)
    assertEquals(diff(), Seq.empty)
  }
  test("#1000: diff should not return fs deleted files") {
    val f = touch()
    add(f)
    commit
    // Deleted directly on the filesystem, not via `git rm`.
    rmfs(f)
    assertEquals(diff(), Seq.empty)
  }
  test("diff should return added files against HEAD") {
    val dir = mkDir("dir 1")
    val f1 = touch()
    val f2 = touch(dir = dir)
    add(f1)
    add(f2)
    assertEquals(diff().toSet, Set(f1, f2))
    assertEquals(diff(cwd = dir).toSet, Set(f2))
  }
  test("diff should return added files against a different branch") {
    val f = touch()
    add(f)
    commit
    checkoutBr("other")
    val dir = mkDir("dir 1")
    val f1 = touch()
    val f2 = touch(dir = dir)
    add(f1)
    add(f2)
    commit
    assertEquals(diff("master").toSet, Set(f1, f2))
    assertEquals(diff("master", dir).toSet, Set(f2))
  }
  test(
    "diff should return added files that are then modified against a different branch"
  ) {
    val f = touch()
    add(f)
    commit
    checkoutBr("other")
    val dir = mkDir("dir 1")
    val f1 = touch()
    val f2 = touch(dir = dir)
    add(f1)
    add(f2)
    // f1 is staged and then modified again; it must still appear once.
    modify(f1)
    assertEquals(diff("master").toSet, Set(f1, f2))
    assertEquals(diff("master", dir).toSet, Set(f2))
  }
  test("diff should not return removed files against a different branch") {
    val f = touch()
    add(f)
    commit
    checkoutBr("other")
    val f1 = touch()
    val f2 = touch()
    add(f1)
    add(f2)
    commit
    rm(f1)
    assertEquals(diff("master").toSet, Set(f2))
  }
  // --- status: uncommitted changes ---

  test("status should return only modified files") {
    val f = touch()
    add(f)
    commit
    val f1 = touch()
    assertEquals(status().toSet, Set(f1))
  }
  test("status should return modified files from specific subdirs") {
    val d1 = mkDir()
    val d2 = mkDir()
    val d3 = mkDir()
    val f0 = touch()
    val f1 = touch(dir = d1)
    val f2 = touch(dir = d2)
    val f3 = touch(dir = d3)
    add(f0, f1, f2, f3)
    assertEquals(status(d1, d2).toSet, Set(f1, f2))
    assertEquals(status().toSet, Set(f0, f1, f2, f3))
    assertEquals(status(path).toSet, Set(f0, f1, f2, f3))
  }
  test("status should return moved") {
    val f = touch()
    add(f)
    commit
    // Only the destination of the move is reported.
    val f1 = mv(f)
    add(f1)
    assertEquals(status().toSet, Set(f1))
  }
  test("status should not return deleted files") {
    val f = touch()
    modify(f)
    add(f)
    commit
    val f1 = touch()
    modify(f1)
    add(f1)
    rm(f)
    assertEquals(status().toSet, Set(f1))
  }
  test("status should return files with spaces in the path") {
    val dir = mkDir("dir 1")
    val f = touch(dir = dir)
    add(f)
    assertEquals(status().toSet, Set(f))
  }
  test("lsTree should return files from specific subdirs") {
    val d1 = mkDir()
    val d2 = mkDir()
    val d3 = mkDir()
    val f1 = touch(dir = d1)
    val f2 = touch(dir = d2)
    val f3 = touch(dir = d3)
    add(f1, f2, f3)
    // Unrestricted listings include the repo's initial seed file.
    assertEquals(ops.lsTree(d1, d2).toSet, Set(f1, f2))
    assertEquals(ops.lsTree().toSet, Set(initFile, f1, f2, f3))
    assertEquals(ops.lsTree(path).toSet, Set(initFile, f1, f2, f3))
  }
}
/** Helpers shared by the GitOps tests: thin wrappers over filesystem
  * operations and the `git` CLI. All git commands fail the enclosing
  * test when the underlying process fails.
  */
private object GitOpsTest {

  // Filesystem commands

  def rmfs(file: AbsoluteFile): Unit =
    file.delete()

  // Git commands

  /** Runs `git <cmd> <args...>` in the test repo; fails the test on error. */
  def git(cmd: String, args: String*)(implicit
      ops: GitOpsImpl,
      loc: Location
  ): Seq[String] =
    Try(ops.exec("git" +: cmd +: args)) match {
      case Failure(f) => Assertions.fail(s"Failed git command. Got: $f")
      case Success(s) => s
    }

  def init(implicit ops: GitOpsImpl): Unit =
    git("init")

  def add(file: AbsoluteFile*)(implicit ops: GitOpsImpl): Unit =
    git("add", file.map(_.toString()): _*)

  def rm(file: AbsoluteFile*)(implicit ops: GitOpsImpl): Unit =
    git("rm", file.map(_.toString()): _*)

  def commit(implicit ops: GitOpsImpl): Unit =
    git("commit", "-m", "'some-message'")

  /** Checks out an existing branch.
    * Fix: previously passed the literal string "$br" (the `s` interpolator was
    * missing), asking git for a branch literally named `$br`.
    */
  def checkout(br: String)(implicit ops: GitOpsImpl): Unit =
    git("checkout", br)

  /** Creates and checks out a new branch (fix: was the literal "$newBr"). */
  def checkoutBr(newBr: String)(implicit ops: GitOpsImpl): Unit =
    git("checkout", "-b", newBr)
}
| scalameta/scalafmt | scalafmt-tests/src/test/scala/org/scalafmt/sysops/GitOpsTest.scala | Scala | apache-2.0 | 9,029 |
// Copyright 2015,2016,2017,2018,2019,2020 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package commbank.grimlock.test
import commbank.grimlock.framework._
import commbank.grimlock.framework.content._
import commbank.grimlock.framework.encoding._
import commbank.grimlock.framework.environment.implicits._
import commbank.grimlock.framework.environment.tuner._
import commbank.grimlock.framework.metadata._
import commbank.grimlock.framework.position._
import commbank.grimlock.framework.squash._
import commbank.grimlock.library.squash._
import com.twitter.scalding.typed.ValuePipe
import shapeless.{ HList, Nat }
import shapeless.nat.{ _0, _1, _2 }
/** Shared fixtures for the Matrix `squash` / `squashWithValue` tests.
  *
  * `ext` is the external value passed to `squashWithValue`; `result1`..`result10`
  * are the expected cell lists for squashing each dimension of `data2`/`data3`.
  */
trait TestMatrixSquash extends TestMatrix {
  val ext = "ext"

  // squash _0 of data2
  val result1 = List(
    Cell(Position(1), Content(OrdinalSchema[String](), "12.56")),
    Cell(Position(2), Content(ContinuousSchema[Double](), 6.28)),
    Cell(Position(3), Content(NominalSchema[String](), "9.42")),
    Cell(
      Position(4),
      Content(
        DateSchema[java.util.Date](),
        DateValue((new java.text.SimpleDateFormat("yyyy-MM-dd hh:mm:ss")).parse("2000-01-01 12:56:00"))
      )
    )
  )

  // squash _1 of data2
  val result2 = List(
    Cell(Position("bar"), Content(OrdinalSchema[Long](), 19L)),
    Cell(Position("baz"), Content(DiscreteSchema[Long](), 19L)),
    Cell(
      Position("foo"),
      Content(
        DateSchema[java.util.Date](),
        DateValue((new java.text.SimpleDateFormat("yyyy-MM-dd hh:mm:ss")).parse("2000-01-01 12:56:00"))
      )
    ),
    Cell(Position("qux"), Content(OrdinalSchema[String](), "12.56"))
  )

  // squash _0 of data3
  val result3 = List(
    Cell(Position(1, "xyz"), Content(OrdinalSchema[String](), "12.56")),
    Cell(Position(2, "xyz"), Content(ContinuousSchema[Double](), 6.28)),
    Cell(Position(3, "xyz"), Content(NominalSchema[String](), "9.42")),
    Cell(
      Position(4, "xyz"),
      Content(
        DateSchema[java.util.Date](),
        DateValue((new java.text.SimpleDateFormat("yyyy-MM-dd hh:mm:ss")).parse("2000-01-01 12:56:00"))
      )
    )
  )

  // squash _1 of data3
  val result4 = List(
    Cell(Position("bar", "xyz"), Content(OrdinalSchema[Long](), 19L)),
    Cell(Position("baz", "xyz"), Content(DiscreteSchema[Long](), 19L)),
    Cell(
      Position("foo", "xyz"),
      Content(
        DateSchema[java.util.Date](),
        DateValue((new java.text.SimpleDateFormat("yyyy-MM-dd hh:mm:ss")).parse("2000-01-01 12:56:00"))
      )
    ),
    Cell(Position("qux", "xyz"), Content(OrdinalSchema[String](), "12.56"))
  )

  // squash _2 of data3
  val result5 = List(
    Cell(Position("bar", 1), Content(OrdinalSchema[String](), "6.28")),
    Cell(Position("bar", 2), Content(ContinuousSchema[Double](), 12.56)),
    Cell(Position("bar", 3), Content(OrdinalSchema[Long](), 19L)),
    Cell(Position("baz", 1), Content(OrdinalSchema[String](), "9.42")),
    Cell(Position("baz", 2), Content(DiscreteSchema[Long](), 19L)),
    Cell(Position("foo", 1), Content(OrdinalSchema[String](), "3.14")),
    Cell(Position("foo", 2), Content(ContinuousSchema[Double](), 6.28)),
    Cell(Position("foo", 3), Content(NominalSchema[String](), "9.42")),
    Cell(
      Position("foo", 4),
      Content(
        DateSchema[java.util.Date](),
        DateValue((new java.text.SimpleDateFormat("yyyy-MM-dd hh:mm:ss")).parse("2000-01-01 12:56:00"))
      )
    ),
    Cell(Position("qux", 1), Content(OrdinalSchema[String](), "12.56"))
  )

  // result6..result10 are the squashWithValue analogues of result1..result5.
  val result6 = List(
    Cell(Position(1), Content(OrdinalSchema[String](), "12.56")),
    Cell(Position(2), Content(ContinuousSchema[Double](), 6.28)),
    Cell(Position(3), Content(NominalSchema[String](), "9.42")),
    Cell(
      Position(4),
      Content(
        DateSchema[java.util.Date](),
        DateValue((new java.text.SimpleDateFormat("yyyy-MM-dd hh:mm:ss")).parse("2000-01-01 12:56:00"))
      )
    )
  )

  val result7 = List(
    Cell(Position("bar"), Content(OrdinalSchema[Long](), 19L)),
    Cell(Position("baz"), Content(DiscreteSchema[Long](), 19L)),
    Cell(
      Position("foo"),
      Content(
        DateSchema[java.util.Date](),
        DateValue((new java.text.SimpleDateFormat("yyyy-MM-dd hh:mm:ss")).parse("2000-01-01 12:56:00"))
      )
    ),
    Cell(Position("qux"), Content(OrdinalSchema[String](), "12.56"))
  )

  val result8 = List(
    Cell(Position(1, "xyz"), Content(OrdinalSchema[String](), "12.56")),
    Cell(Position(2, "xyz"), Content(ContinuousSchema[Double](), 6.28)),
    Cell(Position(3, "xyz"), Content(NominalSchema[String](), "9.42")),
    Cell(
      Position(4, "xyz"),
      Content(
        DateSchema[java.util.Date](),
        DateValue((new java.text.SimpleDateFormat("yyyy-MM-dd hh:mm:ss")).parse("2000-01-01 12:56:00"))
      )
    )
  )

  val result9 = List(
    Cell(Position("bar", "xyz"), Content(OrdinalSchema[Long](), 19L)),
    Cell(Position("baz", "xyz"), Content(DiscreteSchema[Long](), 19L)),
    Cell(
      Position("foo", "xyz"),
      Content(
        DateSchema[java.util.Date](),
        DateValue((new java.text.SimpleDateFormat("yyyy-MM-dd hh:mm:ss")).parse("2000-01-01 12:56:00"))
      )
    ),
    Cell(Position("qux", "xyz"), Content(OrdinalSchema[String](), "12.56"))
  )

  val result10 = List(
    Cell(Position("bar", 1), Content(OrdinalSchema[String](), "6.28")),
    Cell(Position("bar", 2), Content(ContinuousSchema[Double](), 12.56)),
    Cell(Position("bar", 3), Content(OrdinalSchema[Long](), 19L)),
    Cell(Position("baz", 1), Content(OrdinalSchema[String](), "9.42")),
    Cell(Position("baz", 2), Content(DiscreteSchema[Long](), 19L)),
    Cell(Position("foo", 1), Content(OrdinalSchema[String](), "3.14")),
    Cell(Position("foo", 2), Content(ContinuousSchema[Double](), 6.28)),
    Cell(Position("foo", 3), Content(NominalSchema[String](), "9.42")),
    Cell(
      Position("foo", 4),
      Content(
        DateSchema[java.util.Date](),
        DateValue((new java.text.SimpleDateFormat("yyyy-MM-dd hh:mm:ss")).parse("2000-01-01 12:56:00"))
      )
    ),
    Cell(Position("qux", 1), Content(OrdinalSchema[String](), "12.56"))
  )
}
object TestMatrixSquash {
  /** A SquasherWithValue that delegates to [[PreservingMaximumPosition]] but
    * only emits a result when the supplied external value equals "ext"; used
    * to verify that the external value is actually threaded through.
    */
  case class PreservingMaxPositionWithValue[P <: HList]() extends SquasherWithValue[P] {
    type V = String
    type T = squasher.T

    // Underlying squasher that does the real work.
    val squasher = PreservingMaximumPosition[P]()
    val tTag = squasher.tTag

    def prepareWithValue[
      D <: Nat
    ](
      cell: Cell[P],
      dim: D,
      ext: V
    )(implicit
      ev: Position.IndexConstraints[P, D] { type V <: Value[_] }
    ): Option[T] = squasher.prepare(cell, dim)

    def reduce(lt: T, rt: T): T = squasher.reduce(lt, rt)

    // Only present a result when the external value matches the expected token.
    def presentWithValue(t: T, ext: V): Option[Content] = if (ext == "ext") squasher.present(t) else None
  }
}
/** squash/squashWithValue behaviour on the plain-Scala backend. */
class TestScalaMatrixSquash extends TestMatrixSquash with TestScala {
  import commbank.grimlock.scala.environment.implicits._

  "A Matrix.squash" should "return its first squashed data in 2D" in {
    toU(data2)
      .squash(_0, PreservingMaximumPosition(), Default())
      .toList.sortBy(_.position) shouldBe result1
  }
  it should "return its second squashed data in 2D" in {
    toU(data2)
      .squash(_1, PreservingMaximumPosition(), Default())
      .toList.sortBy(_.position) shouldBe result2
  }
  it should "return its first squashed data in 3D" in {
    toU(data3)
      .squash(_0, PreservingMaximumPosition(), Default())
      .toList.sortBy(_.position) shouldBe result3
  }
  it should "return its second squashed data in 3D" in {
    toU(data3)
      .squash(_1, PreservingMaximumPosition(), Default())
      .toList.sortBy(_.position) shouldBe result4
  }
  it should "return its third squashed data in 3D" in {
    toU(data3)
      .squash(_2, PreservingMaximumPosition(), Default())
      .toList.sortBy(_.position) shouldBe result5
  }

  "A Matrix.squashWithValue" should "return its first squashed data in 2D" in {
    toU(data2)
      .squashWithValue(_0, ext, TestMatrixSquash.PreservingMaxPositionWithValue(), Default())
      .toList.sortBy(_.position) shouldBe result6
  }
  it should "return its second squashed data in 2D" in {
    toU(data2)
      .squashWithValue(_1, ext, TestMatrixSquash.PreservingMaxPositionWithValue(), Default())
      .toList.sortBy(_.position) shouldBe result7
  }
  it should "return its first squashed data in 3D" in {
    toU(data3)
      .squashWithValue(_0, ext, TestMatrixSquash.PreservingMaxPositionWithValue(), Default())
      .toList.sortBy(_.position) shouldBe result8
  }
  it should "return its second squashed data in 3D" in {
    toU(data3)
      .squashWithValue(_1, ext, TestMatrixSquash.PreservingMaxPositionWithValue(), Default())
      .toList.sortBy(_.position) shouldBe result9
  }
  it should "return its third squashed data in 3D" in {
    toU(data3)
      .squashWithValue(_2, ext, TestMatrixSquash.PreservingMaxPositionWithValue(), Default())
      .toList.sortBy(_.position) shouldBe result10
  }
}
/** squash/squashWithValue behaviour on the Scalding backend; external values
  * are supplied as a ValuePipe and some tests exercise a tuned (12-reducer)
  * Default.
  */
class TestScaldingMatrixSquash extends TestMatrixSquash with TestScalding {
  import commbank.grimlock.scalding.environment.implicits._

  "A Matrix.squash" should "return its first squashed data in 2D" in {
    toU(data2)
      .squash(_0, PreservingMaximumPosition(), Default())
      .toList.sortBy(_.position) shouldBe result1
  }
  it should "return its second squashed data in 2D" in {
    toU(data2)
      .squash(_1, PreservingMaximumPosition(), Default(12))
      .toList.sortBy(_.position) shouldBe result2
  }
  it should "return its first squashed data in 3D" in {
    toU(data3)
      .squash(_0, PreservingMaximumPosition(), Default())
      .toList.sortBy(_.position) shouldBe result3
  }
  it should "return its second squashed data in 3D" in {
    toU(data3)
      .squash(_1, PreservingMaximumPosition(), Default(12))
      .toList.sortBy(_.position) shouldBe result4
  }
  it should "return its third squashed data in 3D" in {
    toU(data3)
      .squash(_2, PreservingMaximumPosition(), Default())
      .toList.sortBy(_.position) shouldBe result5
  }

  "A Matrix.squashWithValue" should "return its first squashed data in 2D" in {
    toU(data2)
      .squashWithValue(_0, ValuePipe(ext), TestMatrixSquash.PreservingMaxPositionWithValue(), Default(12))
      .toList.sortBy(_.position) shouldBe result6
  }
  it should "return its second squashed data in 2D" in {
    toU(data2)
      .squashWithValue(_1, ValuePipe(ext), TestMatrixSquash.PreservingMaxPositionWithValue(), Default())
      .toList.sortBy(_.position) shouldBe result7
  }
  it should "return its first squashed data in 3D" in {
    toU(data3)
      .squashWithValue(_0, ValuePipe(ext), TestMatrixSquash.PreservingMaxPositionWithValue(), Default(12))
      .toList.sortBy(_.position) shouldBe result8
  }
  it should "return its second squashed data in 3D" in {
    toU(data3)
      .squashWithValue(_1, ValuePipe(ext), TestMatrixSquash.PreservingMaxPositionWithValue(), Default())
      .toList.sortBy(_.position) shouldBe result9
  }
  it should "return its third squashed data in 3D" in {
    toU(data3)
      .squashWithValue(_2, ValuePipe(ext), TestMatrixSquash.PreservingMaxPositionWithValue(), Default(12))
      .toList.sortBy(_.position) shouldBe result10
  }
}
/** squash/squashWithValue behaviour on the Spark backend; some tests exercise
  * a tuned (12-partition) Default.
  */
class TestSparkMatrixSquash extends TestMatrixSquash with TestSpark {
  import commbank.grimlock.spark.environment.implicits._

  "A Matrix.squash" should "return its first squashed data in 2D" in {
    toU(data2)
      .squash(_0, PreservingMaximumPosition(), Default())
      .toList.sortBy(_.position) shouldBe result1
  }
  it should "return its second squashed data in 2D" in {
    toU(data2)
      .squash(_1, PreservingMaximumPosition(), Default(12))
      .toList.sortBy(_.position) shouldBe result2
  }
  it should "return its first squashed data in 3D" in {
    toU(data3)
      .squash(_0, PreservingMaximumPosition(), Default())
      .toList.sortBy(_.position) shouldBe result3
  }
  it should "return its second squashed data in 3D" in {
    toU(data3)
      .squash(_1, PreservingMaximumPosition(), Default(12))
      .toList.sortBy(_.position) shouldBe result4
  }
  it should "return its third squashed data in 3D" in {
    toU(data3)
      .squash(_2, PreservingMaximumPosition(), Default())
      .toList.sortBy(_.position) shouldBe result5
  }

  "A Matrix.squashWithValue" should "return its first squashed data in 2D" in {
    toU(data2)
      .squashWithValue(_0, ext, TestMatrixSquash.PreservingMaxPositionWithValue(), Default(12))
      .toList.sortBy(_.position) shouldBe result6
  }
  it should "return its second squashed data in 2D" in {
    toU(data2)
      .squashWithValue(_1, ext, TestMatrixSquash.PreservingMaxPositionWithValue(), Default())
      .toList.sortBy(_.position) shouldBe result7
  }
  it should "return its first squashed data in 3D" in {
    toU(data3)
      .squashWithValue(_0, ext, TestMatrixSquash.PreservingMaxPositionWithValue(), Default(12))
      .toList.sortBy(_.position) shouldBe result8
  }
  it should "return its second squashed data in 3D" in {
    toU(data3)
      .squashWithValue(_1, ext, TestMatrixSquash.PreservingMaxPositionWithValue(), Default())
      .toList.sortBy(_.position) shouldBe result9
  }
  it should "return its third squashed data in 3D" in {
    toU(data3)
      .squashWithValue(_2, ext, TestMatrixSquash.PreservingMaxPositionWithValue(), Default(12))
      .toList.sortBy(_.position) shouldBe result10
  }
}
| CommBank/grimlock | grimlock-core/src/test/scala/commbank/grimlock/matrix/TestMatrixSquash.scala | Scala | apache-2.0 | 13,847 |
package me.heaton.scalaz
import org.specs2.mock.Mockito
import org.specs2.mutable.Specification
import MonoidTry._
// Spec for the hand-rolled Monoid abstraction in MonoidTry: summing via the
// implicit monoid, summing with an explicitly passed monoid, and the injected
// |+| operator.
class MonoidTrySpec extends Specification with Mockito {
  "try monoid" should {
    "have sum of list" in {
      sum(List(1, 2, 3, 4)) ==== 10
      sum(List("a", "b", "c")) ==== "abc"
    }
    // NOTE(review): "moniod" is a typo in the test name; left untouched since
    // renaming would change the reported spec output.
    "pass other moniod into sum" in {
      // A product monoid (identity 1) shows sum3 honours the passed instance.
      val multiMonoid: Monoid[Int] = new Monoid[Int] {
        def mappend(a: Int, b: Int): Int = a * b
        def mzero: Int = 1
      }
      sum3(List(1, 2, 3, 4))(multiMonoid) === 24
    }
    "inject plus into Int and String " in {
      (3 |+| 7) === 10
      ("A" |+| "BC") === "ABC"
    }
  }
}
| heaton/hello-scala | src/test/scala/me/heaton/scalaz/MonoidTrySpec.scala | Scala | mit | 659 |
package au.com.dius.pact.provider
import au.com.dius.pact.model.unfiltered.Conversions
import au.com.dius.pact.model.{Request, Response}
import au.com.dius.pact.provider.AnimalServiceResponses.responses
import com.typesafe.scalalogging.StrictLogging
import org.json4s.JsonAST._
import org.json4s.StringInput
import org.json4s.jackson.JsonMethods.parse
import unfiltered.netty.{ReceivedMessage, ServerErrorResponse, cycle}
import unfiltered.request.HttpRequest
import unfiltered.response.ResponseFunction
/** An in-process HTTP stub of the "animal service" used by provider tests.
  * Requests ending in "enterState" set the current provider state; all other
  * requests are answered from the canned `responses` table keyed by that state.
  */
object TestService extends StrictLogging {
  // Current provider state, mutated by "enterState" requests.
  // NOTE(review): shared mutable state — assumes a single server instance per
  // test run; confirm tests never run two servers concurrently.
  var state: String = ""

  case class RequestHandler(port: Int) extends cycle.Plan
    with cycle.SynchronousExecution
    with ServerErrorResponse {
    import io.netty.handler.codec.http.{ HttpResponse=>NHttpResponse }

    def handle(request:HttpRequest[ReceivedMessage]): ResponseFunction[NHttpResponse] = {
      val response = if(request.uri.endsWith("enterState")) {
        // State-change request: pull the "state" field out of the JSON body.
        val pactRequest: Request = Conversions.unfilteredRequestToPactRequest(request)
        val json = parse(StringInput(pactRequest.body.get))
        state = (for {
          JString(s) <- json \\\\ "state"
        } yield s).head
        Response(200, None, None, None)
      } else {
        // Regular request: look up the canned response for (state, uri),
        // falling back to a bare 400.
        responses.get(state).flatMap(_.get(request.uri)).getOrElse(Response(400, None, None, None))
      }
      Conversions.pactToUnfilteredResponse(response)
    }

    def intent = PartialFunction[HttpRequest[ReceivedMessage], ResponseFunction[NHttpResponse]](handle)
  }

  /** Starts the stub server on 127.0.0.1:`port` and returns the running server. */
  def apply(port:Int) = {
    val server = _root_.unfiltered.netty.Server.local(port).handler(RequestHandler(port))
    logger.info(s"starting unfiltered app at 127.0.0.1 on port $port")
    server.start()
    server
  }
}
| venksub/pact-jvm | pact-jvm-provider/src/test/scala/au/com/dius/pact/provider/TestService.scala | Scala | apache-2.0 | 1,743 |
package io.dylemma.spac
package impl
import cats.data.Chain
import scala.collection.mutable
/** A Splitter that derives contexts of type `C` by running the accumulated
  * element stack through `matcher`. `matcherPos` is captured so SPAC traces
  * can point back at the call site that constructed the splitter.
  */
case class SplitterByContextMatch[In, Elem, C](matcher: ContextMatcher[Elem, C], matcherPos: CallerPos)(implicit S: StackLike[In, Elem]) extends Splitter[In, C] {
  // Pipeline: attach a SPAC trace frame, interpret raw inputs into stack
  // events, then convert those into ContextChange[In, C] boundary events.
  val addBoundaries = Transformer
    .spacFrame(SpacTraceElement.InSplitter(matcher.toString, matcherPos))
    .through(S.interpret)
    .through(new SplitterByContextMatch.Boundaries(matcher))
}
object SplitterByContextMatch {
  /** Transformer that converts stack events (pushes/pops of `Elem`) into
    * matched-context boundary events of type `C`; one fresh Handler per run.
    */
  class Boundaries[In, Elem, C](matcher: ContextMatcher[Elem, C]) extends Transformer[Either[ContextChange[In, Elem], In], Either[ContextChange[In, C], In]] {
    override def toString = s"SplitterBoundaries($matcher)"
    def newHandler = new SplitterByContextMatch.Handler(matcher)
  }

  /** Splitter implementation detail that re-interprets stack states into contexts.
    * As stack elements are pushed and popped via the incoming stream,
    * it plugs the accumulated stack into the given `matcher` to determine if a
    * matched context has been entered (or exited). The matched context is
    * communicated as `ContextChange` events of the `C` type via the outgoing stream.
    */
  class Handler[In, Elem, C](matcher: ContextMatcher[Elem, C]) extends Transformer.Handler[Either[ContextChange[In, Elem], In], Either[ContextChange[In, C], In]] {
    override def toString = s"Splitter($matcher)"

    // Per-frame context traces and raw stack elements accumulated so far.
    private val traces = new mutable.ArrayBuffer[ContextTrace[In]]
    private val stack = new mutable.ArrayBuffer[Elem]
    // Whether we are currently inside a matched context, and how many extra
    // (unmatched) frames have been pushed since the match began.
    private var isMatched = false
    private var extraDepth = 0

    def finish(out: Transformer.HandlerWrite[Either[ContextChange[In, C], In]]): Unit = {
      // if we're in a matched context, it needs to be closed (via ContextPop) before we finish
      if (isMatched) out.push(Left(ContextPop))
    }

    def push(in: Either[ContextChange[In, Elem], In], out: Transformer.HandlerWrite[Either[ContextChange[In, C], In]]): Signal = in match {
      case Right(in) =>
        // just pass the event along, with no change in state
        out.push(Right(in))

      case Left(ContextPush(incomingTrace, elem)) =>
        if (isMatched) {
          // already in a matched context, so the push simply increments our extraDepth
          extraDepth += 1
          Signal.Continue
        } else {
          // the push may be enough to put us into a matching state; we need to check the `matcher`
          traces += incomingTrace
          stack += elem
          matcher(stack, 0, stack.length) match {
            case None =>
              // no new context match, just continue
              Signal.Continue
            case Some(c) =>
              // new matched context!
              isMatched = true
              extraDepth = 0
              val combinedTrace = traces.foldLeft(ContextTrace[In](Chain.empty))(_ / _)
              val change = ContextPush(combinedTrace, c)
              out.push(Left(change))
          }
        }

      case Left(ContextPop) =>
        if (isMatched) {
          if (extraDepth == 0) {
            // matched context has ended; pop from the stack and traces, and emit the pop event
            traces.remove(traces.length - 1)
            stack.remove(stack.length - 1)
            isMatched = false
            out.push(Left(ContextPop))
          } else {
            // still in the matched context, minus one extra depth
            extraDepth -= 1
            Signal.Continue
          }
        } else {
          // just pop the last values off of the stack/traces
          traces.remove(traces.length - 1)
          stack.remove(stack.length - 1)
          Signal.Continue
        }
    }
  }
}
| dylemma/xml-spac | core/src/main/scala/io/dylemma/spac/impl/SplitterByContextMatch.scala | Scala | mit | 3,392 |
/*
* Copyright 2015 Webtrends (http://www.webtrends.com)
*
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webtrends.harness.component.metrics
import akka.actor.ActorSystem
import akka.testkit.{TestKit, TestProbe}
import com.webtrends.harness.component.TestKitSpecificationWithJUnit
import com.webtrends.harness.component.metrics.messages.{MeterObservation, CounterObservation}
import com.webtrends.harness.component.metrics.metrictype.{Counter, Meter}
// Verifies MetricsEventBus pub/sub semantics: subscribers receive published
// observations, and stop receiving after unsubscribing.
class MetricsEventBusSpec extends TestKitSpecificationWithJUnit(ActorSystem("harness")) {

  val metric = Counter("group.subgroup.name.scope")
  val meter = Meter("group.subgroup.name.scope")

  // The event bus is a shared object, so run the examples sequentially.
  sequential

  "The event bus should " should {
    " allow actors to subscribe and receive metric observations" in {
      val probe = new TestProbe(system)
      MetricsEventBus.subscribe(probe.ref)
      val obs = CounterObservation(metric, 1)
      MetricsEventBus.publish(obs)
      // Unsubscribe before the assertion; the observation was published while
      // subscribed, so it is already queued in the probe.
      MetricsEventBus.unsubscribe(probe.ref)
      obs must be equalTo probe.expectMsg(obs)
    }
    " allow actors to subscribe and then un-subscribe" in {
      val probe = new TestProbe(system)
      MetricsEventBus.subscribe(probe.ref)
      val obs = MeterObservation(meter, 1)
      MetricsEventBus.publish(obs)
      obs must be equalTo probe.expectMsg(obs)
      MetricsEventBus.unsubscribe(probe.ref)
      MetricsEventBus.publish(obs)
      // After unsubscribing, no further observations must arrive.
      probe.expectNoMsg()
      success
    }
  }

  // Tear down the actor system once all examples have run.
  step {
    TestKit.shutdownActorSystem(system)
  }
}
| Webtrends/wookiee-metrics | src/test/scala/com/webtrends/harness/component/metrics/MetricsEventBusSpec.scala | Scala | apache-2.0 | 2,133 |
package com.nabijaczleweli.minecrasmer.compat.waila
import com.nabijaczleweli.minecrasmer.block.{BlockAccessoryAdditionalCPU, BlockAccessoryOverclocker, ComputerGeneric}
import com.nabijaczleweli.minecrasmer.util.StringUtils._
import mcp.mobius.waila.api.IWailaRegistrar
import net.minecraftforge.fml.relauncher.{Side, SideOnly}
@SideOnly(Side.CLIENT)
@SideOnly(Side.CLIENT)
object WailaCompatRegisterer {
  // Fully-qualified callback name handed to Waila so it can invoke
  // registerWailaCompats reflectively.
  // NOTE(review): `before "$"` presumably strips the trailing '$' from the
  // Scala object's class name — confirm against StringUtils.
  final val pathToRegisterMethod = (getClass.getName before "$") + ".registerWailaCompats"

  // Invoked reflectively by Waila; registers tooltip providers for the
  // computer blocks and the accessory blocks.
  @SuppressWarnings(Array("unused"))
  def registerWailaCompats(registrar: IWailaRegistrar) {
    registrar.registerBodyProvider(ProviderComputer, classOf[ComputerGeneric])
    registrar.registerStackProvider(ProviderComputer, classOf[ComputerGeneric])
    registrar.registerBodyProvider(ProviderOverclocker, BlockAccessoryOverclocker.getClass)
    registrar.registerBodyProvider(ProviderAdditionalCPU, BlockAccessoryAdditionalCPU.getClass)
  }
}
| nabijaczleweli/ASMifier | src/main/scala/com/nabijaczleweli/minecrasmer/compat/waila/WailaCompatRegisterer.scala | Scala | mit | 912 |
package org.jetbrains.plugins.scala
package project.notification.source
import java.lang.StringBuilder
import java.lang.StringBuilder
import com.intellij.lexer.Lexer
import com.intellij.openapi.util.text.StringUtil
import com.intellij.util.StringBuilderSpinAllocator
import org.jetbrains.plugins.scala.lang.lexer.{ScalaLexer, ScalaTokenTypes}
/**
 * Extracts the package statement from Scala source text without building a
 * full AST: a lexer scans leading `package` clauses (including nested
 * `package a { package b { ... } }` forms) and joins them with dots.
 * Returns null (Java-interop style) when no complete package statement exists.
 *
 * @author Alexander Podkhalyuzin
 */
object ScalaDirUtil {
  def getPackageStatement(text: CharSequence): String = {
    val lexer: Lexer = new ScalaLexer
    lexer.start(text)
    val buffer: StringBuilder = StringBuilderSpinAllocator.alloc()
    // Consumes one `package` clause per call, recursing for nested clauses;
    // `firstTime` controls whether a '.' separator precedes this clause.
    def readPackage(firstTime: Boolean) {
      skipWhiteSpaceAndComments(lexer)
      if (lexer.getTokenType != ScalaTokenTypes.kPACKAGE) return
      if (!firstTime) buffer.append('.')
      lexer.advance()
      skipWhiteSpaceAndComments(lexer)
      // `package object foo` terminates the chain: append the name and stop.
      if (lexer.getTokenType == ScalaTokenTypes.kOBJECT) {
        lexer.advance()
        skipWhiteSpaceAndComments(lexer)
        if (lexer.getTokenType == ScalaTokenTypes.tIDENTIFIER)
          buffer.append(text, lexer.getTokenStart, lexer.getTokenEnd)
        return
      }
      // Appends a dot-separated qualified identifier (a.b.c) to the buffer.
      def appendPackageStatement() {
        while (true) {
          if (lexer.getTokenType != ScalaTokenTypes.tIDENTIFIER) return
          buffer.append(text, lexer.getTokenStart, lexer.getTokenEnd)
          lexer.advance()
          skipWhiteSpaceAndComments(lexer)
          if (lexer.getTokenType != ScalaTokenTypes.tDOT) return
          buffer.append('.')
          lexer.advance()
          skipWhiteSpaceAndComments(lexer)
        }
      }
      appendPackageStatement()
      // Step inside a block-style `package foo {` before reading the next clause.
      if (lexer.getTokenType == ScalaTokenTypes.tLBRACE) {
        lexer.advance()
        skipWhiteSpaceAndComments(lexer)
      }
      readPackage(false)
    }
    try {
      readPackage(true)
      val packageName: String = buffer.toString
      // A trailing '.' means the statement was incomplete; treat as absent.
      if (packageName.length == 0 || StringUtil.endsWithChar(packageName, '.')) return null
      packageName
    }
    finally {
      // Return the pooled StringBuilder to the spin allocator.
      StringBuilderSpinAllocator.dispose(buffer)
    }
  }

  // Advances the lexer past any whitespace and comment tokens.
  def skipWhiteSpaceAndComments(lexer: Lexer) {
    while (ScalaTokenTypes.WHITES_SPACES_AND_COMMENTS_TOKEN_SET.contains(lexer.getTokenType)) {
      lexer.advance()
    }
  }
}
| triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/project/notification/source/ScalaDirUtil.scala | Scala | apache-2.0 | 2,221 |
package com.softwaremill.reactive.step1
import akka.actor.ActorSystem
import akka.stream.ActorFlowMaterializer
import akka.stream.scaladsl._
import com.softwaremill.reactive._
/**
* - flow from the client, transforming, no response
* - *elastic*: delay to see the backpressure
*/
class ReceiverStep1(host: String, port: Int)(implicit val system: ActorSystem) extends Logging {
def run(): Unit = {
implicit val mat = ActorFlowMaterializer()
logger.info(s"Receiver: binding to $host:$port")
Tcp().bind(host, port).runForeach { conn =>
logger.info(s"Receiver: sender connected (${conn.remoteAddress})")
val receiveSink = conn.flow
.transform(() => new ParseLinesStage("\\n", 4000000))
.filter(_.startsWith("20"))
.map(_.split(","))
.mapConcat(FlightData(_).toList)
.to(Sink.foreach { flightData =>
logger.info("Got data: " + flightData)
Thread.sleep(100L)
})
Source.empty.to(receiveSink).run()
}
}
}
object ReceiverStep1 extends App {
implicit val system = ActorSystem()
new ReceiverStep1("localhost", 9182).run()
}
| adamw/reactive-akka-pres | src/main/scala/com/softwaremill/reactive/step1/ReceiverStep1.scala | Scala | apache-2.0 | 1,132 |
package com.github.ldaniels528.broadway.core.io.flow.impl
import com.github.ldaniels528.broadway.core.io.Scope
import com.github.ldaniels528.broadway.core.io.device.{AsynchronousOutputSupport, InputSource, OutputSource}
import com.github.ldaniels528.broadway.core.io.flow.Flow
import com.github.ldaniels528.broadway.core.io.layout.Layout.InputSet
import com.github.ldaniels528.commons.helpers.OptionHelper.Risky._
import scala.concurrent.{ExecutionContext, Future}
/**
* Represents a simple process flow implementation
* @author lawrence.daniels@gmail.com
*/
/** Represents a simple process flow implementation: reads record sets from a
  * single input source and writes the transformed records to a single output
  * source until the input signals EOF.
  * @author lawrence.daniels@gmail.com
  */
case class SimpleFlow(id: String, input: InputSource, output: OutputSource) extends Flow {

  // All devices participating in this flow, in stable (id-sorted) order.
  override def devices = List(input, output).sortBy(_.id)

  override def execute(scope: Scope)(implicit ec: ExecutionContext) = {
    implicit val myScope = scope

    // open the input and output sources
    output.open
    input.open

    // Pump record sets from input to output until EOF is reported.
    var inputSet: Option[InputSet] = None
    do {
      // read the input record(s)
      inputSet = input.layout.read(input)

      // transform the output record(s)
      inputSet.foreach(output.layout.write(output, _))

    } while (inputSet.exists(!_.isEOF))

    // close the input source, but not the output source as it might be asynchronous
    input.close

    // ask to be notified once all asynchronous writes have completed
    val task = output match {
      case aos: AsynchronousOutputSupport => aos.allWritesCompleted
      case _ => Future.successful(Seq(output))
    }

    // close the output source once all writes have completed
    // NOTE(review): the future returned below may resolve before this
    // onComplete callback has closed the output — confirm callers do not
    // depend on the sink being closed when the returned future completes.
    task onComplete (_ => output.close)
    task.map(_ => ())
  }
}
| ldaniels528/broadway | app-cli/src/main/scala/com/github/ldaniels528/broadway/core/io/flow/impl/SimpleFlow.scala | Scala | apache-2.0 | 1,618 |
package com.twitter.algebird
import org.specs._
import org.scalacheck.Arbitrary
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck.Properties
import org.scalacheck.Gen.choose
import org.scalacheck.Prop.forAll
// ScalaCheck property suite: a CountMinSketchMonoid must satisfy the monoid
// laws (associativity, identity) over generated single-element sketches.
object CountMinSketchLaws extends Properties("CountMinSketch") {
  import BaseProperties._

  // Sketch parameters: eps/delta trade accuracy against confidence.
  val DELTA = 1E-10
  val EPS = 0.001
  val SEED = 1

  implicit val cmsMonoid = new CountMinSketchMonoid(EPS, DELTA, SEED)
  // Generator: single-value sketches over a bounded range.
  implicit val cmsGen =
    Arbitrary {
      for (v <- choose(0, 10000)) yield (cmsMonoid.create(v))
    }

  property("CountMinSketch is a Monoid") = monoidLaws[CMS]
}
class CountMinSketchTest extends Specification {
  noDetailedDiffs()

  // Sketch parameters and shared helpers for the examples below.
  val DELTA = 1E-10
  val EPS = 0.001
  val SEED = 1
  val CMS_MONOID = new CountMinSketchMonoid(EPS, DELTA, SEED)
  val RAND = new scala.util.Random
/**
* Returns the exact frequency of {x} in {data}.
*/
def exactFrequency(data : Seq[Long], x : Long) : Long = {
data.filter { _ == x }.size
}
/**
* Returns the exact inner product between two data streams, when the streams
* are viewed as count vectors.
*/
def exactInnerProduct(data1 : Seq[Long], data2 : Seq[Long]) : Long = {
val counts1 = data1.groupBy( x => x ).mapValues( _.size )
val counts2 = data2.groupBy( x => x ).mapValues( _.size )
(counts1.keys.toSet & counts2.keys.toSet).map { k => counts1(k) * counts2(k) }.sum
}
/**
* Returns the elements in {data} that appear at least heavyHittersPct * data.size times.
*/
def exactHeavyHitters(data : Seq[Long], heavyHittersPct : Double) : Set[Long] = {
val counts = data.groupBy( x => x ).mapValues( _.size )
val totalCount = counts.values.sum
counts.filter { _._2 >= heavyHittersPct * totalCount }.keys.toSet
}
"CountMinSketch" should {
"count total number of elements in a stream" in {
val totalCount = 1243
val range = 234
val data = (0 to (totalCount - 1)).map { _ => RAND.nextInt(range).toLong }
val cms = CMS_MONOID.create(data)
cms.totalCount must be_==(totalCount)
}
"estimate frequencies" in {
val totalCount = 5678
val range = 897
val data = (0 to (totalCount - 1)).map { _ => RAND.nextInt(range).toLong }
val cms = CMS_MONOID.create(data)
(0 to 100).foreach { _ =>
val x = RAND.nextInt(range).toLong
val exact = exactFrequency(data, x)
val approx = cms.frequency(x).estimate
val maxError = approx - cms.frequency(x).min
approx must be_>=(exact)
(approx - exact) must be_<=(maxError)
}
}
"exactly compute frequencies in a small stream" in {
val one = CMS_MONOID.create(1)
val two = CMS_MONOID.create(2)
val cms = CMS_MONOID.plus(CMS_MONOID.plus(one, two), two)
cms.frequency(0).estimate must be_==(0)
cms.frequency(1).estimate must be_==(1)
cms.frequency(2).estimate must be_==(2)
val three = CMS_MONOID.create(Seq(1L, 1L, 1L))
three.frequency(1L).estimate must be_==(3)
val four = CMS_MONOID.create(Seq(1L, 1L, 1L, 1L))
four.frequency(1L).estimate must be_==(4)
val cms2 = CMS_MONOID.plus(four, three)
cms2.frequency(1L).estimate must be_==(7)
}
"estimate inner products" in {
val totalCount = 5234
val range = 1390
val data1 = (0 to (totalCount - 1)).map { _ => RAND.nextInt(range).toLong }
val data2 = (0 to (totalCount - 1)).map { _ => RAND.nextInt(range).toLong }
val cms1 = CMS_MONOID.create(data1)
val cms2 = CMS_MONOID.create(data1)
val approxA = cms1.innerProduct(cms2)
val approx = approxA.estimate
val exact = exactInnerProduct(data1, data2)
val maxError = approx - approxA.min
approx must be_==(cms2.innerProduct(cms1).estimate)
approx must be_>=(exact)
(approx - exact) must be_<=(maxError)
}
"exactly compute inner product of small streams" in {
// Nothing in common.
val a1 = List(1L, 2L, 3L)
val a2 = List(4L, 5L, 6L)
CMS_MONOID.create(a1).innerProduct(CMS_MONOID.create(a2)).estimate must be_==(0)
// One element in common.
val b1 = List(1L, 2L, 3L)
val b2 = List(3L, 5L, 6L)
CMS_MONOID.create(b1).innerProduct(CMS_MONOID.create(b2)).estimate must be_==(1)
// Multiple, non-repeating elements in common.
val c1 = List(1L, 2L, 3L)
val c2 = List(3L, 2L, 6L)
CMS_MONOID.create(c1).innerProduct(CMS_MONOID.create(c2)).estimate must be_==(2)
// Multiple, repeating elements in common.
val d1 = List(1L, 2L, 2L, 3L, 3L)
val d2 = List(2L, 3L, 3L, 6L)
CMS_MONOID.create(d1).innerProduct(CMS_MONOID.create(d2)).estimate must be_==(6)
}
"estimate heavy hitters" in {
// Simple way of making some elements appear much more often than others.
val data1 = (1 to 3000).map { _ => RAND.nextInt(3).toLong }
val data2 = (1 to 3000).map { _ => RAND.nextInt(10).toLong }
val data3 = (1 to 1450).map { _ => -1L } // element close to being a 20% heavy hitter
val data = data1 ++ data2 ++ data3
// Find elements that appear at least 20% of the time.
val cms = (new CountMinSketchMonoid(EPS, DELTA, SEED, 0.2)).create(data)
val trueHhs = exactHeavyHitters(data, cms.heavyHittersPct)
val estimatedHhs = cms.heavyHitters
// All true heavy hitters must be claimed as heavy hitters.
(trueHhs.intersect(estimatedHhs) == trueHhs) must be_==(true)
// It should be very unlikely that any element with count less than
// (heavyHittersPct - eps) * totalCount is claimed as a heavy hitter.
val minHhCount = (cms.heavyHittersPct - cms.eps) * cms.totalCount
val infrequent = data.groupBy{ x => x }.mapValues{ _.size }.filter{ _._2 < minHhCount }.keys.toSet
infrequent.intersect(estimatedHhs).size must be_==(0)
}
"drop old heavy hitters when new heavy hitters replace them" in {
val monoid = new CountMinSketchMonoid(EPS, DELTA, SEED, 0.3)
val cms1 = monoid.create(Seq(1L, 2L, 2L))
cms1.heavyHitters must be_==(Set(1L, 2L))
val cms2 = cms1 ++ monoid.create(2L)
cms2.heavyHitters must be_==(Set(2L))
val cms3 = cms2 ++ monoid.create(1L)
cms3.heavyHitters must be_==(Set(1L, 2L))
val cms4 = cms3 ++ monoid.create(Seq(0L, 0L, 0L, 0L, 0L, 0L))
cms4.heavyHitters must be_==(Set(0L))
}
"exactly compute heavy hitters in a small stream" in {
val data1 = Seq(1L, 2L, 2L, 3L, 3L, 3L, 4L, 4L, 4L, 4L, 5L, 5L, 5L, 5L, 5L)
val cms1 = (new CountMinSketchMonoid(EPS, DELTA, SEED, 0.01)).create(data1)
val cms2 = (new CountMinSketchMonoid(EPS, DELTA, SEED, 0.1)).create(data1)
val cms3 = (new CountMinSketchMonoid(EPS, DELTA, SEED, 0.3)).create(data1)
val cms4 = (new CountMinSketchMonoid(EPS, DELTA, SEED, 0.9)).create(data1)
cms1.heavyHitters must be_==(Set(1L, 2L, 3L, 4L, 5L))
cms2.heavyHitters must be_==(Set(2L, 3L, 4L, 5L))
cms3.heavyHitters must be_==(Set(5L))
cms4.heavyHitters must be_==(Set[Long]())
}
}
}
| snoble/algebird | algebird-test/src/test/scala/com/twitter/algebird/CountMinSketchTest.scala | Scala | apache-2.0 | 7,111 |
/*
* Copyright (c) 2014-2015 by its authors. Some rights reserved.
* See the project homepage at: http://www.monifu.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monifu.concurrent.cancelables
import minitest.SimpleTestSuite
/**
 * Tests the reference-counting cancelable: the onCancel callback must fire
 * only after cancel() has been requested AND every acquired reference has
 * been released.
 */
object RefCountCancelableSuite extends SimpleTestSuite {
  test("cancel without dependent references") {
    var wasCanceled = false
    val refCounted = RefCountCancelable { wasCanceled = true }

    refCounted.cancel()
    assert(wasCanceled)
  }

  test("execute onCancel with no dependent refs active") {
    var wasCanceled = false
    val refCounted = RefCountCancelable { wasCanceled = true }

    val ref1 = refCounted.acquire()
    val ref2 = refCounted.acquire()
    ref1.cancel()
    ref2.cancel()

    // Releasing every reference alone must not trigger the callback.
    assert(!wasCanceled)
    assert(!refCounted.isCanceled)

    refCounted.cancel()

    assert(wasCanceled)
    assert(refCounted.isCanceled)
  }

  test("execute onCancel only after all dependent refs have been canceled") {
    var wasCanceled = false
    val refCounted = RefCountCancelable { wasCanceled = true }

    val ref1 = refCounted.acquire()
    val ref2 = refCounted.acquire()
    refCounted.cancel()

    // The cancelable is flagged immediately, but the callback waits for refs.
    assert(refCounted.isCanceled)
    assert(!wasCanceled)

    ref1.cancel()
    assert(!wasCanceled)

    ref2.cancel()
    assert(wasCanceled)
  }
}
| virtualirfan/monifu | core/shared/src/test/scala/monifu/concurrent/cancelables/RefCountCancelableSuite.scala | Scala | apache-2.0 | 1,714 |
package ar.com.crypticmind.basewebapp.testsupport
import ar.com.crypticmind.basewebapp.misc.IdGenerator
import javax.sql.DataSource
import com.mchange.v2.c3p0.ComboPooledDataSource
import ar.com.crypticmind.basewebapp.dal.DatabaseComponent
/**
 * Mixin for tests: overrides the data source with a pooled in-memory H2
 * database whose name is unique per instance, so tests cannot collide.
 */
trait TestDataSource { this: DatabaseComponent =>

  // Unique database name for this test instance.
  val testDatabaseName = s"testdb-${IdGenerator.shortId}"

  override def createDataSource: DataSource = {
    val pool = new ComboPooledDataSource
    pool.setJdbcUrl(s"jdbc:h2:mem:$testDatabaseName;DB_CLOSE_DELAY=-1")
    pool
  }
}
| crypticmind/base-webapp | backend/src/test/scala/ar/com/crypticmind/basewebapp/testsupport/TestDataSource.scala | Scala | mit | 522 |
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
import org.apache.hadoop.conf.Configuration
import org.apache.spark.{ SparkContext, SparkConf }
import org.apache.spark.rdd.RDD
import org.bson.BSONObject
import com.mongodb.hadoop.{
MongoInputFormat,
MongoOutputFormat,
BSONFileInputFormat,
BSONFileOutputFormat
}
import com.mongodb.hadoop.util.MongoConfigUtil
import org.bson.types.ObjectId
import org.bson.{ BSONObject, BasicBSONObject }
import com.mongodb.hadoop.io.MongoUpdateWritable
import com.mongodb.BasicDBList
import scala.io.Source
import spray.json._
import DefaultJsonProtocol._
//val sc = SparkContext.getOrCreate
// val inputURI = "mongodb://localhost:27018/weather_db.historical"
// val outputURI = "mongodb://localhost:27018/weather_db.forecast"
/**
 * Spark job that enriches MongoDB weather data: it links each "forecast"
 * document to its "historical" counterpart (via a hist_id foreign key) and
 * then writes aggregate temperature / rain / wind statistics back into the
 * forecast collection using the Mongo-Hadoop connector.
 *
 * NOTE(review): configuration lives in mutable object-level fields populated
 * by readConfig() before the job runs; methods assume main() ran first.
 */
object SysDist {
  // Spark context; null until main() creates it.
  var sc : SparkContext = null
  // Default replica-set members; overridden by config.json via readConfig().
  var hosts : List[String] = List(
    "localhost:27018",
    "localhost:27019",
    "localhost:27020"
  )
  var inputDB : String = "weather_db"
  var inputColl : String = "historical"
  var outputDB : String = "weather_db"
  var outputColl : String = "forecast"
  val baseURLFormat : String = "mongodb://%s/%s.%s?replicaSet=%s"
  var replicaSetName : String = "sysdist"
  // Optional Mongo read preference; empty string means "not set".
  var readPref : String = ""

  // Hadoop configurations used for every read (input) and write (output).
  val inputConfig = new Configuration()
  val outputConfig = new Configuration()

  /**
   * Loads connection settings from ./config.json into the mutable fields
   * above. A document missing any expected field silently matches nothing
   * and leaves the defaults in place.
   */
  def readConfig() {
    val configsStr = Source.fromFile("config.json").getLines.mkString
    val jsonAst = configsStr.parseJson
    //println(jsonAst.prettyPrint)
    jsonAst.asJsObject.getFields("hosts", "database", "histColl", "foreColl", "replicaSet", "readPreference") match {
      case Seq(JsArray(_hosts), JsString(_db), JsString(_histColl), JsString(_foreColl), JsString(_replSet), JsString(_readPref)) => {
        hosts = _hosts.toList.map(_.convertTo[String])
        inputDB = _db
        outputDB = _db
        inputColl = _histColl
        outputColl = _foreColl
        replicaSetName = _replSet
        readPref = _readPref
      }
    }
  }

  /**
   * Builds a MongoDB connection URL, appending the readPreference query
   * parameter only when one was configured.
   */
  def getURL(hostList: List[String], db: String, collection: String, replicaSet: String, readPref: String) : String = {
    var baseURLFormat = "mongodb://%s/%s.%s?replicaSet=%s"
    if (readPref != "") {
      baseURLFormat += "&readPreference=%s"
      println(">>>>> Using readPreference \\"" + readPref + "\\"")
      baseURLFormat.format(hostList.reduce(_+","+_), db, collection, replicaSet, readPref)
    }
    else {
      baseURLFormat.format(hostList.reduce(_+","+_), db, collection, replicaSet)
    }
  }

  /**
   * Converts a BSON numeric value (boxed Double or Integer) holding degrees
   * Kelvin into degrees Celsius.
   */
  def kelvinToCelsius(numberAsObj : Object) : Double = {
    val temp_kelvin =
      (if (numberAsObj.isInstanceOf[Double])
        Double.unbox(numberAsObj)
      else
        numberAsObj.asInstanceOf[Integer].toDouble)
    temp_kelvin - 273.15
  }

  /** Persists an RDD of Mongo update operations using the given output config. */
  def saveToMongo(rdd: RDD[(Null, MongoUpdateWritable)], config: Configuration) {
    rdd.saveAsNewAPIHadoopFile("", classOf[Any], classOf[Any], classOf[MongoOutputFormat[Any,Any]], config)
  }

  // Fix the foreign keys in the database. That means, add a hist_id field
  // to entries in 'forecast' with the id from 'historical'
  //
  // NOTE(review): documents are matched by sorting both collections by city
  // name and zipping — this assumes a 1:1 correspondence between the two
  // collections; confirm that invariant holds in production data.
  def fixFK() {
    val fixFKInputFore = new Configuration()
    fixFKInputFore.set("mongo.input.uri", getURL(hosts, "weather_db", "forecast", replicaSetName, readPref))
    val name_fore = sc.newAPIHadoopRDD(fixFKInputFore, classOf[MongoInputFormat], classOf[Object], classOf[BSONObject])
    val name_fore_sorted = name_fore.map(
      {
        case (id, obj) =>
          (obj.get("city").asInstanceOf[BSONObject].get("name").toString,
            // If this object already contains the foreign key, we will discard it later
            // (this should happen either for every single document or for none of them)
            if (obj.containsField("hist_id")) None else id.toString)
      }).sortByKey(true, 1)

    val fixFKInputHist = new Configuration()
    fixFKInputHist.set("mongo.input.uri", getURL(hosts, "weather_db", "historical", replicaSetName, readPref))
    val name_hist = sc.newAPIHadoopRDD(fixFKInputHist, classOf[MongoInputFormat], classOf[Object], classOf[BSONObject])
    val name_hist_sorted =
      name_hist.
        map(
          {
            case (id, obj) =>
              (obj.get("city").asInstanceOf[BSONObject].get("name").toString, id.toString)
          }).
        sortByKey(true, 1)

    // This gives us pairs in the form ( (city1, idHistorical), (city1, idForecast) )
    val zipped = name_hist_sorted.zip(name_fore_sorted)
    saveToMongo(
      zipped.
        filter({ case ((n1, id_h), (n2, id_f)) => id_f != None }).
        map(
          {
            case ((n1, id_h), (n2, id_f)) =>
              val query = new BasicBSONObject().append("_id", new ObjectId(id_f.toString))
              val update = new BasicBSONObject().append("$set", new BasicBSONObject("hist_id", new ObjectId(id_h)))
              val muw = new MongoUpdateWritable(query, update, false, false)
              (null, muw)
          })
      , outputConfig)
  }

  /**
   * Averages the temperature over the last 5 days (5 days * 8 readings/day)
   * of each historical document and stores it as stats.avg_temp_last5d on
   * the matching forecast document.
   */
  def calc_avg_temp_last5d() {
    val docs = sc.newAPIHadoopRDD(inputConfig, classOf[MongoInputFormat], classOf[Object], classOf[BSONObject])
    val update_avgtemp = docs.
      map(x =>
        (x._1.toString,
          x._2.get("list").asInstanceOf[BasicDBList].toArray().takeRight(5 * 8).
            map(y => {
              val temp = y.asInstanceOf[BSONObject].get("main").asInstanceOf[BSONObject].get("temp")
              kelvinToCelsius(temp)
            }))).
      map(x => (x._1, x._2.reduce(_+_)/x._2.length)).
      map(x => {
        val newProps = new BasicBSONObject().append("stats.avg_temp_last5d", x._2)
        val query = new BasicBSONObject().append("hist_id", new ObjectId(x._1))
        val update = new BasicBSONObject().append("$set", newProps)
        (null, new MongoUpdateWritable(query, update, false, false))
      })
    saveToMongo(update_avgtemp, outputConfig)
  }

  /**
   * Computes all-time min/max temperature (Celsius) per historical document
   * and stores them as stats.max_temp / stats.min_temp on the forecast.
   */
  def calc_minmax_temp() {
    val docs = sc.newAPIHadoopRDD(inputConfig, classOf[MongoInputFormat], classOf[Object], classOf[BSONObject])
    val update_minmaxtemp = docs.
      map(x =>
        (x._1.toString,
          x._2.get("list").asInstanceOf[BasicDBList].toArray().
            map(y => {
              val temp = y.asInstanceOf[BSONObject].get("main").asInstanceOf[BSONObject].get("temp")
              kelvinToCelsius(temp)
            }))).
      map(x => (x._1, (x._2.max, x._2.min))).
      map(x => {
        val newProps = new BasicBSONObject()
        newProps.append("stats.max_temp", x._2._1)
        newProps.append("stats.min_temp", x._2._2)
        val query = new BasicBSONObject().append("hist_id", new ObjectId(x._1))
        val update = new BasicBSONObject().append("$set", newProps)
        (null, new MongoUpdateWritable(query, update, false, false))
      })
    saveToMongo(update_minmaxtemp, outputConfig)
  }

  /**
   * Computes min/max *daily* rainfall per historical document: per-reading
   * "rain.3h" values are summed by calendar day, then the max/min daily sums
   * are stored as stats.max_rain / stats.min_rain. Missing rain readings
   * count as 0 and zero-rain readings are filtered out.
   */
  def calc_minmax_rain() {
    val docs = sc.newAPIHadoopRDD(inputConfig, classOf[MongoInputFormat], classOf[Object], classOf[BSONObject])
    val update_minmaxrain = docs.
      map(x =>
        (x._1.toString,
          x._2.get("list").asInstanceOf[BasicDBList].toArray().
            map(y => {
              val rain = Option(y.asInstanceOf[BSONObject].get("rain").asInstanceOf[BSONObject])
              val rain_3h = Option(if (rain.isDefined) rain.get.get("3h") else null)
              val rain_db : Double = (
                if (!rain_3h.isDefined)
                  0.0 : Double
                else if (rain_3h.get.isInstanceOf[Double])
                  Double.unbox(rain_3h.get)
                else
                  rain_3h.get.asInstanceOf[Integer].toDouble)
              val dt_txt = y.asInstanceOf[BSONObject].get("dt_txt").toString
              (dt_txt, rain_db)
            }).filter(x => x._2 > 0.0).groupBy(y => {
              val date = y._1
              date.substring(0, date.indexOf(' '))
            }).toList.map(x => {
              val date = x._1
              val list = x._2.asInstanceOf[Array[(String,Double)]].map(_._2)
              (date, list.sum)})
        )).filter(x => x._2.length > 0 ).map(x => {
        val _max = x._2.map(_._2).max
        val _min = x._2.map(_._2).min
        (x._1, (_max, _min)) } ).
      map(x => {
        val newProps = new BasicBSONObject()
        newProps.append("stats.max_rain", x._2._1)
        newProps.append("stats.min_rain", x._2._2)
        val query = new BasicBSONObject().append("hist_id", new ObjectId(x._1))
        val update = new BasicBSONObject().append("$set", newProps)
        (null, new MongoUpdateWritable(query, update, false, false))
      })
    saveToMongo(update_minmaxrain, outputConfig)
  }

  /**
   * Averages wind speed over the last 5 days (8 readings/day) per historical
   * document; missing wind readings count as 0.
   */
  def calc_avg_wind_5d() {
    val docs = sc.newAPIHadoopRDD(inputConfig, classOf[MongoInputFormat], classOf[Object], classOf[BSONObject])
    val update_avgwind = docs.
      map(x =>
        (x._1.toString,
          x._2.get("list").asInstanceOf[BasicDBList].toArray().takeRight(8 * 5).
            map(y => {
              val wind = Option(y.asInstanceOf[BSONObject].get("wind").asInstanceOf[BSONObject])
              val wind_speed = Option(if (wind.isDefined) wind.get.get("speed") else null)
              val wind_db : Double = (
                if (!wind_speed.isDefined)
                  0.0 : Double
                else if (wind_speed.get.isInstanceOf[Double])
                  Double.unbox(wind_speed.get)
                else
                  wind_speed.get.asInstanceOf[Integer].toDouble)
              wind_db
            }))).
      map(x =>
        (x._1, x._2.reduce(_ + _)/x._2.length)).
      map(x => {
        // NOTE(review): this stores the *wind* average under the key
        // "stats.avg_rain_last5d" — almost certainly a copy-paste slip for
        // "stats.avg_wind_last5d". Confirm downstream readers of the forecast
        // collection before renaming the field.
        val newProp = new BasicBSONObject().append("stats.avg_rain_last5d", x._2)
        val query = new BasicBSONObject().append("hist_id", new ObjectId(x._1))
        val update = new BasicBSONObject().append("$set", newProp)
        (null, new MongoUpdateWritable(query, update, false, false))
      })
    saveToMongo(update_avgwind, outputConfig)
  }

  /**
   * Computes min/max of the per-day *maximum* wind speed for each historical
   * document and stores them as stats.max_wind / stats.min_wind. Missing and
   * zero wind readings are excluded.
   */
  def calc_minmax_wind() {
    val docs = sc.newAPIHadoopRDD(inputConfig, classOf[MongoInputFormat], classOf[Object], classOf[BSONObject])
    val update_minmaxwind = docs.
      map(x =>
        (x._1.toString,
          x._2.get("list").asInstanceOf[BasicDBList].toArray().
            map(y => {
              val wind = Option(y.asInstanceOf[BSONObject].get("wind").asInstanceOf[BSONObject])
              val wind_speed = Option(if (wind.isDefined) wind.get.get("speed") else null)
              val wind_db : Double = (
                if (!wind_speed.isDefined)
                  0.0 : Double
                else if (wind_speed.get.isInstanceOf[Double])
                  Double.unbox(wind_speed.get)
                else
                  wind_speed.get.asInstanceOf[Integer].toDouble)
              val dt_txt = y.asInstanceOf[BSONObject].get("dt_txt").toString
              (dt_txt, wind_db)
            }).filter(x => x._2 > 0.0).groupBy(y => {
              val date = y._1
              date.substring(0, date.indexOf(' '))
            }).toList.map(x => {
              val date = x._1
              val list = x._2.asInstanceOf[Array[(String,Double)]].map(_._2)
              (date, (if (list.length == 0) None else Some(list.max)) : Option[Double])})
        )).map(x => (x._1, x._2.filter(_._2.isDefined))).map(x => {
        val _max = x._2.map(_._2.get).max
        val _min = x._2.map(_._2.get).min
        (x._1, (_max, _min)) } ).
      map(x => {
        val newProps = new BasicBSONObject()
        newProps.append("stats.max_wind", x._2._1)
        newProps.append("stats.min_wind", x._2._2)
        val query = new BasicBSONObject().append("hist_id", new ObjectId(x._1))
        val update = new BasicBSONObject().append("$set", newProps)
        (null, new MongoUpdateWritable(query, update, false, false))
      })
    saveToMongo(update_minmaxwind, outputConfig)
  }

  /**
   * Entry point: reads config, wires Spark to MongoDB, fixes foreign keys
   * and then runs each statistics job in sequence. An optional first CLI
   * argument overrides the configured read preference.
   */
  def main(args : Array[String]) {
    println(">>>>> Starting...")
    readConfig
    val conf = new SparkConf().setAppName("SysDist")
    sc = new SparkContext(conf)

    // NOTE(review): inputURI is built but never used (the input URI actually
    // applied below comes from getURL, which also honours readPref).
    var inputURI = String.format(baseURLFormat, hosts.reduce(_ +","+ _), inputDB, inputColl, replicaSetName)
    var outputURI = String.format(baseURLFormat, hosts.reduce(_ +","+ _), outputDB, outputColl, replicaSetName)

    if (args.length > 0) {
      readPref = args(0)
    }

    MongoConfigUtil.setOutputURI(outputConfig, outputURI)
    MongoConfigUtil.setInputURI(inputConfig, getURL(hosts, inputDB, inputColl, replicaSetName, readPref))

    val docs = sc.newAPIHadoopRDD(
      inputConfig,
      classOf[MongoInputFormat],
      classOf[Object],
      classOf[BSONObject]
    )

    println(">>>>> There are " + docs.count.toString + " documents in the base")

    println(">>>>> Fixing database's FKs...")
    fixFK
    println(">>>>> Fixed :)")

    println("Calculating temperature records...")
    calc_minmax_temp
    println("Finished...")

    println(">>>>> Calculating avg temp last 5 days")
    calc_avg_temp_last5d
    println(">>>>> Calculations finished")

    println(">>>>> Calculate rain records...")
    calc_minmax_rain
    println(">>>>> Finished...")

    println(">>>>> Calculate wind records...")
    calc_minmax_wind
    println(">>>>> Finished...")

    println(">>>>> Calculate wind avgs...")
    calc_avg_wind_5d
    println(">>>>> Finished...")

    //println(">>>>> Result : " + (sc.parallelize(List.range(1,10000)).sum).toString)
  }
}
| Hold7/DataViz | spark/src/main/scala/SysDist.scala | Scala | mit | 13,183 |
/*
* Copyright 2006-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package util
import java.security.SecureRandom
import java.util.regex._
import java.lang.Character._
import java.lang.{StringBuilder => GoodSB}
import scala.language.implicitConversions
import scala.xml.NodeSeq
import common._
/** Importable module instance of the [[StringHelpers]] trait. */
object StringHelpers extends StringHelpers
/**
* Utility methods for manipulating strings.
*/
trait StringHelpers {

  /** random numbers generator */
  private val _random = new SecureRandom

  /**
   * If str is surrounded by quotes it return the content between the quotes
   */
  def unquote(str: String) = {
    if (str != null && str.length >= 2 && str.charAt(0) == '\\"' && str.charAt(str.length - 1) == '\\"')
      str.substring(1, str.length - 1)
    else
      str
  }

  /**
   * Splits a string of the form <name1=value1, name2=value2, ... > and unquotes the quoted values.
   * The result is a Map[String, String]
   */
  def splitNameValuePairs(props: String): Map[String, String] = {
    // NOTE(review): assumes every comma-separated chunk contains '='; a chunk
    // without one makes pair(1) throw IndexOutOfBoundsException — confirm inputs.
    val list = props.split(",").toList.map(in => {
      val pair = in.roboSplit("=")
      (pair(0), unquote(pair(1)))
    })

    // Fold the pairs into an immutable map (later duplicate keys win).
    val map: Map[String, String] = Map.empty
    (map /: list)((m, next) => m + (next))
  }

  /**
   * Replaces the value found in a string surrounded by <%= ... %> by a replacement according to the value found in the subst Map.<p/>
   * Throws an exception if no correspondance can be found.
   *
   * @param msg string where replacements should be done
   * @param subst map of [regular expression with groups, replacement]
   */
  def processString(msg: String, subst: Map[String, String]): String = {
    val pattern = Pattern.compile("\\\\<\\\\%\\\\=([^\\\\%]*)\\\\%\\\\>")
    val m = pattern.matcher(msg)
    val ret = new StringBuffer
    while (m.find) {
      // subst(key) throws NoSuchElementException for unknown placeholders.
      m.appendReplacement(ret, subst(m.group(1).trim))
    }
    m.appendTail(ret)
    ret.toString
  }

  /**
   * Turn a string of format "FooBar" into snake case "foo_bar"
   *
   * Note: snakify is not reversible, ie. in general the following will _not_ be true:
   *
   * s == camelify(snakify(s))
   *
   * @return the underscored string
   */
  def snakify(name : String) = name.replaceAll("([A-Z]+)([A-Z][a-z])", "$1_$2").replaceAll("([a-z\\\\d])([A-Z])", "$1_$2").toLowerCase

  /**
   * Turns a string of format "foo_bar" into camel case "FooBar"
   *
   * Functional code courtesy of Jamie Webb (j@jmawebb.cjb.net) 2006/11/28
   * @param name the String to CamelCase
   *
   * @return the CamelCased string
   */
  def camelify(name : String): String = {
    // Walk the characters, upper-casing each letter that follows an underscore
    // and dropping the underscores themselves. The leading '_' prepended below
    // forces the very first letter to be capitalized.
    def loop(x : List[Char]): List[Char] = (x: @unchecked) match {
      case '_' :: '_' :: rest => loop('_' :: rest)
      case '_' :: c :: rest => Character.toUpperCase(c) :: loop(rest)
      case '_' :: Nil => Nil
      case c :: rest => c :: loop(rest)
      case Nil => Nil
    }
    if (name == null)
      ""
    else
      loop('_' :: name.toList).mkString
  }

  /**
   * Turn a string of format "foo_bar" into camel case with the first letter in lower case: "fooBar"
   * This function is especially used to camelCase method names.
   *
   * @param name the String to CamelCase
   *
   * @return the CamelCased string
   */
  def camelifyMethod(name: String): String = {
    val tmp: String = camelify(name)
    if (tmp.length == 0)
      ""
    else
      tmp.substring(0,1).toLowerCase + tmp.substring(1)
  }

  /**
   * Capitalize every "word" in the string. A word is either separated by spaces or underscores.
   * @param in string to capify
   * @return the capified string
   */
  def capify(in: String): String = {
    val tmp = ((in match {
      case null => ""
      case s => s
    }).trim match {
      case "" => "n/a"
      case s => s
    }).toLowerCase
    val sb = new GoodSB
    // 250 caps the output length; see the private recursive helper below.
    capify(tmp, 0, 250, false, false, sb)
    sb.toString
  }

  /**
   * Recursive worker for [[capify]]: walks `in` from `pos`, appending to `out`.
   * A letter starting a word is upper-cased, letters following a letter keep
   * their case, runs of separators (' ' or '_') collapse to one, and any other
   * character is dropped. Processing stops after `max` characters.
   */
  private def capify(in: String, pos: Int, max: Int, lastLetter: Boolean, lastSymbol: Boolean, out: GoodSB): Unit = {
    if (pos >= max || pos >= in.length) return
    else {
      in.charAt(pos) match {
        case c if Character.isDigit(c) => out.append(c); capify(in, pos + 1, max, false, false, out)
        case c if Character.isLetter(c) => out.append(if (lastLetter) c else Character.toUpperCase(c)) ; capify(in, pos + 1, max, true, false, out)
        case c if (c == ' ' || c == '_') && !lastSymbol => out.append(c) ; capify(in, pos + 1, max, false, true, out)
        case _ => capify(in, pos + 1, max, false, true, out)
      }
    }
  }

  /**
   * Remove all the characters from a string exception a-z, A-Z, 0-9, and '_'
   * @return the cleaned string and an empty string if the input is null
   */
  def clean(in : String) = if (in == null) "" else in.replaceAll("[^a-zA-Z0-9_]", "")

  /**
   * Create a random string of a given size. 5 bits of randomness per character
   * @param size size of the string to create. Must be a positive integer.
   * @return the generated string
   */
  def randomString(size: Int): String = {
    def addChar(pos: Int, lastRand: Int, sb: GoodSB): GoodSB = {
      if (pos >= size) sb
      else {
        // Draw a fresh random Int every 6 characters; each character then
        // consumes 5 of its bits (6 * 5 = 30 bits <= 32).
        val randNum = if ((pos % 6) == 0) {
          _random.synchronized(_random.nextInt)
        } else {
          lastRand
        }

        // Map the low 5 bits onto 'A'-'Z' (0-25) and '0'-'5' (26-31).
        sb.append((randNum & 0x1f) match {
          case n if n < 26 => ('A' + n).toChar
          case n => ('0' + (n - 26)).toChar
        })
        addChar(pos + 1, randNum >> 5, sb)
      }
    }
    addChar(0, 0, new GoodSB(size)).toString
  }

  /**
   * Create the unicode value of a character
   * @param in character
   * @return the unicode value as a string starting by \\\\u
   */
  def escChar(in: Char): String = {
    val ret = Integer.toString(in.toInt, 16)
    "\\\\u"+("0000".substring(ret.length)) + ret
  }

  /**
   * Split a string separated by a point or by a column in 2 parts. Uses default values if only one is found or if no parts are found
   * @param in string to split
   * @param first default value for the first part if no split can be done
   * @param second default value for the second part if one or less parts can be found
   * @return a pair containing the first and second parts
   */
  def splitColonPair(in: String, first: String, second: String): (String, String) = {
    (in match {
      case null => List("")
      case s if s.indexOf(".") != -1 => s.roboSplit("\\\\.")
      case s => s.roboSplit(":")
    }) match {
      case f :: s :: _ => (f,s)
      case f :: Nil => (f, second)
      case _ => (first, second)
    }
  }

  /** @return an Empty can if the node seq is empty and a full can with the NodeSeq text otherwise */
  implicit def nodeSeqToOptionString(in: NodeSeq): Box[String] = if (in.length == 0) Empty else Full(in.text)

  /**
   * Parse a string and return the Long value of that string.<p/>
   * The string can start with '-' if it is a negative number or '+' for a positive number
   * @return the Long value of the input String
   */
  def parseNumber(tin: String): Long = {
    def cToL(in: Char) = in.toLong - '0'.toLong
    // Accumulate leading digits only; parsing stops at the first non-digit.
    def p(in: List[Char]) = in.takeWhile(isDigit).foldLeft(0L)((acc,c) => (acc * 10L) + cToL(c))

    if (tin eq null) 0L
    else {
      tin.trim.toList match {
        case '-' :: xs => -p(xs)
        case '+' :: xs => p(xs)
        case xs => p(xs)
      }
    }
  }

  /**
   * Creates a List of Strings from two Strings
   */
  def listFromStrings(s1: String, s2: String): List[String] = List(s1, s2)

  /**
   * Creates a List of Strings from a List[String] and a String
   */
  def listFromListAndString(lst: List[String], s: String): List[String] =
    lst ::: List(s)

  /**
   * Split a string according to a separator
   * @param sep a regexp to use with the String::split method
   * @return a list of trimmed parts whose length is > 0
   */
  def roboSplit(what: String, sep: String): List[String] =
    what match {
      case null => Nil
      case s => s.split(sep).toList.map(_.trim).filter(_.length > 0)
    }

  /**
   * Faster than roboSplit... this method splits Strings at a given
   * character
   */
  def charSplit(what: String, sep: Char): List[String] =
    what match {
      case null => Nil
      case str => {
        val ret = new scala.collection.mutable.ListBuffer[String]

        val len = str.length
        var pos = 0
        var lastPos = 0

        // Collect the segment between separators; empty segments are dropped
        // (consecutive separators produce no entry).
        while (pos < len) {
          if (str.charAt(pos) == sep) {
            if (pos > lastPos) {
              val ns = str.substring(lastPos, pos)
              ret += ns
            }

            lastPos = pos + 1
          }
          pos += 1
        }

        // Trailing segment after the last separator.
        if (pos > lastPos) {
          ret += str.substring(lastPos, pos)
        }

        ret.toList
      }
    }

  /**
   * Split a string in 2 parts at the first place where a separator is found
   * @return a List containing a pair of the 2 trimmed parts
   */
  def splitAt(what: String, sep: String): List[(String, String)] = {
    // NOTE(review): the early 'return' here is non-idiomatic Scala but the
    // control flow is local to this method and safe.
    if (null eq what)
      return Nil
    else
      what.indexOf(sep) match {
        case -1 => Nil
        case n => List((what.substring(0, n).trim, what.substring(n + sep.length).trim))
      }
  }

  /**
   * Encode the string to be including in JavaScript, replacing '\\' or '\\\\' or non-ASCII characters by their unicode value
   * @return the encoded string inserted into quotes
   */
  def encJs(what: String): String = {
    if (what eq null) "null"
    else {
      val len = what.length
      val sb = new GoodSB(len * 2)
      sb.append('"')
      var pos = 0
      while (pos < len) {
        what.charAt(pos) match {
          case c @ ('\\\\' | '\\'') => sb.append(escChar(c))
          case '"' => sb.append("\\\\\\"")
          // Escape control chars, non-printable-ASCII and ']' as \\uXXXX.
          case c if c < ' ' || c > '~' || c == ']' || c.toInt >= 127 => sb.append(escChar(c))
          case c => sb.append(c)
        }
        pos += 1
      }
      sb.append('"')
      sb.toString
    }
  }

  /**
   * Add commas before the last 3 characters
   * @return the string with commas
   */
  def commafy(what: String): String = {
    if (null eq what) null
    else {
      // Work on the reversed string so groups of three count from the right.
      val toDo = what.toList.reverse

      def commaIt(in: List[Char]): List[Char] = in match {
        case Nil => in
        case x :: Nil => in
        case x1 :: x2 :: Nil => in
        case x1 :: x2 :: x3 :: Nil => in
        case x1 :: x2 :: x3 :: xs => x1 :: x2 :: x3 :: ',' :: commaIt(xs)
      }
      commaIt(toDo).reverse.mkString("")
    }
  }

  /** @return a SuperString with more available methods such as roboSplit or commafy */
  implicit def stringToSuper(in: String): SuperString = new SuperString(in)

  /** @return a SuperString with more available methods such as roboSplit or commafy */
  implicit def listStringToSuper(in: List[String]): SuperListString = new SuperListString(in)

  /**
   * Test for null and return either the given String if not null or the blank String.
   */
  def blankForNull(s: String) = if (s != null) s else ""

  /**
   * Turn a String into a Box[String], with Empty for the blank string.
   *
   * A string containing only spaces is considered blank.
   *
   * @return Full(s.trim) if s is not null or blank, Empty otherwise
   */
  def emptyForBlank(s: String) = blankForNull(s).trim match {
    case "" => Empty
    case s => Full(s)
  }
}
/**
 * Enriches List[String] with a `/` operator so paths can be built by
 * chaining: "foo" / "bar" / "baz".
 */
final class SuperListString(lst: List[String]) {
  /** Appends `str` to the wrapped list, enabling chained `/` calls. */
  def /(str: String): List[String] = lst :+ str
}
/**
 * The SuperString class adds functionalities to the String class.
 * Every method simply delegates to the corresponding implementation on
 * [[Helpers]], with the wrapped string as first argument.
 */
final case class SuperString(what: String) {
  /**
   * Split a string according to a separator
   * @param sep a regexp to use with the String::split method
   * @return a list of trimmed parts whose length is > 0
   */
  def roboSplit(sep: String): List[String] = Helpers.roboSplit(what, sep)

  /**
   * Faster than roboSplit... this method splits Strings at a given
   * character
   */
  def charSplit(sep: Char): List[String] = Helpers.charSplit(what, sep)

  /**
   * Split a string in 2 parts at the first place where a separator is found
   * @return a List containing a pair of the 2 trimmed parts
   */
  def splitAt(sep: String): List[(String, String)] = Helpers.splitAt(what, sep)

  /**
   * Encode the string to be including in JavaScript, replacing '\\' or '\\\\' or non-ASCII characters by their unicode value
   * @return the encoded string inserted into quotes
   */
  def encJs: String = Helpers.encJs(what)

  /**
   * Add commas before the last 3 characters
   * @return the string with commas
   */
  def commafy: String = Helpers.commafy(what)

  /**
   * Create a List of Strings using the / method so you
   * can write "foo" / "bar"
   */
  def /(other: String): List[String] = List(what, other)
}
| sortable/framework | core/util/src/main/scala/net/liftweb/util/StringHelpers.scala | Scala | apache-2.0 | 13,604 |
package com.tibidat
import scala.slick.driver.JdbcProfile
import scala.slick.driver.H2Driver
/** Cake-pattern slice exposing the Slick JDBC profile used by the data layer. */
trait Profile {
  // Concrete database driver (e.g. H2Driver, imported above) supplied by the
  // class that mixes this trait in.
  val profile: JdbcProfile
}
/**
 * Data access layer: aggregates the table components and provides schema
 * lifecycle helpers (create / drop / purge).
 *
 * @param profile the Slick JDBC profile (driver) to run against
 */
class DAL(override val profile: JdbcProfile) extends NoteComponent with Profile {
  import profile.simple._
  import scala.util.control.NonFatal

  /** Combined DDL for every table managed by this layer. */
  val ddl = notes.ddl

  /** Creates the schema; a failure is assumed to mean it already exists. */
  def create(implicit session: Session): Unit = {
    try {
      ddl.create
    } catch {
      // NonFatal (instead of a blanket Exception) lets truly fatal errors
      // such as OutOfMemoryError or InterruptedException propagate.
      case NonFatal(_) => println("Could not create database.... assuming it already exists")
    }
  }

  /** Drops the schema, ignoring non-fatal failures (e.g. schema absent). */
  def drop(implicit session: Session): Unit = {
    try {
      ddl.drop
    } catch {
      case NonFatal(_) => println("Could not drop database")
    }
  }

  /** Recreates the schema from scratch: drop followed by create. */
  def purge(implicit session: Session) = { drop; create }
}
| m2w/ephemeral | src/main/scala/com/tibidat/DAL.scala | Scala | mit | 698 |
/*
* Copyright 2012 Eike Kettner
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eknet.publet.auth
import org.eknet.publet.vfs.Container
/**
* @author Eike Kettner eike.kettner@gmail.com
* @since 04.11.12 16:03
*/
trait Authorizable {
this: Container =>
def isAuthorized(action: ResourceAction.Action): Boolean
}
| eikek/publet | auth/src/main/scala/org/eknet/publet/auth/Authorizable.scala | Scala | apache-2.0 | 851 |
package ru.stachek66.tools
import java.io.{IOException, File, FileInputStream}
import java.util.zip.GZIPInputStream
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream
import org.slf4j.LoggerFactory
import scala.util.Try
/**
* *.tar.gz files decompression tool
* alexeyev
* 31.08.14.
*/
private object TarGz extends Decompressor {
private val log = LoggerFactory.getLogger(getClass)
def traditionalExtension: String = "tar.gz"
/**
* Untars -single- file
*/
@throws(classOf[IOException])
def unpack(src: File, dst: File): File = {
log.debug(s"Unpacking $src to $dst...")
val tarIn =
new TarArchiveInputStream(
new GZIPInputStream(
new FileInputStream(src)))
val result = copyUncompressedAndClose(tarIn, dst)
log.debug(s"Done.")
result
}
}
| alexeyev/mystem-scala | src/main/scala/ru/stachek66/tools/TarGz.scala | Scala | mit | 834 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.junit
import scala.concurrent.Future
/* Use the queue execution context (based on JS promises) explicitly:
* We do not have anything better at our disposal and it is accceptable in
* terms of fairness: All we use it for is to map over a completed Future once.
*/
import scala.scalajs.concurrent.JSExecutionContext.Implicits.queue
import scala.util.{Try, Success}
package object async {
type AsyncResult = Future[Try[Unit]]
def await(f: Future[_]): AsyncResult = f.map(_ => Success(()))
}
| scala-js/scala-js | junit-async/js/src/main/scala/org/scalajs/junit/async/package.scala | Scala | apache-2.0 | 793 |
package org.jetbrains.jps.incremental.scala
import org.jetbrains.jps.incremental.ModuleLevelBuilder.ExitCode
import org.jetbrains.jps.incremental.scala.data.{CompilationData, CompilerData, SbtData}
/**
* @author Pavel Fatin
*/
trait Server {
def compile(sbtData: SbtData, compilerData: CompilerData, compilationData: CompilationData, client: Client): ExitCode
} | triplequote/intellij-scala | scala/compiler-jps/src/org/jetbrains/jps/incremental/scala/Server.scala | Scala | apache-2.0 | 367 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
**/
package com.solidfire.jsvcgen.codegen
import com.solidfire.jsvcgen.codegen
import com.solidfire.jsvcgen.model.{ServiceDefinition, TypeDefinition}
import scala.collection.immutable.Map
import scala.reflect.ClassTag
class PythonCodeGenerator( options: CliConfig )
extends BaseCodeGenerator( options, nickname = Some( "python" ) ) {
def formatTypeName( src: String ) = Util.camelCase( src, firstUpper = true )
override def groupItemsToFiles( service: ServiceDefinition ): Map[String, Any] = {
val types = service.types.filter( _.alias.isEmpty )
.map( typeDef => (pathFor( typeDef ), typeDef) )
.groupBy( _._1 )
.mapValues( _.map( _._2 ) )
Map( pathFor( service ) -> service ) ++ types
}
def pathFor( service: ServiceDefinition ) = {
getProjectPathFromNamespace + "__init__.py"
}
def pathFor( typ: TypeDefinition ) = {
if (typ.name.endsWith( "Result" ))
getProjectPathFromNamespace + "results.py"
else
getProjectPathFromNamespace + "models.py"
}
private def getProjectPathFromNamespace: String = {
val splitNamespace = options.namespace.split( '.' )
val projectPath = splitNamespace.drop( splitNamespace.indexWhere( _ == options.output.getName ) + 1 )
val path = codegen.Util.pathForNamespace( projectPath.mkString( "." ) ) + "/"
path
}
override def getTemplatePath[T]( )( implicit tag: ClassTag[T] ) = {
if (tag.runtimeClass.getSuperclass.getSimpleName.endsWith( "List" ))
"/codegen/" + nickname.getOrElse( getClass.getName ) + "/TypeDefinitions.ssp"
else
super.getTemplatePath[T]
}
override protected def getDefaultMap[T]( service: ServiceDefinition, value: T )( implicit tag: ClassTag[T] ): Map[String, Any] =
super.getDefaultMap( service, value ) ++ Map( "format" -> new PythonCodeFormatter( options, service ) )
}
| solidfire/jsvcgen | jsvcgen/src/main/scala/com/solidfire/jsvcgen/codegen/PythonCodeGenerator.scala | Scala | apache-2.0 | 2,667 |
package spark.streaming.sql
import spark.RDD
import spark.streaming.{Duration, DStream, Job, Time}
private[streaming]
class SQLForEachDStream[T: ClassManifest] (
parent: SQLOperatorDStream[T],
foreachFunc: (RDD[T], Time) => Unit
) extends SQLOperatorDStream[T](parent.ssc) {
ssc.registerOutputStream(this)
override def dependencies = List(parent)
override def slideDuration: Duration = parent.slideDuration
override def compute(validTime: Time, child : SQLOperatorDStream[T], amILeftParent : Boolean): Option[RDD[T]] = None
override def generateJob(time: Time): Option[Job] = {
parent.getOrCompute(time, this, true) match {
case Some(rdd) =>
val jobFunc = () => {
foreachFunc(rdd, time)
}
Some(new Job(time, jobFunc))
case None => None
}
}
}
| pxgao/spark-0.7.3 | streaming/src/main/scala/spark/streaming/sql/SQLForEachDStream.scala | Scala | bsd-3-clause | 825 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
import play.sbt.PlayScala
import sbt.Keys._
import sbt._
object ApplicationBuild extends Build {
val appName = "secret-sample"
val appVersion = "1.0-SNAPSHOT"
val Secret = """(?s).*play.http.secret.key="(.*)".*""".r
val main = Project(appName, file(".")).enablePlugins(PlayScala).settings(
version := appVersion,
TaskKey[Unit]("checkSecret") := {
val file = IO.read(baseDirectory.value / "conf/application.conf")
file match {
case Secret("changeme") => throw new RuntimeException("secret not changed!!\\n" + file)
case Secret(_) =>
case _ => throw new RuntimeException("secret not found!!\\n" + file)
}
}
)
}
| wsargent/playframework | framework/src/sbt-plugin/src/sbt-test/play-sbt-plugin/secret/project/Build.scala | Scala | apache-2.0 | 754 |
package org.openeyes.api.utils
/**
* Created by stu on 22/09/2014.
*/
object String {
def getObjectClassName(obj: Object) = {
obj.getClass.getName
}
}
| openeyes/poc-backend | src/main/scala/org/openeyes/api/utils/String.scala | Scala | gpl-3.0 | 163 |
/**
* Copyright 2017 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftmodules.kafkaactors
import org.apache.kafka.common._
import org.apache.kafka.clients.consumer._
import java.util.{Map => JMap}
/**
* This is the parent trait for messages that KafkaActors handle internally. It's not possible
* for user code to intercept any messages that subclass this trait.
*/
sealed trait InternalKafkaActorMessage
/**
* Tells the actor to signal the consumer thread to commit the enclosed offsets.
*
* This message is essentially a checkpoint for consumption. Once the offsets are committed
* the messages before that point won't be reconsumed if the actor crashes and restarts.
*/
case class CommitOffsets(
offsets: JMap[TopicPartition, OffsetAndMetadata]
) extends InternalKafkaActorMessage
/**
* Instruction for the KafkaActor to start consuming messages from Kafka.
*
* User code must send this message to newly created Kafka actors to cause them to start
* consuming from Kafka. Until this message is sent, a KafkaActor is no more than a regular
* LiftActor. If you wish to have Kafka-consuming behavior toggleable, you should be able to
* add code paths that do or don't send this message to the relevant actor.
*/
case object StartConsumer extends InternalKafkaActorMessage
/**
* Instruction for the KafkaActor to stop consuming messages from Kafka.
*
* We recommend sending this message once your application knows its going to shut down so that
* consumption can finish up cleanly.
*/
case object StopConsumer extends InternalKafkaActorMessage
| liftmodules/kafka-actors | src/main/scala/net/liftmodules/kafkaactors/InternalKafkaActorMessage.scala | Scala | apache-2.0 | 2,129 |
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
package org.deeplearning4j.scalnet.layers.advanced.activations
import org.deeplearning4j.nn.conf.layers.{ ActivationLayer => JActivationLayer }
import org.deeplearning4j.scalnet.layers.core.Layer
import org.nd4j.linalg.activations.impl.ActivationLReLU
/**
* LeakyReLU layer
*
* @author Max Pumperla
*/
class LeakyReLU(alpha: Double, nOut: Option[List[Int]], nIn: Option[List[Int]], override val name: String = "")
extends Layer {
override def compile: org.deeplearning4j.nn.conf.layers.Layer =
new JActivationLayer.Builder()
.activation(new ActivationLReLU(alpha))
.name(name)
.build()
override val outputShape: List[Int] = nOut.getOrElse(List(0))
override val inputShape: List[Int] = nIn.getOrElse(List(0))
override def reshapeInput(newIn: List[Int]): LeakyReLU =
new LeakyReLU(alpha, Some(newIn), Some(newIn), name)
}
object LeakyReLU {
def apply(alpha: Double, nOut: Int = 0, nIn: Int = 0, name: String = ""): LeakyReLU =
new LeakyReLU(alpha, Some(List(nOut)), Some(List(nIn)), name)
}
| deeplearning4j/deeplearning4j | scalnet/src/main/scala/org/deeplearning4j/scalnet/layers/advanced/activations/LeakyReLU.scala | Scala | apache-2.0 | 1,822 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package tools.nsc
package doc
package model
import base.comment._
import diagram._
import scala.collection._
import scala.util.matching.Regex
import scala.reflect.macros.internal.macroImpl
import symtab.Flags
import io._
import model.{ RootPackage => RootPackageEntity }
/** This trait extracts all required information for documentation from compilation units */
class ModelFactory(val global: Global, val settings: doc.Settings) {
thisFactory: ModelFactory
with ModelFactoryImplicitSupport
with ModelFactoryTypeSupport
with DiagramFactory
with CommentFactory
with TreeFactory
with MemberLookup =>
import global._
import definitions.{ ObjectClass, NothingClass, AnyClass, AnyValClass, AnyRefClass, AnnotationClass }
import rootMirror.{ RootPackage, EmptyPackage }
import ModelFactory._
  // Number of documented templates: cached doc templates minus the packages dropped from the model.
  def templatesCount = docTemplatesCache.count(_._2.isDocTemplate) - droppedPackages.size
  // Becomes true once `makeModel` has built the root package; several entity
  // constructors assert on this flag to enforce creation-phase invariants.
  private var _modelFinished = false
  def modelFinished: Boolean = _modelFinished
  // The universe under construction; assigned from inside `makeModel`.
  private var universe: Universe = null
  /** Builds the documentation model for the current compilation run.
   *
   *  Creates the `Universe` (which eagerly creates the root package and,
   *  transitively, all doc templates), marks the model as finished, then
   *  completes cross-entity links. Returns `None` when no root package
   *  could be created (e.g. nothing to document).
   */
  def makeModel: Option[Universe] = {
    val universe = new Universe { thisUniverse =>
      // NOTE: `thisFactory.universe` must be set before `createRootPackage`
      // runs, since entity constructors read it.
      thisFactory.universe = thisUniverse
      val settings = thisFactory.settings
      val rootPackage = modelCreation.createRootPackage
    }
    _modelFinished = true
    // complete the links between model entities, everything that couldn't have been done before
    universe.rootPackage.completeModel()
    Some(universe) filter (_.rootPackage != null)
  }
  // Mutable factory state, populated while the model is being built:
  var ids = 0
  // Packages excluded from the model (checked by `packageDropped` / `PackageImpl.packages`).
  private val droppedPackages = mutable.Set[PackageImpl]()
  // Symbol -> documented template; insertion order preserved (LinkedHashMap).
  protected val docTemplatesCache = new mutable.LinkedHashMap[Symbol, DocTemplateImpl]
  // Symbol -> known-but-undocumented template (see NoDocTemplateImpl).
  protected val noDocTemplatesCache = new mutable.LinkedHashMap[Symbol, NoDocTemplateImpl]
def packageDropped(tpl: DocTemplateImpl) = tpl match {
case p: PackageImpl => droppedPackages(p)
case _ => false
}
  // This unsightly hack closes issue scala/bug#4086.
  // A clone of `Object.synchronized` whose value parameter's info is wrapped
  // in `byNameType`, so the documented signature shows a by-name argument.
  // `MemberImpl.commentCarryingSymbol` maps this clone back to the real
  // `Object_synchronized` when looking up doc comments.
  private lazy val modifiedSynchronized: Symbol = {
    val sym = definitions.Object_synchronized
    // @unchecked: the shape PolyType(_, MethodType(List(_), _)) is assumed; any
    // other shape would throw a MatchError here.
    val info = (sym.info: @unchecked) match {
      case PolyType(ts, MethodType(List(bp), mt)) =>
        val cp = bp.cloneSymbol.setPos(bp.pos).setInfo(definitions.byNameType(bp.info))
        PolyType(ts, MethodType(List(cp), mt))
    }
    sym.cloneSymbol.setPos(sym.pos).setInfo(info)
  }
def optimize(str: String): String =
if (str.length < 16) str.intern else str
/* ============== IMPLEMENTATION PROVIDING ENTITY TYPES ============== */
  /** Base implementation for every model entity: binds a compiler `Symbol` to
   *  the template that owns it and derives name, path-to-root, annotations, etc.
   */
  abstract class EntityImpl(val sym: Symbol, val inTpl: TemplateImpl) extends Entity {
    val name = optimize(sym.nameString)
    val universe = thisFactory.universe
    // Debugging:
    // assert(id != 36, sym + " " + sym.getClass)
    //println("Creating entity #" + id + " [" + kind + " " + qualifiedName + "] for sym " + sym.kindString + " " + sym.ownerChain.reverse.map(_.name).mkString("."))
    def inTemplate: TemplateImpl = inTpl
    def toRoot: List[EntityImpl] = this :: inTpl.toRoot
    // Unqualified by default; templates and members override with owner-qualified names.
    def qualifiedName = name
    // `macroImpl` annotations are compiler bookkeeping, not user documentation.
    def annotations = sym.annotations.filterNot(_.atp =:= typeOf[macroImpl]).map(makeAnnotation)
    def inPackageObject: Boolean = sym.owner.isModuleClass && sym.owner.sourceModule.isPackageObject
    def isType = sym.name.isTypeName
  }
trait TemplateImpl extends EntityImpl with TemplateEntity {
override def qualifiedName: String =
if (inTemplate == null || inTemplate.isRootPackage) name else optimize(inTemplate.qualifiedName + "." + name)
def isPackage = sym.hasPackageFlag
def isTrait = sym.isTrait
def isClass = sym.isClass && !sym.isTrait
def isObject = sym.isModule && !sym.hasPackageFlag
def isCase = sym.isCase
def isRootPackage = false
def selfType = if (sym.thisSym eq sym) None else Some(makeType(sym.thisSym.typeOfThis, this))
}
  /** Base implementation for all member entities (defs, vals, types, constructors, ...):
   *  comment lookup, visibility, flags, deprecation/migration notes, result type and
   *  the signatures used for linking.
   */
  abstract class MemberImpl(sym: Symbol, inTpl: DocTemplateImpl) extends EntityImpl(sym, inTpl) with MemberEntity {
    // If the current tpl is a DocTemplate, we consider itself as the root for resolving link targets (instead of the
    // package the class is in) -- so people can refer to methods directly [[foo]], instead of using [[MyClass.foo]]
    // in the doc comment of MyClass
    def linkTarget: DocTemplateImpl = inTpl
    // if there is a field symbol, the ValDef will use it, which means docs attached to it will be under the field symbol, not the getter's
    protected[this] def commentCarryingSymbol(sym: Symbol) =
      if (sym == modifiedSynchronized) definitions.Object_synchronized
      else if (sym.hasAccessorFlag && sym.accessed.exists) sym.accessed
      else sym
    lazy val comment = thisFactory.comment(commentCarryingSymbol(sym), linkTarget, inTpl)
    def group = comment flatMap (_.group) getOrElse defaultGroup
    override def inTemplate = inTpl
    override def toRoot: List[MemberImpl] = this :: inTpl.toRoot
    // The template that defines this member, followed by the owners of all overridden symbols.
    def inDefinitionTemplates =
      if (inTpl == null)
        docTemplatesCache(RootPackage) :: Nil
      else
        makeTemplate(sym.owner)::(sym.allOverriddenSymbols map { inhSym => makeTemplate(inhSym.owner) })
    // Maps symbol access flags/boundaries to the model's visibility hierarchy.
    def visibility = {
      if (sym.isPrivateLocal) PrivateInInstance()
      else if (sym.isProtectedLocal) ProtectedInInstance()
      else {
        val qual =
          if (sym.hasAccessBoundary) {
            val qualTpl = makeTemplate(sym.privateWithin)
            if (qualTpl != inTpl) Some(qualTpl)
            else None
          } else None
        def tp(c: TemplateImpl) = makeType(c.sym.tpe, inTpl)
        if (sym.isPrivate) PrivateInTemplate(None)
        else if (sym.isProtected) ProtectedInTemplate(qual.map(tp))
        else qual match {
          case Some(q) => PrivateInTemplate(Some(tp(q)))
          case None => Public()
        }
      }
    }
    // Modifier badges ("implicit", "sealed", "abstract", "final", "macro") rendered next to the member.
    def flags = {
      val fgs = mutable.ListBuffer.empty[Paragraph]
      if (sym.isImplicit) fgs += Paragraph(Text("implicit"))
      if (sym.isSealed) fgs += Paragraph(Text("sealed"))
      if (!sym.isTrait && (sym hasFlag Flags.ABSTRACT)) fgs += Paragraph(Text("abstract"))
      /* Resetting the DEFERRED flag is a little trick here for refined types: (example from scala.collections)
       * {{{
       * implicit def traversable2ops[T](t: scala.collection.GenTraversableOnce[T]) = new TraversableOps[T] {
       *   def isParallel = ...
       * }}}
       * the type the method returns is TraversableOps, which has all-abstract symbols. But in reality, it couldn't have
       * any abstract terms, otherwise it would fail compilation. So we reset the DEFERRED flag. */
      if (!sym.isTrait && (sym hasFlag Flags.DEFERRED) && (!isImplicitlyInherited)) fgs += Paragraph(Text("abstract"))
      if (!sym.isModule && (sym hasFlag Flags.FINAL)) fgs += Paragraph(Text("final"))
      if (sym.isMacro) fgs += Paragraph(Text("macro"))
      fgs.toList
    }
    // Deprecation note: combines @deprecated message/version when present,
    // otherwise falls back to the comment's @deprecated tag.
    def deprecation =
      if (sym.isDeprecated)
        Some((sym.deprecationMessage, sym.deprecationVersion) match {
          case (Some(msg), Some(ver)) => parseWiki("''(Since version " + ver + ")'' " + msg, NoPosition, inTpl)
          case (Some(msg), None) => parseWiki(msg, NoPosition, inTpl)
          case (None, Some(ver)) => parseWiki("''(Since version " + ver + ")''", NoPosition, inTpl)
          case (None, None) => Body(Nil)
        })
      else
        comment flatMap { _.deprecated }
    // Same pattern as `deprecation`, for the @migration annotation.
    def migration =
      if(sym.hasMigrationAnnotation)
        Some((sym.migrationMessage, sym.migrationVersion) match {
          case (Some(msg), Some(ver)) => parseWiki("''(Changed in version " + ver + ")'' " + msg, NoPosition, inTpl)
          case (Some(msg), None) => parseWiki(msg, NoPosition, inTpl)
          case (None, Some(ver)) => parseWiki("''(Changed in version " + ver + ")''", NoPosition, inTpl)
          case (None, None) => Body(Nil)
        })
      else
        None
    def resultType = {
      def resultTpe(tpe: Type): Type = tpe match { // similar to finalResultType, except that it leaves singleton types alone
        case PolyType(_, res) => resultTpe(res)
        case MethodType(_, res) => resultTpe(res)
        case NullaryMethodType(res) => resultTpe(res)
        case _ => tpe
      }
      // For implicitly-inherited members, the result type is computed relative to the conversion target.
      val tpe = byConversion.fold(sym.tpe) (_.toType memberInfo sym)
      makeTypeInTemplateContext(resultTpe(tpe), inTemplate, sym)
    }
    // Kind predicates; subclasses (Def/Val/Constructor/...) override the relevant one.
    def isDef = false
    def isVal = false
    def isLazyVal = false
    def isVar = false
    def isConstructor = false
    def isAliasType = false
    def isAbstractType = false
    def isAbstract =
      // for the explanation of conversion == null see comment on flags
      ((!sym.isTrait && ((sym hasFlag Flags.ABSTRACT) || (sym hasFlag Flags.DEFERRED)) && (!isImplicitlyInherited)) ||
      sym.isAbstractClass || sym.isAbstractType) && !sym.isSynthetic
    def signature = externalSignature(sym)
    // Legacy signature string (name + type params + value params + result), whitespace-stripped for links.
    lazy val signatureCompat = {
      def defParams(mbr: Any): String = mbr match {
        case d: MemberEntity with Def =>
          val paramLists: List[String] =
            if (d.valueParams.isEmpty) Nil
            else d.valueParams.map(ps => ps.map(_.resultType.name).mkString("(",",",")"))
          paramLists.mkString
        case _ => ""
      }
      def tParams(mbr: Any): String = mbr match {
        case hk: HigherKinded if hk.typeParams.nonEmpty =>
          def boundsToString(hi: Option[TypeEntity], lo: Option[TypeEntity]): String = {
            def bound0(bnd: Option[TypeEntity], pre: String): String = bnd match {
              case None => ""
              case Some(tpe) => pre ++ tpe.toString
            }
            bound0(hi, "<:") ++ bound0(lo, ">:")
          }
          "[" + hk.typeParams.map(tp => tp.variance + tp.name + tParams(tp) + boundsToString(tp.hi, tp.lo)).mkString(", ") + "]"
        case _ => ""
      }
      (name + tParams(this) + defParams(this) +":"+ resultType.name).replaceAll("\\\\s","") // no spaces allowed, they break links
    }
    // these only apply for NonTemplateMemberEntities
    def useCaseOf: Option[MemberImpl] = None
    def byConversion: Option[ImplicitConversionImpl] = None
    def isImplicitlyInherited = false
    def isShadowedImplicit = false
    def isAmbiguousImplicit = false
    def isShadowedOrAmbiguousImplicit = false
  }
  /** A template that is not documented at all. The class is instantiated during lookups, to indicate that the class
   *  exists, but should not be documented (either it's not included in the source or it's not visible)
   */
  class NoDocTemplateImpl(sym: Symbol, inTpl: TemplateImpl) extends EntityImpl(sym, inTpl) with TemplateImpl with HigherKindedImpl with NoDocTemplate {
    // Only created after the model is finished, and at most once per symbol.
    assert(modelFinished, this)
    assert(!(noDocTemplatesCache isDefinedAt sym), (sym, noDocTemplatesCache(sym)))
    noDocTemplatesCache += (sym -> this)
    def isDocTemplate = false
  }
  /** An inherited template that was not documented in its original owner - example:
   * in classpath: trait T { class C } -- T (and implicitly C) are not documented
   * in the source: trait U extends T -- C appears in U as a MemberTemplateImpl -- that is, U has a member for it
   * but C doesn't get its own page
   */
  abstract class MemberTemplateImpl(sym: Symbol, inTpl: DocTemplateImpl) extends MemberImpl(sym, inTpl) with TemplateImpl with HigherKindedImpl with MemberTemplateEntity {
    // no templates cache for this class, each owner gets its own instance
    def isDocTemplate = false
    lazy val definitionName = optimize(inDefinitionTemplates.head.qualifiedName + "." + name)
    def valueParams: List[List[ValueParam]] = Nil /** TODO, these are now only computed for DocTemplates */
    // Parents of the template, as seen from the template's own this-type.
    // Alias types use the dealiased parents; abstract types use the upper bound.
    def parentTypes =
      if (sym.hasPackageFlag || sym == AnyClass) List() else {
        val tps = (this match {
          case a: AliasType => sym.tpe.dealias.parents
          case a: AbstractType => sym.info.bounds match {
            case TypeBounds(lo, RefinedType(parents, decls)) => parents
            case TypeBounds(lo, hi) => hi :: Nil
            case _ => Nil
          }
          case _ => sym.tpe.parents
        }) map { _.asSeenFrom(sym.thisType, sym) }
        makeParentTypes(RefinedType(tps, EmptyScope), Some(this), inTpl)
      }
  }
  /** The instantiation of `TemplateImpl` triggers the creation of the following entities:
   *  All ancestors of the template and all non-package members.
   */
  abstract class DocTemplateImpl(sym: Symbol, inTpl: DocTemplateImpl) extends MemberTemplateImpl(sym, inTpl) with DocTemplateEntity {
    // Doc templates may only be created while the model is being built, once per symbol.
    assert(!modelFinished, (sym, inTpl))
    assert(!(docTemplatesCache isDefinedAt sym), sym)
    docTemplatesCache += (sym -> this)
    if (settings.verbose)
      inform("Creating doc template for " + sym)
    override def linkTarget: DocTemplateImpl = this
    override def toRoot: List[DocTemplateImpl] = this :: inTpl.toRoot
    // The symbol used for source position / linearization; PackageImpl overrides
    // this to prefer the package object.
    protected def reprSymbol: Symbol = sym
    def inSource = {
      val sourceFile = reprSymbol.sourceFile
      if (sourceFile != null && !reprSymbol.isSynthetic)
        Some((sourceFile, reprSymbol.pos.line))
      else
        None
    }
    // Builds the external source URL by substituting FILE_PATH/FILE_EXT/... variables
    // in the -doc-source-url setting; None when the setting is at its default.
    def sourceUrl = {
      def fixPath(s: String) = s.replaceAll("\\\\" + java.io.File.separator, "/")
      val assumedSourceRoot = fixPath(settings.sourcepath.value) stripSuffix "/"
      if (!settings.docsourceurl.isDefault)
        inSource map { case (file, line) =>
          val filePathExt = fixPath(file.path).replaceFirst("^" + assumedSourceRoot, "")
          val (filePath, fileExt) = filePathExt.splitAt(filePathExt.indexOf(".", filePathExt.lastIndexOf("/")))
          val tplOwner = this.inTemplate.qualifiedName
          val tplName = this.name
          // NOTE(review): the first character of this pattern looks mojibake'd
          // (presumably the euro sign '€' used by -doc-source-url variables) — verify encoding.
          val patches = new Regex("""€\\{(FILE_PATH|FILE_EXT|FILE_PATH_EXT|FILE_LINE|TPL_OWNER|TPL_NAME)\\}""")
          def substitute(name: String): String = name match {
            case "FILE_PATH" => filePath
            case "FILE_EXT" => fileExt
            case "FILE_PATH_EXT" => filePathExt
            case "FILE_LINE" => line.toString
            case "TPL_OWNER" => tplOwner
            case "TPL_NAME" => tplName
          }
          val patchedString = patches.replaceAllIn(settings.docsourceurl.value, m => java.util.regex.Matcher.quoteReplacement(substitute(m.group(1))) )
          new java.net.URL(patchedString)
        }
      else None
    }
    private def templateAndType(ancestor: Symbol): (TemplateImpl, TypeEntity) = (makeTemplate(ancestor), makeType(reprSymbol.info.baseType(ancestor), this))
    lazy val (linearizationTemplates, linearizationTypes) =
      (reprSymbol.ancestors map templateAndType).unzip
    /* Subclass cache — null for AnyRef/Any, which would otherwise collect every class. */
    private lazy val subClassesCache = (
      if (sym == AnyRefClass || sym == AnyClass) null
      else mutable.ListBuffer[DocTemplateEntity]()
    )
    def registerSubClass(sc: DocTemplateEntity): Unit = {
      if (subClassesCache != null)
        subClassesCache += sc
    }
    def directSubClasses = if (subClassesCache == null) Nil else subClassesCache.toList
    /* Implicitly convertible class cache — lazily allocated on first registration. */
    private var implicitlyConvertibleClassesCache: mutable.ListBuffer[(DocTemplateImpl, ImplicitConversionImpl)] = null
    def registerImplicitlyConvertibleClass(dtpl: DocTemplateImpl, conv: ImplicitConversionImpl): Unit = {
      if (implicitlyConvertibleClassesCache == null)
        implicitlyConvertibleClassesCache = mutable.ListBuffer[(DocTemplateImpl, ImplicitConversionImpl)]()
      implicitlyConvertibleClassesCache += ((dtpl, conv))
    }
    def incomingImplicitlyConvertedClasses: List[(DocTemplateImpl, ImplicitConversionImpl)] =
      if (implicitlyConvertibleClassesCache == null)
        List()
      else
        implicitlyConvertibleClassesCache.toList
    // the implicit conversions are generated lazily, on completeModel
    lazy val conversions: List[ImplicitConversionImpl] =
      if (settings.docImplicits) makeImplicitConversions(sym, this) else Nil
    // members as given by the compiler
    lazy val memberSyms = sym.info.members.filter(s => membersShouldDocument(s, this)).toList
    // the inherited templates (classes, traits or objects)
    val memberSymsLazy = memberSyms.filter(t => templateShouldDocument(t, this) && !inOriginalOwner(t, this))
    // the direct members (methods, values, vars, types and directly contained templates)
    val memberSymsEager = memberSyms.filter(!memberSymsLazy.contains(_))
    // the members generated by the symbols in memberSymsEager
    val ownMembers = (memberSymsEager.flatMap(makeMember(_, None, this)))
    // all the members that are documented PLUS the members inherited by implicit conversions
    var members: List[MemberImpl] = ownMembers
    def templates = members collect { case c: TemplateEntity with MemberEntity => c }
    def methods = members collect { case d: Def => d }
    def values = members collect { case v: Val => v }
    def abstractTypes = members collect { case t: AbstractType => t }
    def aliasTypes = members collect { case t: AliasType => t }
    /**
     * This is the final point in the core model creation: no DocTemplates are created after the model has finished, but
     * inherited templates and implicit members are added to the members at this point.
     */
    def completeModel(): Unit = {
      // DFS completion
      // since alias types and abstract types have no own members, there's no reason for them to call completeModel
      if (!sym.isAliasType && !sym.isAbstractType)
        for (member <- members)
          member match {
            case d: DocTemplateImpl => d.completeModel()
            case _ =>
          }
      members :::= memberSymsLazy.map(modelCreation.createLazyTemplateMember(_, this))
      // forcing the lazy val registers this template with its conversion targets (side effect)
      outgoingImplicitlyConvertedClasses
      for (pt <- sym.info.parents; parentTemplate <- findTemplateMaybe(pt.typeSymbol)) parentTemplate registerSubClass this
      // the members generated by the symbols in memberSymsEager PLUS the members from the usecases
      val allMembers = ownMembers ::: ownMembers.flatMap(_.useCaseOf).distinct
      implicitsShadowing = makeShadowingTable(allMembers, conversions, this)
      // finally, add the members generated by implicit conversions
      members :::= conversions.flatMap(_.memberImpls)
    }
    var implicitsShadowing = Map[MemberEntity, ImplicitMemberShadowing]()
    // Targets this template converts to; also registers this template on each target.
    lazy val outgoingImplicitlyConvertedClasses: List[(TemplateEntity, TypeEntity, ImplicitConversionImpl)] =
      conversions flatMap (conv =>
        if (!implicitExcluded(conv.conversionQualifiedName))
          conv.targetTypeComponents map {
            case (template, tpe) =>
              template match {
                case d: DocTemplateImpl if (d != this) => d.registerImplicitlyConvertibleClass(this, conv)
                case _ => // nothing
              }
              (template, tpe, conv)
          }
        else List()
      )
    override def isDocTemplate = true
    // Companion symbol; for alias/abstract types, resolved through the owner's term member of the same name.
    private[this] lazy val companionSymbol =
      if (sym.isAliasType || sym.isAbstractType) {
        inTpl.sym.info.member(sym.name.toTermName) match {
          case NoSymbol => NoSymbol
          case s =>
            s.info match {
              case ot: OverloadedType =>
                NoSymbol
              case _ =>
                // that's to navigate from val Foo: FooExtractor to FooExtractor :)
                s.info.resultType.typeSymbol
            }
        }
      }
      else
        sym.companionSymbol
    def companion =
      companionSymbol match {
        case NoSymbol => None
        case comSym if !isEmptyJavaObject(comSym) && (comSym.isClass || comSym.isModule) =>
          makeTemplate(comSym) match {
            case d: DocTemplateImpl => Some(d)
            case _ => None
          }
        case _ => None
      }
    def constructors: List[MemberImpl with Constructor] = if (isClass) members collect { case d: Constructor => d } else Nil
    def primaryConstructor: Option[MemberImpl with Constructor] = if (isClass) constructors find { _.isPrimary } else None
    override def valueParams =
      // we don't want params on a class (non case class) signature
      if (isCase) primaryConstructor match {
        case Some(const) => const.sym.paramss map (_ map (makeValueParam(_, this)))
        case None => List()
      }
      else List.empty
    // These are generated on-demand, make sure you don't call them more than once
    def inheritanceDiagram = makeInheritanceDiagram(this)
    def contentDiagram = makeContentDiagram(this)
    // Searches this template's comment, then its linearization, then its owners, for a group attribute.
    def groupSearch[T](extractor: Comment => Option[T]): Option[T] = {
      val comments = comment +: linearizationTemplates.collect { case dtpl: DocTemplateImpl => dtpl.comment }
      comments.flatten.map(extractor).flatten.headOption orElse {
        Option(inTpl) flatMap (_.groupSearch(extractor))
      }
    }
    def groupDescription(group: String): Option[Body] = groupSearch(_.groupDesc.get(group)) orElse { if (group == defaultGroup) defaultGroupDesc else None }
    def groupPriority(group: String): Int = groupSearch(_.groupPrio.get(group)) getOrElse { if (group == defaultGroup) defaultGroupPriority else 0 }
    def groupName(group: String): String = groupSearch(_.groupNames.get(group)) getOrElse { if (group == defaultGroup) defaultGroupName else group }
  }
  /** A documented package; its representative symbol is the package object when one exists. */
  abstract class PackageImpl(sym: Symbol, inTpl: PackageImpl) extends DocTemplateImpl(sym, inTpl) with Package {
    override def inTemplate = inTpl
    override def toRoot: List[PackageImpl] = this :: inTpl.toRoot
    // Prefer the package object's symbol so its comment/position are used for the package page.
    override def reprSymbol = sym.info.members.find (_.isPackageObject) getOrElse sym
    def packages = members collect { case p: PackageImpl if !(droppedPackages contains p) => p }
  }
  /** The root package: a package with no enclosing template (inTpl == null). */
  abstract class RootPackageImpl(sym: Symbol) extends PackageImpl(sym, null) with RootPackageEntity
  /** A member that is not itself a template (def, val, type member, ...), possibly
   *  added by an implicit `conversion` and/or derived from a @usecase.
   */
  abstract class NonTemplateMemberImpl(sym: Symbol, conversion: Option[ImplicitConversionImpl],
                                       override val useCaseOf: Option[MemberImpl], inTpl: DocTemplateImpl)
           extends MemberImpl(sym, inTpl) with NonTemplateMemberEntity {
    override lazy val comment = {
      def nonRootTemplate(sym: Symbol): Option[DocTemplateImpl] =
        if (sym eq RootPackage) None else findTemplateMaybe(sym)
      val inRealTpl = conversion match {
        case Some(conv) =>
          /* Variable precedence order for implicitly added members: Take the variable definitions from ...
           * 1. the target of the implicit conversion
           * 2. the definition template (owner)
           * 3. the current template
           */
          nonRootTemplate(conv.toType.typeSymbol).orElse(nonRootTemplate(sym.owner)).getOrElse(inTpl)
        case None =>
          // This case handles members which were inherited but not implemented or overridden
          inTpl
      }
      thisFactory.comment(commentCarryingSymbol(sym), inRealTpl, inRealTpl)
    }
    override def inDefinitionTemplates = useCaseOf.fold(super.inDefinitionTemplates)(_.inDefinitionTemplates)
    // Members use "#" as the separator between template and member name.
    override def qualifiedName = optimize(inTemplate.qualifiedName + "#" + name)
    lazy val definitionName = {
      val qualifiedName = conversion.fold(inDefinitionTemplates.head.qualifiedName)(_.conversionQualifiedName)
      optimize(qualifiedName + "#" + name)
    }
    def isUseCase = useCaseOf.isDefined
    override def byConversion: Option[ImplicitConversionImpl] = conversion
    override def isImplicitlyInherited = { assert(modelFinished, "cannot check if implicitly inherited before model is finished"); conversion.isDefined }
    override def isShadowedImplicit = isImplicitlyInherited && inTpl.implicitsShadowing.get(this).map(_.isShadowed).getOrElse(false)
    override def isAmbiguousImplicit = isImplicitlyInherited && inTpl.implicitsShadowing.get(this).map(_.isAmbiguous).getOrElse(false)
    override def isShadowedOrAmbiguousImplicit = isShadowedImplicit || isAmbiguousImplicit
  }
/** A non-template member that also has value parameters (a def or constructor). */
abstract class NonTemplateParamMemberImpl(sym: Symbol, conversion: Option[ImplicitConversionImpl],
useCaseOf: Option[MemberImpl], inTpl: DocTemplateImpl)
extends NonTemplateMemberImpl(sym, conversion, useCaseOf, inTpl) {
def valueParams = {
// For implicitly added members, view the signature through the conversion's target type.
val info = conversion.fold(sym.info)(_.toType memberInfo sym)
info.paramss map { ps => (ps.zipWithIndex) map { case (p, i) =>
// Synthetic parameter names (containing '$') are replaced by readable "argN" names.
if (p.nameString contains "$") makeValueParam(p, inTpl, optimize("arg" + i)) else makeValueParam(p, inTpl)
}}
}
}
/** Base doc-model node for a (type or value) parameter. */
abstract class ParameterImpl(val sym: Symbol, val inTpl: TemplateImpl) extends ParameterEntity {
val name = optimize(sym.nameString)
}
/** Mixin providing the aliased (dealiased) type of a type alias, rendered in the context of `inTpl`. */
private trait AliasImpl {
def sym: Symbol
def inTpl: TemplateImpl
def alias = makeTypeInTemplateContext(sym.tpe.dealias, inTpl, sym)
}
/** Mixin providing the lower/upper bounds of an abstract type or type parameter.
 *  Trivial bounds (>: Nothing, <: Any) are reported as None so they are not rendered. */
private trait TypeBoundsImpl {
def sym: Symbol
def inTpl: TemplateImpl
def lo = sym.info.bounds match {
case TypeBounds(lo, hi) if lo.typeSymbol != NothingClass =>
Some(makeTypeInTemplateContext(appliedType(lo, sym.info.typeParams map {_.tpe}), inTpl, sym))
case _ => None
}
def hi = sym.info.bounds match {
case TypeBounds(lo, hi) if hi.typeSymbol != AnyClass =>
Some(makeTypeInTemplateContext(appliedType(hi, sym.info.typeParams map {_.tpe}), inTpl, sym))
case _ => None
}
}
/** Mixin exposing the type parameters of a (possibly higher-kinded) symbol as doc-model entities. */
trait HigherKindedImpl extends HigherKinded {
def sym: Symbol
def inTpl: TemplateImpl
def typeParams =
sym.typeParams map (makeTypeParam(_, inTpl))
}
/* ============== MAKER METHODS ============== */
/** This method makes it easier to work with the different kinds of symbols created by scalac by stripping down the
* package object abstraction and placing members directly in the package.
*
* Here's the explanation of what we do. The code:
* {{{
* package foo {
* object `package` {
* class Bar
* }
* }
* }}}
* will yield this Symbol structure:
* <pre>
* +---------+ (2)
* | |
* +---------------+ +---------- v ------- | ---+ +--------+ (2)
* | package foo#1 <---(1)---- module class foo#2 | | | |
* +---------------+ | +------------------ | -+ | +------------------- v ---+ |
* | | package object foo#3 <-----(1)---- module class package#4 | |
* | +----------------------+ | | +---------------------+ | |
* +--------------------------+ | | class package\\$Bar#5 | | |
* | +----------------- | -+ | |
* +------------------- | ---+ |
* | |
* +--------+
* </pre>
* (1) sourceModule
* (2) you get out of owners with .owner
*
* and normalizeTemplate(Bar.owner) will get us the package, instead of the module class of the package object.
*/
def normalizeTemplate(aSym: Symbol): Symbol = aSym match {
// Degenerate inputs all normalize to the root package.
case null | rootMirror.EmptyPackage | NoSymbol =>
normalizeTemplate(RootPackage)
// java.lang.Object is documented as scala.AnyRef.
case ObjectClass =>
normalizeTemplate(AnyRefClass)
// Members of a package object are attributed to the enclosing package (see diagram above).
case _ if aSym.isPackageObject =>
normalizeTemplate(aSym.owner)
// Module classes are represented by their source module (the `object` symbol).
case _ if aSym.isModuleClass =>
normalizeTemplate(aSym.sourceModule)
case _ =>
aSym
}
/**
* These are all model construction methods. Please do not use them directly, they are calling each other recursively
* starting from makeModel. On the other hand, makeTemplate, makeAnnotation, makeMember, makeType should only be used
* after the model was created (modelFinished=true) otherwise assertions will start failing.
*/
/** Model-construction entry points. These populate `docTemplatesCache` and must only be
 *  called while the model is being built (`createTemplate`) or after it is finished
 *  (`createLazyTemplateMember`); the asserts below enforce this. */
object modelCreation {
// Bootstraps (or fetches) the cached root package of the documentation model.
def createRootPackage: PackageImpl = docTemplatesCache.get(RootPackage) match {
case Some(root: PackageImpl) => root
case _ => modelCreation.createTemplate(RootPackage, null) match {
case Some(root: PackageImpl) => root
case _ => throw new IllegalStateException("Scaladoc: Unable to create root package!")
}
}
/**
* Create a template, either a package, class, trait or object
*/
def createTemplate(aSym: Symbol, inTpl: DocTemplateImpl): Option[MemberImpl] = {
// don't call this after the model finished!
assert(!modelFinished, (aSym, inTpl))
// Parses -doc-root-content (if set) into the comment attached to the root package.
def createRootPackageComment: Option[Comment] =
if(settings.docRootContent.isDefault) None
else {
import Streamable._
Path(settings.docRootContent.value) match {
case f : File => {
val rootComment = closing(f.inputStream())(is => parse(slurp(is), "", NoPosition, inTpl))
Some(rootComment)
}
case _ => None
}
}
// Picks the concrete DocTemplateImpl flavour matching the symbol's kind.
def createDocTemplate(bSym: Symbol, inTpl: DocTemplateImpl): DocTemplateImpl = {
assert(!modelFinished, (bSym, inTpl)) // only created BEFORE the model is finished
if (bSym.isAliasType && bSym != AnyRefClass)
new DocTemplateImpl(bSym, inTpl) with AliasImpl with AliasType { override def isAliasType = true }
else if (bSym.isAbstractType)
new DocTemplateImpl(bSym, inTpl) with TypeBoundsImpl with AbstractType { override def isAbstractType = true }
else if (bSym.isModule)
new DocTemplateImpl(bSym, inTpl) with Object {}
else if (bSym.isTrait)
new DocTemplateImpl(bSym, inTpl) with Trait {}
else if (bSym.isClass && bSym.asClass.baseClasses.contains(AnnotationClass))
new DocTemplateImpl(bSym, inTpl) with model.AnnotationClass {}
else if (bSym.isClass || bSym == AnyRefClass)
new DocTemplateImpl(bSym, inTpl) with Class {}
else
throw new IllegalArgumentException(s"'$bSym' isn't a class, trait or object thus cannot be built as a documentable template.")
}
val bSym = normalizeTemplate(aSym)
if (docTemplatesCache isDefinedAt bSym)
return Some(docTemplatesCache(bSym))
/* Three cases of templates:
* (1) root package -- special cased for bootstrapping
* (2) package
* (3) class/object/trait
*/
if (bSym == RootPackage) // (1)
Some(new RootPackageImpl(bSym) {
override lazy val comment = createRootPackageComment
override val name = "root"
override def inTemplate = this
override def toRoot = this :: Nil
override def qualifiedName = "_root_"
override def isRootPackage = true
// Merge members of the empty package into the root so top-level unpackaged code is documented.
override lazy val memberSyms =
(bSym.info.members ++ EmptyPackage.info.members).toList filter { s =>
s != EmptyPackage && s != RootPackage
}
})
else if (bSym.hasPackageFlag) // (2)
if (settings.skipPackage(makeQualifiedName(bSym)))
None
else
inTpl match {
case inPkg: PackageImpl =>
val pack = new PackageImpl(bSym, inPkg) {}
// Used to check package pruning works:
//println(pack.qualifiedName)
// Prune packages with no documentable templates and no lazily-documentable members.
if (pack.templates.filter(_.isDocTemplate).isEmpty && pack.memberSymsLazy.isEmpty) {
droppedPackages += pack
None
} else
Some(pack)
case _ =>
throw new IllegalArgumentException(s"'$bSym' must be in a package")
}
else {
// no class inheritance at this point
assert(inOriginalOwner(bSym, inTpl), s"$bSym in $inTpl")
Some(createDocTemplate(bSym, inTpl))
}
}
/**
* After the model is completed, no more DocTemplateEntities are created.
* Therefore any symbol that still appears is:
* - MemberTemplateEntity (created here)
* - NoDocTemplateEntity (created in makeTemplate)
*/
def createLazyTemplateMember(aSym: Symbol, inTpl: DocTemplateImpl): MemberImpl = {
// Code is duplicate because the anonymous classes are created statically
def createNoDocMemberTemplate(bSym: Symbol, inTpl: DocTemplateImpl): MemberTemplateImpl = {
assert(modelFinished, "cannot create NoDocMember template before model is finished") // only created AFTER the model is finished
if (bSym.isModule || (bSym.isAliasType && bSym.tpe.typeSymbol.isModule))
new MemberTemplateImpl(bSym, inTpl) with Object {}
else if (bSym.isTrait || (bSym.isAliasType && bSym.tpe.typeSymbol.isTrait))
new MemberTemplateImpl(bSym, inTpl) with Trait {}
else if (bSym.isClass || (bSym.isAliasType && bSym.tpe.typeSymbol.isClass))
new MemberTemplateImpl(bSym, inTpl) with Class {}
else
throw new IllegalArgumentException(s"'$bSym' isn't a class, trait or object thus cannot be built as a member template.")
}
assert(modelFinished, "cannot create lazy template member before model is finished")
val bSym = normalizeTemplate(aSym)
if (docTemplatesCache isDefinedAt bSym)
docTemplatesCache(bSym)
else
// Prefer the member already created inside the owner's documented template, if any.
docTemplatesCache.get(bSym.owner) match {
case Some(inTpl) =>
val mbrs = inTpl.members.collect({ case mbr: MemberImpl if mbr.sym == bSym => mbr })
assert(mbrs.length == 1, "must have exactly one member with bSym")
mbrs.head
case _ =>
// move the class completely to the new location
createNoDocMemberTemplate(bSym, inTpl)
}
}
}
// TODO: Should be able to override the type
/** Builds the doc-model member(s) for a symbol inside `inTpl`. Usually yields zero or one
 *  member, but @usecase tags can replace a definition with several synthetic members.
 *  @param conversion the implicit conversion the member came from, if any */
def makeMember(aSym: Symbol, conversion: Option[ImplicitConversionImpl], inTpl: DocTemplateImpl): List[MemberImpl] = {
// Dispatches on the symbol kind to the matching member-entity flavour.
def makeMember0(bSym: Symbol, useCaseOf: Option[MemberImpl]): Option[MemberImpl] = {
if (bSym.isGetter && bSym.isLazy)
Some(new NonTemplateMemberImpl(bSym, conversion, useCaseOf, inTpl) with Val {
override def isLazyVal = true
})
else if (bSym.isGetter && bSym.accessed.isMutable)
Some(new NonTemplateMemberImpl(bSym, conversion, useCaseOf, inTpl) with Val {
override def isVar = true
})
else if (bSym.isMethod && !bSym.hasAccessorFlag && !bSym.isConstructor && !bSym.isModule) {
// AnyRef.synchronized gets a cleaned-up replacement symbol for nicer rendering.
val cSym: Symbol =
if (bSym == definitions.Object_synchronized) modifiedSynchronized
else bSym
Some(new NonTemplateParamMemberImpl(cSym, conversion, useCaseOf, inTpl) with HigherKindedImpl with Def {
override def isDef = true
})
}
else if (bSym.isConstructor)
if (conversion.isDefined || (bSym.enclClass.isAbstract && (bSym.enclClass.isSealed || bSym.enclClass.isFinal)))
// don't list constructors inherited by implicit conversion
// and don't list constructors of abstract sealed types (they cannot be accessed anyway)
None
else
Some(new NonTemplateParamMemberImpl(bSym, conversion, useCaseOf, inTpl) with Constructor {
override def isConstructor = true
def isPrimary = sym.isPrimaryConstructor
})
else if (bSym.isGetter) // Scala field accessor or Java field
Some(new NonTemplateMemberImpl(bSym, conversion, useCaseOf, inTpl) with Val {
override def isVal = true
})
else if (bSym.isAbstractType && !typeShouldDocument(bSym, inTpl))
Some(new MemberTemplateImpl(bSym, inTpl) with TypeBoundsImpl with AbstractType {
override def isAbstractType = true
})
else if (bSym.isAliasType && !typeShouldDocument(bSym, inTpl))
Some(new MemberTemplateImpl(bSym, inTpl) with AliasImpl with AliasType {
override def isAliasType = true
})
else if (!modelFinished && (bSym.hasPackageFlag || templateShouldDocument(bSym, inTpl)))
modelCreation.createTemplate(bSym, inTpl)
else
None
}
// Synthetic/internal symbols produce no members at all.
if (!localShouldDocument(aSym) || aSym.isModuleClass || aSym.isPackageObject || aSym.isMixinConstructor)
Nil
else {
val allSyms = useCases(aSym, inTpl.sym) map { case (bSym, bComment, bPos) =>
docComments.put(bSym, DocComment(bComment, bPos)) // put the comment in the list, don't parse it yet, closes scala/bug#4898
bSym
}
val member = makeMember0(aSym, None)
if (allSyms.isEmpty)
member.toList
else
// Use cases replace the original definitions - scala/bug#5054
allSyms flatMap { makeMember0(_, member) }
}
}
/** Finds the already-built member entity for `aSym` inside `inTpl`, if any. */
def findMember(aSym: Symbol, inTpl: DocTemplateImpl): Option[MemberImpl] = {
// NOTE(review): the result of this call is discarded. It may be relied upon for
// symbol-completion side effects triggered while normalizing the owner — confirm
// before removing what otherwise looks like dead code.
normalizeTemplate(aSym.owner)
inTpl.members.find(_.sym == aSym)
}
/** Looks up the documented template for a symbol; None if it was never built or its package was pruned. */
def findTemplateMaybe(aSym: Symbol): Option[DocTemplateImpl] = {
assert(modelFinished, "cannot try to find template before model is finished")
docTemplatesCache.get(normalizeTemplate(aSym)).filterNot(packageDropped(_))
}
// Convenience overload: resolve the enclosing template from the symbol's owner.
def makeTemplate(aSym: Symbol): TemplateImpl = makeTemplate(aSym, None)
/** Returns the documented template for a symbol, or a cached NoDocTemplate stand-in
 *  when the symbol is not part of the documented universe. */
def makeTemplate(aSym: Symbol, inTpl: Option[TemplateImpl]): TemplateImpl = {
assert(modelFinished, "cannot make template before model is finished")
def makeNoDocTemplate(aSym: Symbol, inTpl: TemplateImpl): NoDocTemplateImpl =
noDocTemplatesCache.getOrElse(aSym, new NoDocTemplateImpl(aSym, inTpl))
findTemplateMaybe(aSym) getOrElse {
val bSym = normalizeTemplate(aSym)
makeNoDocTemplate(bSym, inTpl getOrElse makeTemplate(bSym.owner))
}
}
/** Builds the doc-model representation of an annotation, pairing each argument with
 *  the matching constructor parameter of the annotation class when it can be found. */
def makeAnnotation(annot: AnnotationInfo): scala.tools.nsc.doc.model.Annotation = {
val aSym = annot.symbol
new EntityImpl(aSym, makeTemplate(aSym.owner)) with scala.tools.nsc.doc.model.Annotation {
lazy val annotationClass =
makeTemplate(annot.symbol)
val arguments = {
// Parameters of the constructor actually invoked by this annotation, if documented.
val paramsOpt: Option[List[ValueParam]] = annotationClass match {
case aClass: DocTemplateEntity with Class =>
val constr = aClass.constructors collectFirst {
case c: MemberImpl if c.sym == annot.original.symbol => c
}
constr flatMap (_.valueParams.headOption)
case _ => None
}
val argTrees = annot.args map makeTree
paramsOpt match {
case Some (params) =>
// Pair each argument tree with its declared parameter.
params zip argTrees map { case (param, tree) =>
new ValueArgument {
def parameter = Some(param)
def value = tree
}
}
case None =>
// No parameter information available: emit unnamed arguments.
argTrees map { tree =>
new ValueArgument {
def parameter = None
def value = tree
}
}
}
}
}
}
/** */
/** Builds a type-parameter entity, exposing its variance annotation as "+", "-" or "". */
def makeTypeParam(aSym: Symbol, inTpl: TemplateImpl): TypeParam =
new ParameterImpl(aSym, inTpl) with TypeBoundsImpl with HigherKindedImpl with TypeParam {
def variance: String = {
if (sym hasFlag Flags.COVARIANT) "+"
else if (sym hasFlag Flags.CONTRAVARIANT) "-"
else ""
}
}
/** */
/** Builds a value-parameter entity keeping the parameter's own name. */
def makeValueParam(aSym: Symbol, inTpl: DocTemplateImpl): ValueParam = {
makeValueParam(aSym, inTpl, aSym.nameString)
}
/** */
/** Builds a value-parameter entity under the given display name, recovering the default
 *  argument expression (if any) from the parameter's source tree. */
def makeValueParam(aSym: Symbol, inTpl: DocTemplateImpl, newName: String): ValueParam =
new ParameterImpl(aSym, inTpl) with ValueParam {
override val name = newName
def defaultValue =
if (aSym.hasDefault) {
// Locate the compilation unit the parameter was declared in to read its default expression.
val sourceFile = aSym.sourceFile
// units.filter should return only one element
(currentRun.units filter (_.source.file == sourceFile)).toList match {
case List(unit) =>
// scala/bug#4922 `sym == aSym` is insufficient if `aSym` is a clone of symbol
// of the parameter in the tree, as can happen with type parameterized methods.
def isCorrespondingParam(sym: Symbol) = (
sym != null &&
sym != NoSymbol &&
sym.owner == aSym.owner &&
sym.name == aSym.name &&
sym.isParamWithDefault
)
unit.body find (t => isCorrespondingParam(t.symbol)) collect {
case ValDef(_,_,_,rhs) if rhs ne EmptyTree => makeTree(rhs)
}
case _ => None
}
}
else None
def resultType =
makeTypeInTemplateContext(aSym.tpe, inTpl, aSym)
def isImplicit = aSym.isImplicit
}
/** */
/** Renders a type as seen from inside `inTpl`: the type is re-expressed relative to the
 *  template's this-type so type members and prefixes print correctly in that context. */
def makeTypeInTemplateContext(aType: Type, inTpl: TemplateImpl, dclSym: Symbol): TypeEntity = {
val tpe = {
// Walk up to the nearest class/module owner of the declaring symbol.
def ownerTpl(sym: Symbol): Symbol =
if (sym.isClass || sym.isModule || sym == NoSymbol) sym else ownerTpl(sym.owner)
// For objects, asSeenFrom needs the module class, not the module symbol.
val fixedSym = if (inTpl.sym.isModule) inTpl.sym.moduleClass else inTpl.sym
aType.asSeenFrom(fixedSym.thisType, ownerTpl(dclSym))
}
makeType(tpe, inTpl)
}
/** Get the types of the parents of the current class, ignoring the refinements */
/** Get the types of the parents of the current class, ignoring the refinements */
def makeParentTypes(aType: Type, tpl: Option[MemberTemplateImpl], inTpl: TemplateImpl): List[(TemplateEntity, TypeEntity)] = aType match {
case RefinedType(parents, defs) =>
// Universally-known top types that would only add noise to parent lists.
val ignoreParents = Set[Symbol](AnyClass, AnyRefClass, ObjectClass)
val filtParents =
// we don't want to expose too many links to AnyRef, that will just be redundant information
tpl match {
case Some(tpl) if (!tpl.sym.isModule && parents.length < 2) || (tpl.sym == AnyValClass) || (tpl.sym == AnyRefClass) || (tpl.sym == AnyClass) => parents
case _ => parents.filterNot((p: Type) => ignoreParents(p.typeSymbol))
}
/** Returns:
* - a DocTemplate if the type's symbol is documented
* - a NoDocTemplateMember if the type's symbol is not documented in its parent but in another template
* - a NoDocTemplate if the type's symbol is not documented at all */
def makeTemplateOrMemberTemplate(parent: Type): TemplateImpl = {
def noDocTemplate = makeTemplate(parent.typeSymbol)
findTemplateMaybe(parent.typeSymbol) match {
case Some(tpl) => tpl
case None => parent match {
case TypeRef(pre, sym, args) =>
// Try the prefix's template: the parent may be documented as a member there.
findTemplateMaybe(pre.typeSymbol) match {
case Some(tpl) => findMember(parent.typeSymbol, tpl).collect({case t: TemplateImpl => t}).getOrElse(noDocTemplate)
case None => noDocTemplate
}
case _ => noDocTemplate
}
}
}
filtParents.map(parent => {
val templateEntity = makeTemplateOrMemberTemplate(parent)
val typeEntity = makeType(parent, inTpl)
(templateEntity, typeEntity)
})
case _ =>
List((makeTemplate(aType.typeSymbol), makeType(aType, inTpl)))
}
/** Builds the dot-separated qualified name of `sym`, walking up the owner chain.
 *  Package objects and the root package contribute no name segment.
 *  @param relativeTo if given, the walk stops once it reaches one of this symbol's
 *                    owners (after at least one segment has been emitted), producing
 *                    a name relative to that scope. */
def makeQualifiedName(sym: Symbol, relativeTo: Option[Symbol] = None): String = {
  val stop = relativeTo map (_.ownerChain.toSet) getOrElse Set[Symbol]()
  var sym1 = sym
  var path = List[String]()
  while ((sym1 != NoSymbol) && (path.isEmpty || !stop(sym1))) {
    val sym1Norm = normalizeTemplate(sym1)
    // Package objects and the root package are invisible in qualified names.
    if (!sym1.sourceModule.isPackageObject && sym1Norm != RootPackage)
      path ::= sym1Norm.nameString
    sym1 = sym1.owner
  }
  // Prepending and joining once is equivalent to (and cheaper than) the former
  // StringBuilder.insert(0, ...) approach, which was quadratic in the name length.
  optimize(path.mkString("."))
}
// True if `aSym` is declared directly inside the template `inTpl` (after normalization).
def inOriginalOwner(aSym: Symbol, inTpl: TemplateImpl): Boolean =
normalizeTemplate(aSym.owner) == normalizeTemplate(inTpl.sym)
/** Whether a class/trait/object/type symbol deserves its own documented template page. */
def templateShouldDocument(aSym: Symbol, inTpl: DocTemplateImpl): Boolean =
(aSym.isTrait || aSym.isClass || aSym.isModule || typeShouldDocument(aSym, inTpl)) &&
localShouldDocument(aSym) &&
!isEmptyJavaObject(aSym) &&
// either it's inside the original owner or we can document it later:
(!inOriginalOwner(aSym, inTpl) || (aSym.isPackageClass || (aSym.sourceFile != null)))
/** Whether a symbol should appear among the members of `inTpl`. */
def membersShouldDocument(sym: Symbol, inTpl: TemplateImpl) = {
// pruning modules that shouldn't be documented
// Why Symbol.isInitialized? Well, because we need to avoid exploring all the space available to scaladoc
// from the classpath -- scaladoc is a hog, it will explore everything starting from the root package unless we
// somehow prune the tree. And isInitialized is a good heuristic for pruning -- if the package was not explored
// during typer and refchecks, it's not necessary for the current application and there's no need to explore it.
(!sym.isModule || sym.moduleClass.isInitialized) &&
// documenting only public and protected members
localShouldDocument(sym) &&
// Only this class's constructors are part of its members; inherited constructors are not.
(!sym.isConstructor || sym.owner == inTpl.sym)
}
// NOTE(review): despite the name, this returns true when the Java module *does* contain a
// documentable member (it uses `exists`, not its negation). Callers use it negated in
// templateShouldDocument — confirm the intended polarity against upstream before changing.
def isEmptyJavaObject(aSym: Symbol): Boolean =
aSym.isModule && aSym.isJavaDefined &&
aSym.info.members.exists(s => localShouldDocument(s) && (!s.isConstructor || s.owner == aSym))
/** Visibility filter: synthetic symbols never; private ones only with -private and when not top-level. */
def localShouldDocument(aSym: Symbol): Boolean = {
// For `private[X]`, isPrivate is false (while for protected[X], isProtected is true)
def isPrivate = aSym.isPrivate || !aSym.isProtected && aSym.privateWithin != NoSymbol
// for private, only document if enabled in settings and not top-level
!aSym.isSynthetic && (!isPrivate || settings.visibilityPrivate.value && !aSym.isTopLevel)
}
// the implicit conversions that are excluded from the pages should not appear in the diagram
// True if this converter method was excluded from documentation via the hidden-implicits setting.
def implicitExcluded(convertorMethod: String): Boolean = settings.hiddenImplicits(convertorMethod)
// whether or not to create a page for an {abstract,alias} type
// An abstract/alias type gets its own page when -expand-all-types is on (and sources are
// available), or when its doc comment carries @template / @documentable.
def typeShouldDocument(bSym: Symbol, inTpl: DocTemplateImpl) =
(settings.docExpandAllTypes && (bSym.sourceFile != null)) ||
(bSym.isAliasType || bSym.isAbstractType) &&
{ val rawComment = global.expandedDocComment(bSym, inTpl.sym)
rawComment.contains("@template") || rawComment.contains("@documentable") }
}
object ModelFactory {
// Defaults for member grouping, that may be overridden by the template
val defaultGroup = "Ungrouped"          // group id used when a member declares no @group
val defaultGroupName = "Ungrouped"      // display name for the default group
val defaultGroupDesc = None             // the default group has no description
val defaultGroupPriority = 1000         // sorts the default group after explicitly prioritized groups
}
| scala/scala | src/scaladoc/scala/tools/nsc/doc/model/ModelFactory.scala | Scala | apache-2.0 | 46,820 |
package com.twitter.finagle.util
import com.twitter.conversions.DurationOps._
import org.scalatest.FunSuite
/** Unit tests for the `parsers` string extractor objects. Each extractor is exercised
 *  with input it must accept (checking the extracted value) and input it must reject. */
class parsersTest extends FunSuite {
  import parsers._

  test("double") {
    "123.123" match {
      case double(123.123) =>
      case _ => fail()
    }
    "abc" match {
      case double(_) => fail()
      case _ =>
    }
  }

  test("int") {
    "123" match {
      case int(123) =>
      case _ => fail()
    }
    "abc" match {
      case int(_) => fail()
      case _ =>
    }
  }

  test("duration") {
    "10.seconds" match {
      case duration(d) if d == 10.seconds =>
      case _ => fail()
    }
    // A bare number without a unit must not parse as a duration.
    "10" match {
      case duration(_) => fail()
      case _ =>
    }
  }

  test("boolean") {
    // Parsing accepts mixed case.
    "fALse" match {
      case bool(b) => assert(!b) // idiomatic form of the former `assert(b == false)`
      case _ => fail()
    }
    // "1" is accepted as true.
    "1" match {
      case bool(b) => assert(b)
      case _ => fail()
    }
    "abc" match {
      case bool(_) => fail()
      case _ =>
    }
  }

  test("long") {
    "abc" match {
      case long(_) => fail()
      case _ =>
    }
    // A trailing 'L' suffix is accepted.
    "2L" match {
      case long(2L) =>
      case _ => fail()
    }
    // Boundary: the largest representable Long parses correctly.
    "9223372036854775807" match {
      case long(l) => assert(l == Long.MaxValue)
      case _ => fail()
    }
  }

  test("longHex") {
    // Hex digits parse with or without a "0x" prefix.
    "abc" match {
      case longHex(result) => assert(result == 2748L && result == 0xabc)
      case _ => fail()
    }
    "0x123" match {
      case longHex(result) => assert(result == 291L && result == 0x123)
      case _ => fail()
    }
    "invalid" match {
      case longHex(_) => fail()
      case _ =>
    }
  }

  test("list") {
    "a:b:c" match {
      case list("a", "b", "c") =>
      case _ => fail()
    }
    // The empty string parses as the empty list.
    "" match {
      case list() =>
      case _ => fail()
    }
    // Elements can themselves be matched by the other extractors.
    "10.seconds:abc:123.32:999" match {
      case list(duration(d), "abc", double(123.32), int(999)) if d == 10.seconds =>
      case _ => fail()
    }
    "foo:bar:baz" match {
      case list(elems @ _*) =>
        assert(elems == Seq("foo", "bar", "baz"))
      case _ => fail()
    }
  }
}
| luciferous/finagle | finagle-core/src/test/scala/com/twitter/finagle/util/parsersTest.scala | Scala | apache-2.0 | 2,046 |
package by.verkpavel.grafolnet.model
import akka.actor.{Actor, Props}
import by.verkpavel.grafolnet.database.DB
import by.verkpavel.grafolnet.database.domain.Sample
import com.mongodb.casbah.Imports.ObjectId
/** Companion: actor factory props, canonical actor name, and a not-found reply marker. */
object ModelActor {
def props: Props = Props[ModelActor]
def name = "model"
// Reply message used when a requested item does not exist.
case object ItemNotFound
}
/** Actor mediating access to the image/sample database (see [[DB]]).
 *  Message protocol (replies go to the sender):
 *  - String id                      -> image lookup by id
 *  - ('delete, id)                  -> sample deletion by id
 *  - (bytes, format, owner)         -> store a new sample, reply with the result
 *  - ObjectId                       -> list images for that owner */
class ModelActor extends Actor {
def receive = {
case id: String =>
sender ! DB.getImageByID(id)
case ('delete, id: String) =>
sender ! DB.deleteSampleByID(id)
case (image: Array[Byte], format: String, owner: ObjectId) =>
sender ! DB.addSample(Sample(user_id = owner, imageFormat = format, imageSource = image))
case id: ObjectId =>
sender ! DB.getImages(id)
}
}
| VerkhovtsovPavel/BSUIR_Labs | Diploma/diplom/src/main/scala/by/verkpavel/grafolnet/model/ModelActor.scala | Scala | mit | 733 |
// Solution-1.scala
// Solution to Exercise 1 in "Data Types"
val v1:Int = 5 // explicit Int annotation; type inference would produce the same type
println(v1) // prints "5"
/* OUTPUT_SHOULD_BE
5
*/ | P7h/ScalaPlayground | Atomic Scala/atomic-scala-solutions/02_DataTypes/Solution-1.scala | Scala | apache-2.0 | 115 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.join
import org.apache.flink.api.common.state._
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.tuple.{Tuple2 => JTuple2}
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.functions.co.CoProcessFunction
import org.apache.flink.table.api.StreamQueryConfig
import org.apache.flink.table.runtime.types.CRow
import org.apache.flink.types.Row
import org.apache.flink.util.Collector
/**
* Connect data for left stream and right stream. Only use for left or right join without
* non-equal predicates.
*
* @param leftType the input type of left stream
* @param rightType the input type of right stream
* @param genJoinFuncName the function code without any non-equi condition
* @param genJoinFuncCode the function name without any non-equi condition
* @param isLeftJoin the type of join, whether it is the type of left join
* @param queryConfig the configuration for the query to generate
*/
class NonWindowLeftRightJoin(
leftType: TypeInformation[Row],
rightType: TypeInformation[Row],
genJoinFuncName: String,
genJoinFuncCode: String,
isLeftJoin: Boolean,
queryConfig: StreamQueryConfig)
extends NonWindowOuterJoin(
leftType,
rightType,
genJoinFuncName,
genJoinFuncCode,
isLeftJoin,
queryConfig) {
override def open(parameters: Configuration): Unit = {
super.open(parameters)
// Log which outer-join flavour this instance implements.
val joinType = if (isLeftJoin) "Left" else "Right"
LOG.debug(s"Instantiating NonWindow${joinType}OuterJoin")
}
/**
* Puts or Retract an element from the input stream into state and search the other state to
* output records meet the condition. The input row will be preserved and appended with null, if
* there is no match. Records will be expired in state if state retention time has been
* specified.
*/
override def processElement(
value: CRow,
ctx: CoProcessFunction[CRow, CRow, CRow]#Context,
out: Collector[CRow],
currentSideState: MapState[Row, JTuple2[Long, Long]],
otherSideState: MapState[Row, JTuple2[Long, Long]],
recordFromLeft: Boolean): Unit = {
val inputRow = value.row
// Record (or retract) the input on its own side's state first.
updateCurrentSide(value, ctx, currentSideState)
cRowWrapper.reset()
cRowWrapper.setCollector(out)
cRowWrapper.setChange(value.change)
// join other side data
if (recordFromLeft == isLeftJoin) {
// Input is on the preserved (outer) side: emit with nulls padded when no match exists.
preservedJoin(inputRow, recordFromLeft, otherSideState)
} else {
// Input is on the opposite side: may retract previously emitted null-padded results.
retractJoin(value, recordFromLeft, currentSideState, otherSideState)
}
}
}
| ueshin/apache-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/runtime/join/NonWindowLeftRightJoin.scala | Scala | apache-2.0 | 3,446 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical
import org.apache.flink.table.api._
import org.apache.flink.table.expressions.FieldReferenceExpression
import org.apache.flink.table.expressions.utils.ApiExpressionUtils.intervalOfMillis
import org.apache.flink.table.planner.calcite.FlinkRelBuilder.PlannerNamedWindowProperty
import org.apache.flink.table.planner.expressions.PlannerWindowReference
import org.apache.flink.table.planner.functions.sql.FlinkSqlOperatorTable
import org.apache.flink.table.planner.plan.logical.{LogicalWindow, SessionGroupWindow, SlidingGroupWindow, TumblingGroupWindow}
import org.apache.flink.table.planner.plan.nodes.calcite.LogicalWindowAggregate
import org.apache.flink.table.runtime.types.LogicalTypeDataTypeConverter.fromDataTypeToLogicalType
import com.google.common.collect.ImmutableList
import org.apache.calcite.plan.RelOptRule._
import org.apache.calcite.plan._
import org.apache.calcite.plan.hep.HepRelVertex
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.Aggregate.Group
import org.apache.calcite.rel.core.{Aggregate, AggregateCall, RelFactories}
import org.apache.calcite.rel.logical.{LogicalAggregate, LogicalProject}
import org.apache.calcite.rex._
import org.apache.calcite.sql.`type`.SqlTypeUtil
import org.apache.calcite.tools.RelBuilder
import org.apache.calcite.util.ImmutableBitSet
import _root_.scala.collection.JavaConversions._
/**
* Planner rule that transforms simple [[LogicalAggregate]] on a [[LogicalProject]]
* with windowing expression to [[LogicalWindowAggregate]].
*/
abstract class LogicalWindowAggregateRuleBase(description: String)
extends RelOptRule(
operand(classOf[LogicalAggregate],
operand(classOf[LogicalProject], none())),
description) {
/** Fires only for a simple (non-grouping-sets) aggregate whose GROUP BY contains exactly
 *  one window group function; more than one is rejected with a TableException. */
override def matches(call: RelOptRuleCall): Boolean = {
val agg: LogicalAggregate = call.rel(0)
val windowExpressions = getWindowExpressions(
agg, trimHep(agg.getInput()).asInstanceOf[LogicalProject])
if (windowExpressions.length > 1) {
throw new TableException("Only a single window group function may be used in GROUP BY")
}
// check if we have grouping sets
val groupSets = agg.getGroupType != Group.SIMPLE
!groupSets && !agg.indicator && windowExpressions.nonEmpty
}
/** Rewrites Aggregate(Project(...window call...)) into Project(WindowAggregate(Aggregate(...))):
 *  the window call is removed from GROUP BY, moved into a LogicalWindowAggregate, and the
 *  top Project casts aggregate results back to their original (pre-rewrite) types. */
override def onMatch(call: RelOptRuleCall): Unit = {
val builder = call.builder()
val agg: LogicalAggregate = call.rel(0)
val project: LogicalProject = rewriteProctimeWindows(call.rel(1), builder)
val (windowExpr, windowExprIdx) = getWindowExpressions(agg, project).head
val rexBuilder = agg.getCluster.getRexBuilder
// Replacement expression for the window call inside the rewritten input projection.
val inAggGroupExpression = getInAggregateGroupExpression(rexBuilder, windowExpr)
val newGroupSet = agg.getGroupSet.except(ImmutableBitSet.of(windowExprIdx))
val newProject = builder
.push(project.getInput)
.project(project.getChildExps.updated(windowExprIdx, inAggGroupExpression))
.build()
// translate window against newProject.
val window = translateWindow(windowExpr, windowExprIdx, newProject.getRowType)
// Currently, this rule removes the window from GROUP BY operation which may lead to changes
// of AggCall's type which brings fails on type checks.
// To solve the problem, we change the types to the inferred types in the Aggregate and then
// cast back in the project after Aggregate.
val indexAndTypes = getIndexAndInferredTypesIfChanged(agg)
val finalCalls = adjustTypes(agg, indexAndTypes)
// we don't use the builder here because it uses RelMetadataQuery which affects the plan
val newAgg = LogicalAggregate.create(
newProject,
agg.indicator,
newGroupSet,
ImmutableList.of(newGroupSet),
finalCalls)
val transformed = call.builder()
val windowAgg = LogicalWindowAggregate.create(
window,
Seq[PlannerNamedWindowProperty](),
newAgg)
transformed.push(windowAgg)
// The transformation adds an additional LogicalProject at the top to ensure
// that the types are equivalent.
// 1. ensure group key types, create an additional project to conform with types
val outAggGroupExpression0 = getOutAggregateGroupExpression(rexBuilder, windowExpr)
// fix up the nullability if it is changed.
val outAggGroupExpression = if (windowExpr.getType.isNullable !=
outAggGroupExpression0.getType.isNullable) {
builder.getRexBuilder.makeAbstractCast(
builder.getRexBuilder.matchNullability(outAggGroupExpression0.getType, windowExpr),
outAggGroupExpression0)
} else {
outAggGroupExpression0
}
val projectsEnsureGroupKeyTypes =
transformed.fields.patch(windowExprIdx, Seq(outAggGroupExpression), 0)
// 2. ensure aggCall types
val projectsEnsureAggCallTypes =
projectsEnsureGroupKeyTypes.zipWithIndex.map {
case (aggCall, index) =>
val aggCallIndex = index - agg.getGroupCount
if (indexAndTypes.containsKey(aggCallIndex)) {
// Cast back to the aggregate call's original declared type.
rexBuilder.makeCast(agg.getAggCallList.get(aggCallIndex).`type`, aggCall, true)
} else {
aggCall
}
}
transformed.project(projectsEnsureAggCallTypes)
val result = transformed.build()
call.transformTo(result)
}
/** Trim out the HepRelVertex wrapper and get current relational expression. */
private def trimHep(node: RelNode): RelNode = {
node match {
case hepRelVertex: HepRelVertex =>
hepRelVertex.getCurrentRel
case _ => node
}
}
/**
* Rewrite plan with PROCTIME() as window call operand: rewrite the window call to
* reference the input instead of invoke the PROCTIME() directly, in order to simplify the
* subsequent rewrite logic.
*
* For example, plan
* <pre>
* LogicalProject($f0=[TUMBLE(PROCTIME(), 1000:INTERVAL SECOND)], a=[$0], b=[$1])
* +- LogicalTableScan
* </pre>
*
* would be rewritten to
* <pre>
* LogicalProject($f0=[TUMBLE($2, 1000:INTERVAL SECOND)], a=[$0], b=[$1])
* +- LogicalProject(a=[$0], b=[$1], $f2=[PROCTIME()])
* +- LogicalTableScan
* </pre>
*/
  private def rewriteProctimeWindows(
      project: LogicalProject,
      relBuilder: RelBuilder): LogicalProject = {
    val projectInput = trimHep(project.getInput)
    // Flips to true as soon as one window call is found to invoke PROCTIME() directly.
    var hasWindowOnProctimeCall: Boolean = false
    val newProjectExprs = project.getChildExps.map {
      case call: RexCall if isWindowCall(call) && isProctimeCall(call.getOperands.head) =>
        hasWindowOnProctimeCall = true
        // Update the window call to reference a RexInputRef instead of a PROCTIME() call.
        call.accept(
          new RexShuttle {
            override def visitCall(call: RexCall): RexNode = {
              if (isProctimeCall(call)) {
                relBuilder.getRexBuilder.makeInputRef(
                  call.getType,
                  // We would project plus an additional PROCTIME() call
                  // at the end of input projection, so the new input ref points
                  // one past the current last field.
                  projectInput.getRowType.getFieldCount)
              } else {
                super.visitCall(call)
              }
            }
          })
      // All other projection expressions are kept untouched.
      case rex: RexNode => rex
    }
    if (hasWindowOnProctimeCall) {
      val newInput = relBuilder
        .push(projectInput)
        // project plus the PROCTIME() call.
        .projectPlus(relBuilder.call(FlinkSqlOperatorTable.PROCTIME))
        .build()
      // we have to use project factory, because RelBuilder will simplify redundant projects
      RelFactories
        .DEFAULT_PROJECT_FACTORY
        .createProject(newInput, newProjectExprs, project.getRowType.getFieldNames)
        .asInstanceOf[LogicalProject]
    } else {
      project
    }
  }
/** Decides if the [[RexNode]] is a PROCTIME call. */
def isProctimeCall(rexNode: RexNode): Boolean = rexNode match {
case call: RexCall =>
call.getOperator == FlinkSqlOperatorTable.PROCTIME
case _ => false
}
/** Decides whether the [[RexCall]] is a window call. */
def isWindowCall(call: RexCall): Boolean = call.getOperator match {
case FlinkSqlOperatorTable.SESSION |
FlinkSqlOperatorTable.HOP |
FlinkSqlOperatorTable.TUMBLE => true
case _ => false
}
/**
* Change the types of [[AggregateCall]] to the corresponding inferred types.
*/
private def adjustTypes(
agg: LogicalAggregate,
indexAndTypes: Map[Int, RelDataType]) = {
agg.getAggCallList.zipWithIndex.map {
case (aggCall, index) =>
if (indexAndTypes.containsKey(index)) {
AggregateCall.create(
aggCall.getAggregation,
aggCall.isDistinct,
aggCall.isApproximate,
aggCall.ignoreNulls(),
aggCall.getArgList,
aggCall.filterArg,
aggCall.collation,
agg.getGroupCount,
agg.getInput,
indexAndTypes(index),
aggCall.name)
} else {
aggCall
}
}
}
  /**
   * Check if there are any types of [[AggregateCall]] that need to be changed. Return the
   * [[AggregateCall]] indexes and the corresponding inferred types.
   */
  private def getIndexAndInferredTypesIfChanged(
      agg: LogicalAggregate)
    : Map[Int, RelDataType] = {
    agg.getAggCallList.zipWithIndex.flatMap {
      case (aggCall, index) =>
        val origType = aggCall.`type`
        // Bind the aggregate function to its actual argument types so the
        // function can re-infer its return type.
        val aggCallBinding = new Aggregate.AggCallBinding(
          agg.getCluster.getTypeFactory,
          aggCall.getAggregation,
          SqlTypeUtil.projectTypes(agg.getInput.getRowType, aggCall.getArgList),
          0,
          aggCall.hasFilter)
        val inferredType = aggCall.getAggregation.inferReturnType(aggCallBinding)
        // Only record the call when the inferred type differs from the declared one.
        // NOTE(review): the `getGroupCount == 1` restriction presumably limits the
        // adjustment to single-group-key (window) aggregates — confirm with callers.
        if (origType != inferredType && agg.getGroupCount == 1) {
          Some(index, inferredType)
        } else {
          None
        }
    }.toMap
  }
private[table] def getWindowExpressions(
agg: LogicalAggregate,
project: LogicalProject): Seq[(RexCall, Int)] = {
val groupKeys = agg.getGroupSet
// get grouping expressions
val groupExpr = project.getProjects.zipWithIndex.filter(p => groupKeys.get(p._2))
// filter grouping expressions for window expressions
groupExpr.filter { g =>
g._1 match {
case call: RexCall =>
call.getOperator match {
case FlinkSqlOperatorTable.TUMBLE =>
if (call.getOperands.size() == 2) {
true
} else {
throw new TableException("TUMBLE window with alignment is not supported yet.")
}
case FlinkSqlOperatorTable.HOP =>
if (call.getOperands.size() == 3) {
true
} else {
throw new TableException("HOP window with alignment is not supported yet.")
}
case FlinkSqlOperatorTable.SESSION =>
if (call.getOperands.size() == 2) {
true
} else {
throw new TableException("SESSION window with alignment is not supported yet.")
}
case _ => false
}
case _ => false
}
}.map(w => (w._1.asInstanceOf[RexCall], w._2))
}
  /**
   * Returns the expression that replaces the window expression before the aggregation.
   *
   * @param rexBuilder builder used to construct the replacement expression
   * @param windowExpression the group window call to be replaced
   */
  private[table] def getInAggregateGroupExpression(
      rexBuilder: RexBuilder,
      windowExpression: RexCall): RexNode
  /**
   * Returns the expression that replaces the window expression after the aggregation.
   *
   * @param rexBuilder builder used to construct the replacement expression
   * @param windowExpression the group window call to be replaced
   */
  private[table] def getOutAggregateGroupExpression(
      rexBuilder: RexBuilder,
      windowExpression: RexCall): RexNode
  /** Translates the group window expression into a Flink Table [[LogicalWindow]]. */
  private[table] def translateWindow(
      windowExpr: RexCall,
      windowExprIdx: Int,
      rowType: RelDataType): LogicalWindow = {
    // Operand 0 of every supported window call is the time attribute.
    val timeField = getTimeFieldReference(windowExpr.getOperands.get(0), windowExprIdx, rowType)
    val resultType = Some(fromDataTypeToLogicalType(timeField.getOutputDataType))
    val windowRef = PlannerWindowReference("w$", resultType)
    windowExpr.getOperator match {
      case FlinkSqlOperatorTable.TUMBLE =>
        // Operand 1: window size.
        val interval = getOperandAsLong(windowExpr, 1)
        TumblingGroupWindow(
          windowRef,
          timeField,
          intervalOfMillis(interval))
      case FlinkSqlOperatorTable.HOP =>
        // Operand 1: slide, operand 2: size.
        val (slide, size) = (getOperandAsLong(windowExpr, 1), getOperandAsLong(windowExpr, 2))
        SlidingGroupWindow(
          windowRef,
          timeField,
          intervalOfMillis(size),
          intervalOfMillis(slide))
      case FlinkSqlOperatorTable.SESSION =>
        // Operand 1: session gap.
        val gap = getOperandAsLong(windowExpr, 1)
        SessionGroupWindow(
          windowRef,
          timeField,
          intervalOfMillis(gap))
    }
  }
  /**
   * Returns a reference to the time attribute the window operates on.
   *
   * @param operand first operand of the window call (the time attribute)
   * @param windowExprIdx index of the window expression in the projection
   * @param rowType row type of the input
   */
  private[table] def getTimeFieldReference(
      operand: RexNode,
      windowExprIdx: Int,
      rowType: RelDataType): FieldReferenceExpression
  /**
   * Returns the window call's operand at `idx` as a Long
   * (e.g. an interval in milliseconds).
   */
  def getOperandAsLong(call: RexCall, idx: Int): Long
}
| gyfora/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/rules/logical/LogicalWindowAggregateRuleBase.scala | Scala | apache-2.0 | 13,997 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.physical.batch
import org.apache.flink.runtime.operators.DamBehavior
import org.apache.flink.streaming.api.transformations.UnionTransformation
import org.apache.flink.table.api.BatchTableEnvironment
import org.apache.flink.table.dataformat.BaseRow
import org.apache.flink.table.plan.`trait`.{FlinkRelDistribution, FlinkRelDistributionTraitDef}
import org.apache.flink.table.plan.nodes.exec.{BatchExecNode, ExecNode}
import org.apache.calcite.plan.{RelOptCluster, RelOptRule, RelTraitSet}
import org.apache.calcite.rel.RelDistribution.Type.{ANY, BROADCAST_DISTRIBUTED, HASH_DISTRIBUTED, RANDOM_DISTRIBUTED, RANGE_DISTRIBUTED, ROUND_ROBIN_DISTRIBUTED, SINGLETON}
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.{SetOp, Union}
import org.apache.calcite.rel.{RelNode, RelWriter}
import java.util
import org.apache.flink.api.dag.Transformation
import scala.collection.JavaConversions._
/**
  * Batch physical RelNode for [[Union]].
  */
class BatchExecUnion(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    inputRels: util.List[RelNode],
    all: Boolean,
    outputRowType: RelDataType)
  extends Union(cluster, traitSet, inputRels, all)
  with BatchPhysicalRel
  with BatchExecNode[BaseRow] {
  // Only UNION ALL is supported by this physical node (enforced at construction).
  require(all, "Only support union all now")
  /** The output row type is fixed by the constructor argument rather than re-derived. */
  override def deriveRowType(): RelDataType = outputRowType
  override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode], all: Boolean): SetOp = {
    new BatchExecUnion(cluster, traitSet, inputs, all, outputRowType)
  }
  override def explainTerms(pw: RelWriter): RelWriter = {
    super.explainTerms(pw)
      .item("union", getRowType.getFieldNames.mkString(", "))
  }
  /**
   * Tries to satisfy the required distribution by pushing it into every union input.
   * Returns None for distribution types a union cannot provide.
   */
  override def satisfyTraits(requiredTraitSet: RelTraitSet): Option[RelNode] = {
    // union will destroy collation trait. So does not handle collation requirement.
    val requiredDistribution = requiredTraitSet.getTrait(FlinkRelDistributionTraitDef.INSTANCE)
    val canSatisfy = requiredDistribution.getType match {
      case RANDOM_DISTRIBUTED |
           ROUND_ROBIN_DISTRIBUTED |
           BROADCAST_DISTRIBUTED |
           HASH_DISTRIBUTED => true
      // range distribution cannot be satisfied because partition's [lower, upper] of each union
      // child may be different.
      case RANGE_DISTRIBUTED => false
      // singleton cannot be satisfied because singleton exchange limits the parallelism of
      // exchange output RelNode to 1.
      // Push down Singleton into input of union will destroy the limitation.
      case SINGLETON => false
      // there is no need to satisfy Any distribution
      case ANY => false
    }
    if (!canSatisfy) {
      return None
    }
    val inputRequiredDistribution = requiredDistribution.getType match {
      case RANDOM_DISTRIBUTED | ROUND_ROBIN_DISTRIBUTED | BROADCAST_DISTRIBUTED =>
        requiredDistribution
      case HASH_DISTRIBUTED =>
        // apply strict hash distribution of each child
        // to avoid inconsistent shuffles across the children
        FlinkRelDistribution.hash(requiredDistribution.getKeys)
    }
    val newInputs = getInputs.map(RelOptRule.convert(_, inputRequiredDistribution))
    val providedTraitSet = getTraitSet.replace(inputRequiredDistribution)
    Some(copy(providedTraitSet, newInputs))
  }
  //~ ExecNode methods -----------------------------------------------------------
  /** Union does not buffer or block; rows stream straight through. */
  override def getDamBehavior: DamBehavior = DamBehavior.PIPELINED
  override def getInputNodes: util.List[ExecNode[BatchTableEnvironment, _]] =
    getInputs.map(_.asInstanceOf[ExecNode[BatchTableEnvironment, _]])
  override def replaceInputNode(
      ordinalInParent: Int,
      newInputNode: ExecNode[BatchTableEnvironment, _]): Unit = {
    replaceInput(ordinalInParent, newInputNode.asInstanceOf[RelNode])
  }
  /** Translates every input node to a transformation and unions the results. */
  override def translateToPlanInternal(
      tableEnv: BatchTableEnvironment): Transformation[BaseRow] = {
    val transformations = getInputNodes.map {
      input => input.translateToPlan(tableEnv).asInstanceOf[Transformation[BaseRow]]
    }
    new UnionTransformation(transformations)
  }
}
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/nodes/physical/batch/BatchExecUnion.scala | Scala | apache-2.0 | 4,932 |
/*
* Cerres (c) 2012-2014 EPFL, Lausanne
*/
package cerres.examples
import ceres.common.{QuadDouble => QD}
import ceres.affine._
//import QD._
/**
 * A library of test functions (and their first derivatives) for root-finding
 * experiments, each provided for Double, QuadDouble and AffineForm arithmetic.
 * The comments above each group record the true roots and the roots computed
 * with plain doubles at tolerances 1e-5 and 1e-10.
 */
object FunctionLibrary {
  /* Naming convention: each function name looks like name_type[_derivative]
     name can be something like p1 for polynomial 1, type can be d for Double,
     dd for DoubleDouble, qd for QuadDouble, aa for affine form,
     derivative is optional and denotes if a function is the nth derivative of another one.
   */
  // Polynomial 1 has roots: -4, -2, -1, 1, 3
  // Computed with doubles, 1e-5: -4.000000692519392, -2.0000000033755176, -0.9999999989838327, 1.0000003092609135, 3.0000003221359677
  // Computed with doubles, 1e-10: -4.000000000000564, -2.000000000000001, -1.0, 1.000000000000051, 3.0000000000001132
  val p1_d = (x: Double) => (x*x*x*x*x + 3 * x*x*x*x - 11 * x*x*x - 27*x*x +10*x + 24)/20.0
  val p1_d_1 = (x: Double) => (5*x*x*x*x + 12*x*x*x - 33*x*x - 54*x + 10)/20.0
  val p1_qd = (x: QD) => (x*x*x*x*x + 3 * x*x*x*x - 11 * x*x*x - 27*x*x +10*x + 24)/20.0
  val p1_qd_1 = (x: QD) => (5*x*x*x*x + 12*x*x*x - 33*x*x - 54*x + 10)/20.0
  val p1_aa = (x: AffineForm) => (x*x*x*x*x + 3 * x*x*x*x - 11 * x*x*x - 27*x*x +10*x + 24)/20.0
  val p1_aa_1 = (x: AffineForm) => (5*x*x*x*x + 12*x*x*x - 33*x*x - 54*x + 10)/20.0
  // Polynomial 2 has one root at: -1.3247179572447460259609089
  // Computed with doubles, 1e-5 : -1.3247190494171253
  // Computed with doubles, 1e-10: -1.3247179572458576
  val p2_d = (x: Double) => x*x*x - x + 1
  val p2_d_1 = (x: Double) => 3 * x*x - 1
  val p2_qd = (x: QD) => x*x*x - x + 1
  val p2_qd_1 = (x: QD) => 3 * x*x - 1
  val p2_aa = (x: AffineForm) => x*x*x - x + 1
  val p2_aa_1 = (x: AffineForm) => 3 * x*x - 1
  // Polynomial 3 has one root at: 0.4501836112948735730365387
  // Computed with doubles, 1e-5 : 0.4501836542045203
  // Computed with doubles, 1e-10: 0.4501836112948739
  val p3_d = (x: Double) => math.cos(x) - 2 * x
  val p3_d_1 = (x: Double) => - math.sin(x) - 2
  val p3_qd = (x: QD) => QD.cos(x) - 2 * x
  val p3_qd_1 = (x: QD) => - QD.sin(x) - 2
  val p3_aa = (x: AffineForm) => AffineForm.cos(x) - 2 * x
  val p3_aa_1 = (x: AffineForm) => - AffineForm.sin(x) - 2
  // Polynomial 4 has one root at: 3.3333333333333334566914472
  // Computed with doubles, 1e-5 : 3.333332841403648
  // Computed with doubles, 1e-10: 3.333333333333261
  val p4_d = (x: Double) => 1/x - 0.3
  val p4_d_1 = (x: Double) => -1.0/(x*x)
  val p4_qd = (x: QD) => 1/x - 0.3
  val p4_qd_1 = (x: QD) => -1.0/(x*x)
  val p4_aa = (x: AffineForm) => 1/x - 0.3
  val p4_aa_1 = (x: AffineForm) => -1.0/(x*x)
  // Polynomial 5 has two roots: For some reason this diverges?!
  // Computed with doubles, 1e-5 :
  // Computed with doubles, 1e-10:
  val p5_d = (x: Double) => math.exp(x) - 2*x - 0.1
  val p5_d_1 = (x: Double) => math.exp(x) - 2
  val p5_qd = (x: QD) => QD.exp(x) - 2*x - 0.1
  val p5_qd_1 = (x: QD) => QD.exp(x) - 2
  val p5_aa = (x: AffineForm) => AffineForm.exp(x) - 2*x - 0.1
  val p5_aa_1 = (x: AffineForm) => AffineForm.exp(x) - 2
  // Function 6 has one root at 0.0 (diverges with Newton, converges with Secant)
  // Computed with doubles, 1e-5 : -1.6999861111258868E-8
  // Computed with doubles, 1e-10: 9.926167350636332E-24
  // Aaaha: Newton gives us: -5.1020408163265305 (for both tolerances) as a possible root, starting from -5.0
  val p6_d = (x: Double) => x * math.exp(-(x*x))
  val p6_d_1 = (x: Double) => math.exp(-(x*x)) - 2*x*x*math.exp(-(x*x))
  val p6_qd = (x: QD) => x * QD.exp(-(x*x))
  val p6_qd_1 = (x: QD) => QD.exp(-(x*x)) - 2*x*x*QD.exp(-(x*x))
  val p6_aa = (x: AffineForm) => x * AffineForm.exp(-(x*x))
  val p6_aa_1 = (x: AffineForm) => AffineForm.exp(-(x*x)) - 2*x*x*AffineForm.exp(-(x*x))
  // Polynomial 7 has 3 roots at 0.75, 1.0 and 2.0
  // Computed with doubles, 1e-5 : 0.7499972689032873, 1.000000000940891, 2.0000000004806338
  // Computed with doubles, 1e-10: 0.7499999999641983, 0.9999999999999973, 2.0000000000000013
  val p7_d = (x: Double) => 4*x*x*x - 15*x*x + 17*x - 6
  val p7_d_1 = (x: Double) => 12*x*x - 30*x + 17
  val p7_qd = (x: QD) => 4*x*x*x - 15*x*x + 17*x - 6
  val p7_qd_1 = (x: QD) => 12*x*x - 30*x + 17
  val p7_aa = (x: AffineForm) => 4*x*x*x - 15*x*x + 17*x - 6
  val p7_aa_1 = (x: AffineForm) => 12*x*x - 30*x + 17
  // Function 8 has 2 roots in [-3.0, Infnt] at -1.3812060997502068247999627, 0.2548500590288502553077938
  // Computed with doubles, 1e-5 : -1.3812065650950054, 0.2548500590506985
  // Computed with doubles, 1e-10: -1.3812060997502582, 0.25485005902885033
  val p8_d = (x: Double) => 3 * math.exp(x) - 4 * math.cos(x)
  val p8_d_1 = (x: Double) => 3 * math.exp(x) + 4 * math.sin(x)
  val p8_qd = (x: QD) => 3 * QD.exp(x) - 4 * QD.cos(x)
  val p8_qd_1 = (x: QD) => 3 * QD.exp(x) + 4 * QD.sin(x)
  val p8_aa = (x: AffineForm) => 3 * AffineForm.exp(x) - 4 * AffineForm.cos(x)
  val p8_aa_1 = (x: AffineForm) => 3 * AffineForm.exp(x) + 4 * AffineForm.sin(x)
  // Polynomial 9 has one root at 0.1999999999999998223643161
  // Computed with doubles, 1e-5 : 0.199609375
  // Computed with doubles, 1e-10: 0.19999847412109376
  val p9_d = (x: Double) => 25*x*x -10*x + 1
  val p9_d_1 = (x: Double) => 50*x - 10
  val p9_qd = (x: QD) => 25*x*x -10*x + 1
  val p9_qd_1 = (x: QD) => 50*x - 10
  val p9_aa = (x: AffineForm) => 25*x*x -10*x + 1
  val p9_aa_1 = (x: AffineForm) => 50*x - 10
  // Polynomial 10 has one root at 0.0
  // Computed with doubles, 1e-5 : 1.9635306152277626E-10
  // Computed with doubles, 1e-10: 0.0
  val p10_d = (x: Double) => math.atan(x)
  val p10_d_1 = (x: Double) => 1.0/(1 + x*x)
  val p10_qd = (x: QD) => QD.atan(x)
  val p10_qd_1 = (x: QD) => 1.0/(1 + x*x)
  val p10_aa = (x: AffineForm) => AffineForm.atan(x)
  val p10_aa_1 = (x: AffineForm) => 1.0/(1 + x*x)
  // Function 11 has 2 roots at -1.2710268008159460640047188, -0.6592660457669460745373486
  // Computed with doubles, 1e-5 : -1.2710268031402283, -0.6592654415665319
  // Computed with doubles, 1e-10: -1.271026800815946, -0.6592660457664381
  val p11_d = (x: Double) => math.cos(x) + 2*math.sin(x) + x*x
  val p11_d_1 = (x: Double) => -math.sin(x) + 2*math.cos(x) + 2*x
  val p11_qd = (x: QD) => QD.cos(x) + 2*QD.sin(x) + x*x
  val p11_qd_1 = (x: QD) => -QD.sin(x) + 2*QD.cos(x) + 2*x
  val p11_aa = (x: AffineForm) => AffineForm.cos(x) + 2*AffineForm.sin(x) + x*x
  val p11_aa_1 = (x: AffineForm) => -AffineForm.sin(x) + 2*AffineForm.cos(x) + 2*x
  // Function 12 has 2 roots at 1.4296118247255556122752444, 8.6131694564413985966763966
  // Computed with doubles, 1e-5 : 1.4296077757568797, 8.613169478615234
  // Computed with doubles, 1e-10: 1.4296118247166327, 8.6131694564414
  val p12_d = (x: Double) => 4 * math.log(x) - x
  val p12_d_1 = (x: Double) => 4.0/x - 1
  val p12_qd = (x: QD) => 4 * QD.log(x) - x
  val p12_qd_1 = (x: QD) => 4.0/x - 1
  val p12_aa = (x: AffineForm) => 4 * AffineForm.log(x) - x
  val p12_aa_1 = (x: AffineForm) => 4.0/x - 1
}
| malyzajko/cassia | applications/src/examples/FunctionLibrary.scala | Scala | bsd-3-clause | 6,989 |
/**
* Log analyzer and summary builder written in Scala built for JVM projects
*
* @package LogAnalyzer
* @copyright Apache V2 License (see LICENSE)
* @url https://github.com/mcross1882/LogAnalyzer
*/
package mcross1882.loganalyzer.test.export
import javax.mail.Transport
import javax.mail.internet.MimeMessage
import mcross1882.loganalyzer.export.EmailExport
import mcross1882.loganalyzer.test.DefaultTestSuite
import org.mockito.Mockito.verify
/**
 * Verifies that [[EmailExport]] pushes its content through the JavaMail
 * transport pipeline.
 */
class EmailExportSpec extends DefaultTestSuite {
  // The mocks are never reassigned, so they are immutable vals
  // (the original declared them as `var` without ever mutating them).
  private val _transportMock: Transport = mock[Transport]
  private val _messageMock: MimeMessage = mock[MimeMessage]

  "send" should "write a message to the transport pipeline" in {
    val export = buildExport
    val content = "The Expected Content"
    export.send(content)
    // The content must be set on the message, and the message must be handed
    // to the transport together with its full recipient list.
    verify(_messageMock).setText(content)
    verify(_transportMock).sendMessage(_messageMock, _messageMock.getAllRecipients)
  }

  /** Builds the export under test, wired to the two mocks. */
  protected def buildExport = new EmailExport(_transportMock, _messageMock)
}
} | mcross1882/LogAnalyzer | src/test/scala/mcross1882/loganalyzer/export/EmailExportSpec.scala | Scala | apache-2.0 | 1,060 |
package swe.backend
/**
 * Abstraction over a semantic-web storage backend: statements can be loaded
 * from files, added individually, and queried via SPARQL.
 * NOTE(review): the model is presumably an RDF graph (subject/predicate/object
 * triples) — confirm against the concrete implementations.
 */
trait Backend {
  /** Loads statements from the given file into the model (default serialization). */
  def loadFromFileIntoModel( fileName:String )
  /** Loads statements from the given file, parsed as the given serialization
    * language — presumably a name like "N-TRIPLE"; verify with implementations. */
  def loadFromFileIntoModel( fileName:String, lang:String )
  /** Adds a statement whose object position is a resource. */
  def addResourceStatement( s:String, p:String, o:String )
  /** Adds a statement whose object position is a literal value. */
  def addLiteralStatement( s:String, p:String, o:String )
  /** Executes a SPARQL query against the model and returns the result as a string. */
  def executeSPARQL( query:String ):String
  /** Returns the model serialized in the default format. */
  def getModel:String
  /** Returns the model serialized in the given serialization language. */
  def getModel( lang:String ):String
}
| flosse/semanticExperiments | backend/src/main/scala/backend/Backend.scala | Scala | gpl-3.0 | 370 |
/*
* Licensed to Cloudera, Inc. under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Cloudera, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloudera.hue.livy.server.batch
import com.cloudera.hue.livy.server.Session
/** Marker trait for batch sessions; currently adds nothing beyond [[Session]]. */
trait BatchSession extends Session {
}
| vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/apps/spark/java/livy-server/src/main/scala/com/cloudera/hue/livy/server/batch/BatchSession.scala | Scala | gpl-2.0 | 920 |
package org.jetbrains.plugins.hocon.psi
import com.intellij.extapi.psi.ASTWrapperPsiElement
import com.intellij.lang.ASTNode
/** Factory that maps HOCON AST nodes to their PSI element wrappers. */
object HoconPsiCreator {
  import org.jetbrains.plugins.hocon.parser.HoconElementType._
  /**
   * Creates the PSI element matching the AST node's HOCON element type;
   * any type without a dedicated PSI class falls back to a plain
   * [[ASTWrapperPsiElement]].
   */
  def createElement(ast: ASTNode) = ast.getElementType match {
    case Object => new HObject(ast)
    case ObjectEntries => new HObjectEntries(ast)
    case Include => new HInclude(ast)
    case Included => new HIncluded(ast)
    case ObjectField => new HObjectField(ast)
    case PrefixedField => new HPrefixedField(ast)
    case ValuedField => new HValuedField(ast)
    case Path => new HPath(ast)
    case Key => new HKey(ast)
    case Array => new HArray(ast)
    case Substitution => new HSubstitution(ast)
    case Concatenation => new HConcatenation(ast)
    case UnquotedString => new HUnquotedString(ast)
    case StringValue => new HStringValue(ast)
    case KeyPart => new HKeyPart(ast)
    case IncludeTarget => new HIncludeTarget(ast)
    case Number => new HNumber(ast)
    case Null => new HNull(ast)
    case Boolean => new HBoolean(ast)
    case _ => new ASTWrapperPsiElement(ast)
  }
}
| LPTK/intellij-scala | src/org/jetbrains/plugins/hocon/psi/HoconPsiCreator.scala | Scala | apache-2.0 | 1,123 |
package lila.forum
import lila.db.dsl._
import reactivemongo.api.bson._
/**
 * BSON (de)serialization handlers for the forum domain model, derived at
 * compile time by reactivemongo's `Macros`.
 * NOTE(review): declaration order presumably matters — later macro derivations
 * pick up earlier handlers from implicit scope; confirm before reordering.
 */
private object BSONHandlers {
  implicit val CategBSONHandler = Macros.handler[Categ]
  implicit val PostEditBSONHandler = Macros.handler[OldVersion]
  implicit val PostBSONHandler = Macros.handler[Post]
  implicit val TopicBSONHandler = Macros.handler[Topic]
}
| luanlv/lila | modules/forum/src/main/BSONHandlers.scala | Scala | mit | 343 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.mimir
import quasar.precog.util.NumericComparisons
import quasar.yggdrasil.bytecode._
import quasar.yggdrasil.table._
import quasar.time.DateTimeInterval
trait InfixLibModule[M[+ _]] extends ColumnarTableLibModule[M] {
trait InfixLib extends ColumnarTableLib {
import StdLib.{ BoolFrom, DoubleFrom, LongFrom, NumFrom, StrFrom, doubleIsDefined }
object Infix {
val InfixNamespace = Vector("std", "infix")
final def longOk(x: Long, y: Long) = true
final def doubleOk(x: Double, y: Double) = true
final def numOk(x: BigDecimal, y: BigDecimal) = true
final def longNeZero(x: Long, y: Long) = y != 0
final def doubleNeZero(x: Double, y: Double) = y != 0.0
final def numNeZero(x: BigDecimal, y: BigDecimal) = y != 0
class InfixOp2(name: String, longf: (Long, Long) => Long, doublef: (Double, Double) => Double, numf: (BigDecimal, BigDecimal) => BigDecimal)
extends Op2F2(InfixNamespace, name) {
val tpe = BinaryOperationType(JNumberT, JNumberT, JNumberT)
def f2: F2 = CF2P("builtin::infix::op2::" + name) {
case (c1: LongColumn, c2: LongColumn) =>
new LongFrom.LL(c1, c2, longOk, longf)
case (c1: LongColumn, c2: DoubleColumn) =>
new NumFrom.LD(c1, c2, numOk, numf)
case (c1: LongColumn, c2: NumColumn) =>
new NumFrom.LN(c1, c2, numOk, numf)
case (c1: DoubleColumn, c2: LongColumn) =>
new NumFrom.DL(c1, c2, numOk, numf)
case (c1: DoubleColumn, c2: DoubleColumn) =>
new DoubleFrom.DD(c1, c2, doubleOk, doublef)
case (c1: DoubleColumn, c2: NumColumn) =>
new NumFrom.DN(c1, c2, numOk, numf)
case (c1: NumColumn, c2: LongColumn) =>
new NumFrom.NL(c1, c2, numOk, numf)
case (c1: NumColumn, c2: DoubleColumn) =>
new NumFrom.ND(c1, c2, numOk, numf)
case (c1: NumColumn, c2: NumColumn) =>
new NumFrom.NN(c1, c2, numOk, numf)
}
}
val Add = new Op2F2(InfixNamespace, "add") {
val tpe = BinaryOperationType(JType.JRelativeT, JType.JRelativeT, JType.JRelativeT)
def f2: F2 = CF2P("builtin::infix::op2::add") {
case (c1: LongColumn, c2: LongColumn) =>
new LongFrom.LL(c1, c2, longOk, _ + _)
case (c1: LongColumn, c2: DoubleColumn) =>
new NumFrom.LD(c1, c2, numOk, _ + _)
case (c1: LongColumn, c2: NumColumn) =>
new NumFrom.LN(c1, c2, numOk, _ + _)
case (c1: DoubleColumn, c2: LongColumn) =>
new NumFrom.DL(c1, c2, numOk, _ + _)
case (c1: DoubleColumn, c2: DoubleColumn) =>
new DoubleFrom.DD(c1, c2, doubleOk, _ + _)
case (c1: DoubleColumn, c2: NumColumn) =>
new NumFrom.DN(c1, c2, numOk, _ + _)
case (c1: NumColumn, c2: LongColumn) =>
new NumFrom.NL(c1, c2, numOk, _ + _)
case (c1: NumColumn, c2: DoubleColumn) =>
new NumFrom.ND(c1, c2, numOk, _ + _)
case (c1: NumColumn, c2: NumColumn) =>
new NumFrom.NN(c1, c2, numOk, _ + _)
case (c1: OffsetDateTimeColumn, c2: IntervalColumn) =>
new OffsetDateTimeColumn {
def apply(row: Int) = c2(row).addToOffsetDateTime(c1(row))
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row)
}
case (c1: OffsetDateColumn, c2: IntervalColumn) =>
new OffsetDateColumn {
def apply(row: Int) = DateTimeInterval.addToOffsetDate(c1(row), c2(row).period)
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row) && c2(row).isDateLike
}
case (c1: OffsetTimeColumn, c2: IntervalColumn) =>
new OffsetTimeColumn {
def apply(row: Int) = DateTimeInterval.addToOffsetTime(c1(row), c2(row).duration)
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row) && c2(row).isTimeLike
}
case (c1: LocalDateTimeColumn, c2: IntervalColumn) =>
new LocalDateTimeColumn {
def apply(row: Int) = c2(row).addToLocalDateTime(c1(row))
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row)
}
case (c1: LocalDateColumn, c2: IntervalColumn) =>
new LocalDateColumn {
def apply(row: Int) = DateTimeInterval.addToLocalDate(c1(row), c2(row).period)
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row) && c2(row).isDateLike
}
case (c1: LocalTimeColumn, c2: IntervalColumn) =>
new LocalTimeColumn {
def apply(row: Int) = DateTimeInterval.addToLocalTime(c1(row), c2(row).duration)
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row) && c2(row).isTimeLike
}
case (c1: IntervalColumn, c2: OffsetDateTimeColumn) =>
new OffsetDateTimeColumn {
def apply(row: Int) = c1(row).addToOffsetDateTime(c2(row))
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row)
}
case (c1: IntervalColumn, c2: OffsetDateColumn) =>
new OffsetDateColumn {
def apply(row: Int) = DateTimeInterval.addToOffsetDate(c2(row), c1(row).period)
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row) && c1(row).isDateLike
}
case (c1: IntervalColumn, c2: OffsetTimeColumn) =>
new OffsetTimeColumn {
def apply(row: Int) = DateTimeInterval.addToOffsetTime(c2(row), c1(row).duration)
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row) && c1(row).isTimeLike
}
case (c1: IntervalColumn, c2: LocalDateTimeColumn) =>
new LocalDateTimeColumn {
def apply(row: Int) = c1(row).addToLocalDateTime(c2(row))
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row)
}
case (c1: IntervalColumn, c2: LocalDateColumn) =>
new LocalDateColumn {
def apply(row: Int) = DateTimeInterval.addToLocalDate(c2(row), c1(row).period)
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row) && c1(row).isDateLike
}
case (c1: IntervalColumn, c2: LocalTimeColumn) =>
new LocalTimeColumn {
def apply(row: Int) = DateTimeInterval.addToLocalTime(c2(row), c1(row).duration)
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row) && c1(row).isTimeLike
}
case (c1: IntervalColumn, c2: IntervalColumn) =>
new IntervalColumn {
def apply(row: Int) = c1(row).plus(c2(row))
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row)
}
}
}
val Sub = new Op2F2(InfixNamespace, "subtract") {
val tpe = BinaryOperationType(JType.JAbsoluteT, JType.JRelativeT, JType.JAbsoluteT)
def f2: F2 = CF2P("builtin::infix::op2::subtract") {
case (c1: LongColumn, c2: LongColumn) =>
new LongFrom.LL(c1, c2, longOk, _ - _)
case (c1: LongColumn, c2: DoubleColumn) =>
new NumFrom.LD(c1, c2, numOk, _ - _)
case (c1: LongColumn, c2: NumColumn) =>
new NumFrom.LN(c1, c2, numOk, _ - _)
case (c1: DoubleColumn, c2: LongColumn) =>
new NumFrom.DL(c1, c2, numOk, _ - _)
case (c1: DoubleColumn, c2: DoubleColumn) =>
new DoubleFrom.DD(c1, c2, doubleOk, _ - _)
case (c1: DoubleColumn, c2: NumColumn) =>
new NumFrom.DN(c1, c2, numOk, _ - _)
case (c1: NumColumn, c2: LongColumn) =>
new NumFrom.NL(c1, c2, numOk, _ - _)
case (c1: NumColumn, c2: DoubleColumn) =>
new NumFrom.ND(c1, c2, numOk, _ - _)
case (c1: NumColumn, c2: NumColumn) =>
new NumFrom.NN(c1, c2, numOk, _ - _)
case (c1: OffsetDateTimeColumn, c2: IntervalColumn) =>
new OffsetDateTimeColumn {
def apply(row: Int) = c2(row).subtractFromOffsetDateTime(c1(row))
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row)
}
case (c1: OffsetDateColumn, c2: IntervalColumn) =>
new OffsetDateColumn {
def apply(row: Int) = DateTimeInterval.subtractFromOffsetDate(c1(row), c2(row).period)
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row) && c2(row).isDateLike
}
case (c1: OffsetTimeColumn, c2: IntervalColumn) =>
new OffsetTimeColumn {
def apply(row: Int) = DateTimeInterval.subtractFromOffsetTime(c1(row), c2(row).duration)
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row) && c2(row).isTimeLike
}
case (c1: LocalDateTimeColumn, c2: IntervalColumn) =>
new LocalDateTimeColumn {
def apply(row: Int) = c2(row).subtractFromLocalDateTime(c1(row))
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row)
}
case (c1: LocalDateColumn, c2: IntervalColumn) =>
new LocalDateColumn {
def apply(row: Int) = DateTimeInterval.subtractFromLocalDate(c1(row), c2(row).period)
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row) && c2(row).isDateLike
}
case (c1: LocalTimeColumn, c2: IntervalColumn) =>
new LocalTimeColumn {
def apply(row: Int) = DateTimeInterval.subtractFromLocalTime(c1(row), c2(row).duration)
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row) && c2(row).isTimeLike
}
case (c1: LocalDateTimeColumn, c2: LocalDateTimeColumn) =>
new IntervalColumn {
def apply(row: Int) = DateTimeInterval.betweenLocalDateTime(c1(row), c2(row))
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row)
}
case (c1: LocalDateColumn, c2: LocalDateColumn) =>
new IntervalColumn {
def apply(row: Int) =
DateTimeInterval.ofPeriod(DateTimeInterval.betweenLocalDate(c1(row), c2(row)))
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row)
}
case (c1: LocalTimeColumn, c2: LocalTimeColumn) =>
new IntervalColumn {
def apply(row: Int) =
DateTimeInterval.ofDuration(DateTimeInterval.betweenLocalTime(c1(row), c2(row)))
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row)
}
case (c1: OffsetDateTimeColumn, c2: OffsetDateTimeColumn) =>
new IntervalColumn {
def apply(row: Int) = DateTimeInterval.betweenOffsetDateTime(c1(row), c2(row))
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row)
}
case (c1: OffsetDateColumn, c2: OffsetDateColumn) =>
new IntervalColumn {
def apply(row: Int) =
DateTimeInterval.ofPeriod(DateTimeInterval.betweenOffsetDate(c1(row), c2(row)))
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row)
}
case (c1: OffsetTimeColumn, c2: OffsetTimeColumn) =>
new IntervalColumn {
def apply(row: Int) =
DateTimeInterval.ofDuration(DateTimeInterval.betweenOffsetTime(c1(row), c2(row)))
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row)
}
case (c1: IntervalColumn, c2: IntervalColumn) =>
new IntervalColumn {
def apply(row: Int) = c1(row).minus(c2(row))
def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row)
}
}
}
  // Numeric and interval multiplication. The nine numeric cases promote mixed
  // operand representations (Long / Double / BigDecimal) through the *From
  // column builders; the final two cases scale a time interval by an integral
  // factor, accepting the factor on either side.
  val Mul = new Op2F2(InfixNamespace, "multiply") {
    val tpe = BinaryOperationType(JType.JRelativeT, JType.JRelativeT, JType.JRelativeT)
    def f2: F2 = CF2P("builtin::infix::op2::multiply") {
      case (c1: LongColumn, c2: LongColumn) =>
        new LongFrom.LL(c1, c2, longOk, _ * _)
      case (c1: LongColumn, c2: DoubleColumn) =>
        new NumFrom.LD(c1, c2, numOk, _ * _)
      case (c1: LongColumn, c2: NumColumn) =>
        new NumFrom.LN(c1, c2, numOk, _ * _)
      case (c1: DoubleColumn, c2: LongColumn) =>
        new NumFrom.DL(c1, c2, numOk, _ * _)
      case (c1: DoubleColumn, c2: DoubleColumn) =>
        new DoubleFrom.DD(c1, c2, doubleOk, _ * _)
      case (c1: DoubleColumn, c2: NumColumn) =>
        new NumFrom.DN(c1, c2, numOk, _ * _)
      case (c1: NumColumn, c2: LongColumn) =>
        new NumFrom.NL(c1, c2, numOk, _ * _)
      case (c1: NumColumn, c2: DoubleColumn) =>
        new NumFrom.ND(c1, c2, numOk, _ * _)
      case (c1: NumColumn, c2: NumColumn) =>
        new NumFrom.NN(c1, c2, numOk, _ * _)
      case (c1: LongColumn, c2: IntervalColumn) =>
        new IntervalColumn {
          // NOTE(review): the Long factor is truncated to Int here — confirm
          // callers never supply factors outside Int range.
          def apply(row: Int) = c2(row).multiply(c1(row).toInt)
          def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row)
        }
      case (c1: IntervalColumn, c2: LongColumn) =>
        new IntervalColumn {
          // NOTE(review): same Long-to-Int truncation as the case above.
          def apply(row: Int) = c1(row).multiply(c2(row).toInt)
          def isDefinedAt(row: Int) = c1.isDefinedAt(row) && c2.isDefinedAt(row)
        }
    }
  }
// div needs to make sure to use Double even for division with longs
  // Numeric division. Long/Long and Double/Double divide as IEEE doubles;
  // every case involving a BigDecimal divides under DECIMAL128 precision so
  // non-terminating quotients (e.g. 1/3) do not throw ArithmeticException.
  // The *NeZero predicates make rows with a zero divisor undefined instead of
  // producing Infinity/NaN or throwing.
  val Div = new Op2F2(InfixNamespace, "divide") {
    def doublef(x: Double, y: Double) = x / y
    // DECIMAL128 bounds the quotient's precision for BigDecimal division.
    val context = java.math.MathContext.DECIMAL128
    def numf(x: BigDecimal, y: BigDecimal) = x(context) / y(context)
    val tpe = BinaryOperationType(JNumberT, JNumberT, JNumberT)
    def f2: F2 = CF2P("builtin::infix::div") {
      case (c1: LongColumn, c2: LongColumn) =>
        new DoubleFrom.LL(c1, c2, doubleNeZero, doublef)
      case (c1: LongColumn, c2: DoubleColumn) =>
        new NumFrom.LD(c1, c2, numNeZero, numf)
      case (c1: LongColumn, c2: NumColumn) =>
        new NumFrom.LN(c1, c2, numNeZero, numf)
      case (c1: DoubleColumn, c2: LongColumn) =>
        new NumFrom.DL(c1, c2, numNeZero, numf)
      case (c1: DoubleColumn, c2: DoubleColumn) =>
        new DoubleFrom.DD(c1, c2, doubleNeZero, doublef)
      case (c1: DoubleColumn, c2: NumColumn) =>
        new NumFrom.DN(c1, c2, numNeZero, numf)
      case (c1: NumColumn, c2: LongColumn) =>
        new NumFrom.NL(c1, c2, numNeZero, numf)
      case (c1: NumColumn, c2: DoubleColumn) =>
        new NumFrom.ND(c1, c2, numNeZero, numf)
      case (c1: NumColumn, c2: NumColumn) =>
        new NumFrom.NN(c1, c2, numNeZero, numf)
    }
  }
  // Numeric remainder. Uses the representation-specific % of Long, Double or
  // BigDecimal; like Div, rows with a zero divisor are made undefined by the
  // *NeZero predicates rather than throwing.
  val Mod = new Op2F2(InfixNamespace, "mod") {
    val tpe = BinaryOperationType(JNumberT, JNumberT, JNumberT)
    def longMod(x: Long, y: Long) = x % y
    def doubleMod(x: Double, y: Double) = x % y
    def numMod(x: BigDecimal, y: BigDecimal) = x % y
    def f2: F2 = CF2P("builtin::infix::mod") {
      case (c1: LongColumn, c2: LongColumn) =>
        new LongFrom.LL(c1, c2, longNeZero, longMod)
      case (c1: LongColumn, c2: DoubleColumn) =>
        new NumFrom.LD(c1, c2, numNeZero, numMod)
      case (c1: LongColumn, c2: NumColumn) =>
        new NumFrom.LN(c1, c2, numNeZero, numMod)
      case (c1: DoubleColumn, c2: LongColumn) =>
        new NumFrom.DL(c1, c2, numNeZero, numMod)
      case (c1: DoubleColumn, c2: DoubleColumn) =>
        new DoubleFrom.DD(c1, c2, doubleNeZero, doubleMod)
      case (c1: DoubleColumn, c2: NumColumn) =>
        new NumFrom.DN(c1, c2, numNeZero, numMod)
      case (c1: NumColumn, c2: LongColumn) =>
        new NumFrom.NL(c1, c2, numNeZero, numMod)
      case (c1: NumColumn, c2: DoubleColumn) =>
        new NumFrom.ND(c1, c2, numNeZero, numMod)
      case (c1: NumColumn, c2: NumColumn) =>
        new NumFrom.NN(c1, c2, numNeZero, numMod)
    }
  }
// Separate trait for use in MathLib
  // Exponentiation over any mix of numeric column types; every case converts
  // both operands to Double and delegates to scala.math.pow, so the result is
  // always a Double column. Shared with MathLib via the abstract cf2pName.
  trait Power {
    // Name under which the implementation is registered (provided by mixers).
    def cf2pName: String
    val tpe = BinaryOperationType(JNumberT, JNumberT, JNumberT)
    // Rows are kept only when both operands are representable doubles.
    def defined(x: Double, y: Double) = doubleIsDefined(x) && doubleIsDefined(y)
    def f2: F2 = CF2P(cf2pName) {
      case (c1: DoubleColumn, c2: DoubleColumn) =>
        new DoubleFrom.DD(c1, c2, defined, scala.math.pow)
      case (c1: DoubleColumn, c2: LongColumn) =>
        new DoubleFrom.DL(c1, c2, defined, scala.math.pow)
      case (c1: DoubleColumn, c2: NumColumn) =>
        new DoubleFrom.DN(c1, c2, defined, scala.math.pow)
      case (c1: LongColumn, c2: DoubleColumn) =>
        new DoubleFrom.LD(c1, c2, defined, scala.math.pow)
      case (c1: NumColumn, c2: DoubleColumn) =>
        new DoubleFrom.ND(c1, c2, defined, scala.math.pow)
      case (c1: LongColumn, c2: LongColumn) =>
        new DoubleFrom.LL(c1, c2, defined, scala.math.pow)
      case (c1: LongColumn, c2: NumColumn) =>
        new DoubleFrom.LN(c1, c2, defined, scala.math.pow)
      case (c1: NumColumn, c2: LongColumn) =>
        new DoubleFrom.NL(c1, c2, defined, scala.math.pow)
      case (c1: NumColumn, c2: NumColumn) =>
        new DoubleFrom.NN(c1, c2, defined, scala.math.pow)
    }
  }
  // The infix `pow` operator: the shared Power implementation registered
  // under the infix namespace.
  object Pow extends Op2F2(InfixNamespace, "pow") with Power {
    val cf2pName = "builtin::infix::pow"
  }
  // Generic comparison operator: computes a three-way comparison of the two
  // operands and maps its sign through `f` (e.g. `_ < 0` for "less than").
  // Numeric comparisons work across representations via NumericComparisons;
  // strings and each date/time column type compare only against the same type.
  // The `(x, y) => true` argument means no extra definedness restriction
  // beyond both operand columns being defined at the row.
  class CompareOp2(name: String, f: Int => Boolean) extends Op2F2(InfixNamespace, name) {
    val tpe = BinaryOperationType(JNumberT, JNumberT, JBooleanT)
    import NumericComparisons.compare
    def f2: F2 = CF2P("builtin::infix::compare") {
      case (c1: LongColumn, c2: LongColumn) =>
        new BoolFrom.LL(c1, c2, (x, y) => true, (x, y) => f(compare(x, y)))
      case (c1: LongColumn, c2: DoubleColumn) =>
        new BoolFrom.LD(c1, c2, (x, y) => true, (x, y) => f(compare(x, y)))
      case (c1: LongColumn, c2: NumColumn) =>
        new BoolFrom.LN(c1, c2, (x, y) => true, (x, y) => f(compare(x, y)))
      case (c1: DoubleColumn, c2: LongColumn) =>
        new BoolFrom.DL(c1, c2, (x, y) => true, (x, y) => f(compare(x, y)))
      case (c1: DoubleColumn, c2: DoubleColumn) =>
        new BoolFrom.DD(c1, c2, (x, y) => true, (x, y) => f(compare(x, y)))
      case (c1: DoubleColumn, c2: NumColumn) =>
        new BoolFrom.DN(c1, c2, (x, y) => true, (x, y) => f(compare(x, y)))
      case (c1: NumColumn, c2: LongColumn) =>
        new BoolFrom.NL(c1, c2, (x, y) => true, (x, y) => f(compare(x, y)))
      case (c1: NumColumn, c2: DoubleColumn) =>
        new BoolFrom.ND(c1, c2, (x, y) => true, (x, y) => f(compare(x, y)))
      case (c1: NumColumn, c2: NumColumn) =>
        new BoolFrom.NN(c1, c2, (x, y) => true, (x, y) => f(compare(x, y)))
      case (c1: StrColumn, c2: StrColumn) =>
        new BoolFrom.SS(c1, c2, (x, y) => true, (x, y) => f(x.compareTo(y)))
      case (c1: OffsetDateTimeColumn, c2: OffsetDateTimeColumn) =>
        new BoolFrom.OdtmOdtm(c1, c2, (x, y) => true, (x, y) => f(compare(x, y)))
      case (c1: OffsetDateColumn, c2: OffsetDateColumn) =>
        new BoolFrom.OdtOdt(c1, c2, (x, y) => true, (x, y) => f(compare(x, y)))
      case (c1: OffsetTimeColumn, c2: OffsetTimeColumn) =>
        new BoolFrom.OtmOtm(c1, c2, (x, y) => true, (x, y) => f(compare(x, y)))
      case (c1: LocalDateTimeColumn, c2: LocalDateTimeColumn) =>
        new BoolFrom.LdtmLdtm(c1, c2, (x, y) => true, (x, y) => f(compare(x, y)))
      case (c1: LocalDateColumn, c2: LocalDateColumn) =>
        new BoolFrom.LdtLdt(c1, c2, (x, y) => true, (x, y) => f(compare(x, y)))
      case (c1: LocalTimeColumn, c2: LocalTimeColumn) =>
        new BoolFrom.LtmLtm(c1, c2, (x, y) => true, (x, y) => f(compare(x, y)))
    }
  }
  // The four ordering operators, expressed as predicates on the sign of the
  // three-way comparison computed by CompareOp2.
  val Lt = new CompareOp2("lt", _ < 0)
  val LtEq = new CompareOp2("lte", _ <= 0)
  val Gt = new CompareOp2("gt", _ > 0)
  val GtEq = new CompareOp2("gte", _ >= 0)
  // Strict binary boolean operator: the result is defined only where BOTH
  // operand columns are defined (contrast with the three-valued And/Or below,
  // which tolerate one undefined side).
  class BoolOp2(name: String, f: (Boolean, Boolean) => Boolean) extends Op2F2(InfixNamespace, name) {
    val tpe = BinaryOperationType(JBooleanT, JBooleanT, JBooleanT)
    def f2: F2 = CF2P("builtin::infix::bool") {
      case (c1: BoolColumn, c2: BoolColumn) => new BoolFrom.BB(c1, c2, f)
    }
  }
// TODO find the commonalities and abstract these two
  // Logical AND with three-valued (SQL-style) semantics over definedness:
  // an undefined operand is treated as "unknown", and the result is defined
  // only when it can be determined regardless of the unknown value.
  val And = new OpNFN(InfixNamespace, "and") {
    val fn: FN = CFNP("builtin::infix::bool::and") {
      // One column present, the other missing entirely: `false AND unknown`
      // is false, so the result is defined exactly where the value is false.
      case List(c: BoolColumn) =>
        new BoolColumn {
          def isDefinedAt(row: Int): Boolean = c.isDefinedAt(row) && !c(row)
          def apply(row: Int): Boolean = c(row)
        }
      case List(c1: BoolColumn, c2: BoolColumn) =>
        new BoolColumn {
          // Defined when both sides are defined, or when the single defined
          // side is false (which forces the AND to false).
          def isDefinedAt(row: Int): Boolean = {
            if (c1.isDefinedAt(row) && c2.isDefinedAt(row))
              true
            else if (c1.isDefinedAt(row) && !c2.isDefinedAt(row))
              !c1(row)
            else if (!c1.isDefinedAt(row) && c2.isDefinedAt(row))
              !c2(row)
            else
              false
          }
          // When only one side is defined, its value (necessarily false where
          // isDefinedAt holds) is the result.
          def apply(row: Int): Boolean = {
            if (c1.isDefinedAt(row) && c2.isDefinedAt(row))
              c1(row) && c2(row)
            else if (c1.isDefinedAt(row) && !c2.isDefinedAt(row))
              c1(row)
            else if (!c1.isDefinedAt(row) && c2.isDefinedAt(row))
              c2(row)
            else
              false
          }
        }
    }
  }
  // Logical OR with three-valued (SQL-style) semantics over definedness —
  // the dual of And above: `true OR unknown` is true, so a row with one
  // undefined side is defined only where the defined side is true.
  val Or = new OpNFN(InfixNamespace, "or") {
    val fn: FN = CFNP("builtin::infix::bool::or") {
      // One column present, the other missing entirely: defined exactly
      // where the value is true.
      case List(c: BoolColumn) =>
        new BoolColumn {
          def isDefinedAt(row: Int): Boolean = c.isDefinedAt(row) && c(row)
          def apply(row: Int): Boolean = c(row)
        }
      case List(c1: BoolColumn, c2: BoolColumn) =>
        new BoolColumn {
          // Defined when both sides are defined, or when the single defined
          // side is true (which forces the OR to true).
          def isDefinedAt(row: Int): Boolean = {
            if (c1.isDefinedAt(row) && c2.isDefinedAt(row))
              true
            else if (c1.isDefinedAt(row) && !c2.isDefinedAt(row))
              c1(row)
            else if (!c1.isDefinedAt(row) && c2.isDefinedAt(row))
              c2(row)
            else
              false
          }
          def apply(row: Int): Boolean = {
            if (c1.isDefinedAt(row) && c2.isDefinedAt(row))
              c1(row) || c2(row)
            else if (c1.isDefinedAt(row) && !c2.isDefinedAt(row))
              c1(row)
            else if (!c1.isDefinedAt(row) && c2.isDefinedAt(row))
              c2(row)
            else
              false
          }
        }
    }
  }
  // String concatenation; rows where either operand is null are undefined.
  val concatString = new Op2F2(InfixNamespace, "concatString") {
    //@deprecated, see the DEPRECATED comment in StringLib
    val tpe = BinaryOperationType(JTextT, JTextT, JTextT)
    private def build(c1: StrColumn, c2: StrColumn) =
      new StrFrom.SS(c1, c2, _ != null && _ != null, _ + _)
    // NOTE(review): the registered name uses a single colon
    // ("builtin::infix:concatString") while every sibling op uses "::" —
    // confirm whether anything matches on this name before normalising it.
    def f2: F2 = CF2P("builtin::infix:concatString") {
      case (c1: StrColumn, c2: StrColumn) => build(c1, c2)
    }
  }
}
}
}
| jedesah/Quasar | mimir/src/main/scala/quasar/mimir/InfixLib.scala | Scala | apache-2.0 | 24,667 |
package almanac.api
import almanac.model.Metric
/** A delivery target for batches of metrics. Implementations typically hold
  * external resources, hence the [[AutoCloseable]] contract.
  */
trait MetricSink extends AutoCloseable {
  /** Delivers one batch of metrics to the underlying sink. */
  def send(metrics: Seq[Metric])
  /** Releases any resources held by this sink. */
  override def close(): Unit
}
/** Factory abstraction so callers can obtain fresh [[MetricSink]] instances
  * without knowing the concrete sink implementation.
  */
trait MetricSinkFactory {
  /** Creates a new sink; the caller is responsible for closing it. */
  def createSink: MetricSink
}
| adcade/almanac-oss | src/main/scala/almanac/api/MetricSink.scala | Scala | mit | 214 |
package score.discord.canti.jdamocks
import java.util
import java.util.concurrent.{ExecutorService, ScheduledExecutorService}
import net.dv8tion.jda.api.entities.*
import net.dv8tion.jda.api.hooks.IEventManager
import net.dv8tion.jda.api.interactions.commands.Command
import net.dv8tion.jda.api.interactions.commands.build.CommandData
import net.dv8tion.jda.api.managers.{AudioManager, DirectAudioController, Presence}
import net.dv8tion.jda.api.requests.{GatewayIntent, RestAction}
import net.dv8tion.jda.api.requests.restaction.{
CommandCreateAction, CommandEditAction, CommandListUpdateAction, GuildAction
}
import net.dv8tion.jda.api.sharding.ShardManager
import net.dv8tion.jda.api.utils.cache.{CacheFlag, CacheView, SnowflakeCacheView}
import net.dv8tion.jda.api.{AccountType, JDA, Permission}
import okhttp3.OkHttpClient
import scala.jdk.CollectionConverters.*
/** Minimal in-memory stand-in for a JDA instance, for use in tests.
  *
  * Only the members the test-suite exercises are implemented; every other
  * override is left as `???` and throws NotImplementedError if touched.
  */
class FakeJda extends JDA:
  // All guilds created through makeGuild, keyed by their snowflake id.
  private var guilds = Map.empty[Long, Guild]
  // Monotonically increasing source of fake snowflake ids.
  private var _nextId: Long = 123456789900L
  // Returns a fresh, unique fake snowflake id.
  def nextId: Long =
    _nextId += 1
    _nextId
  // Creates a new fake guild and registers it with this fake JDA.
  def makeGuild(): FakeGuild =
    val guild = FakeGuild(this, nextId)
    guilds += guild.getIdLong -> guild
    guild
  override def getStatus: JDA.Status = ???
  override def setEventManager(manager: IEventManager): Unit = ???
  override def addEventListener(listeners: Array[? <: Object | Null]): Unit = ???
  override def removeEventListener(listeners: Array[? <: Object | Null]): Unit = ???
  override def getRegisteredListeners: util.List[AnyRef] = ???
  override def createGuild(name: String): GuildAction = ???
  override def getAudioManagerCache: CacheView[AudioManager] = ???
  override def getUserCache: SnowflakeCacheView[User] = ???
  override def getMutualGuilds(users: Array[? <: User]): util.List[Guild] = ???
  override def getMutualGuilds(users: util.Collection[User]): util.List[Guild] = ???
  override def getGuildCache: SnowflakeCacheView[Guild] = ???
  override def getRoleCache: SnowflakeCacheView[Role] = ???
  override def getCategoryCache: SnowflakeCacheView[Category] = ???
  // Aggregates the text channels of every registered guild into one cache view.
  override def getTextChannelCache: SnowflakeCacheView[TextChannel] =
    ScalaSnowflakeCacheView[GuildChannel, TextChannel](
      guilds.values
        .flatMap(_.getTextChannels.asScala)
        .groupBy(_.getIdLong)
        .view
        .mapValues(_.head)
        .toMap,
      _.getName
    )
  override def getVoiceChannelCache: SnowflakeCacheView[VoiceChannel] = ???
  override def getPrivateChannelCache: SnowflakeCacheView[PrivateChannel] = ???
  override def getEmoteCache: SnowflakeCacheView[Emote] = ???
  override def getSelfUser: SelfUser = ???
  override def getPresence: Presence = ???
  override def getShardInfo: JDA.ShardInfo = ???
  override def getToken: String = ???
  override def getResponseTotal: Long = ???
  override def getMaxReconnectDelay: Int = ???
  override def setAutoReconnect(reconnect: Boolean): Unit = ???
  override def setRequestTimeoutRetry(retryOnTimeout: Boolean): Unit = ???
  override def isAutoReconnect: Boolean = ???
  override def isBulkDeleteSplittingEnabled: Boolean = ???
  override def shutdown(): Unit = ???
  override def shutdownNow(): Unit = ???
  override def getAccountType: AccountType = ???
  override def getGatewayPing: Long = ???
  override def awaitStatus(status: JDA.Status): JDA = ???
  override def getRateLimitPool: ScheduledExecutorService = ???
  override def getGatewayPool: ScheduledExecutorService = ???
  override def getCallbackPool: ExecutorService = ???
  override def getHttpClient: OkHttpClient = ???
  override def getDirectAudioController: DirectAudioController = ???
  override def getStoreChannelCache: SnowflakeCacheView[StoreChannel] = ???
  override def getEventManager: IEventManager = ???
  override def retrieveApplicationInfo(): RestAction[ApplicationInfo] = ???
  // Deterministic fake invite URL embedding the requested permission names.
  override def getInviteUrl(permissions: Array[? <: Permission]): String =
    s"https://test.invalid/invite?perms=${permissions.map(_.name).mkString(",")}"
  override def getInviteUrl(permissions: util.Collection[Permission]): String = getInviteUrl(
    permissions.asScala.toArray
  )
  override def getShardManager: ShardManager = ???
  override def retrieveWebhookById(webhookId: String): RestAction[Webhook] = ???
  override def awaitStatus(status: JDA.Status, failOn: Array[? <: JDA.Status]): JDA = ???
  override def getUnavailableGuilds: util.Set[String] = ???
  override def getGatewayIntents: util.EnumSet[GatewayIntent] = ???
  override def unloadUser(userId: Long): Boolean = ???
  override def cancelRequests(): Int = ???
  override def retrieveUserById(id: Long, update: Boolean): RestAction[User] = ???
  override def isUnavailable(guildId: Long): Boolean = ???
  override def openPrivateChannelById(userId: Long): RestAction[PrivateChannel] = ???
  override def getCacheFlags: util.EnumSet[CacheFlag] = ???
  override def retrieveCommands(): RestAction[util.List[Command]] = ???
  override def retrieveCommandById(id: String): RestAction[Command] = ???
  override def upsertCommand(command: CommandData): CommandCreateAction = ???
  override def updateCommands(): CommandListUpdateAction = ???
  override def editCommandById(id: String): CommandEditAction = ???
  override def deleteCommandById(commandId: String): RestAction[Void] = ???
  override def setRequiredScopes(scopes: util.Collection[String]): JDA = ???
  override def createGuildFromTemplate(code: String, name: String, icon: Icon): RestAction[Void] =
    ???
end FakeJda
| ScoreUnder/canti-bot | src/test/scala/score/discord/canti/jdamocks/FakeJda.scala | Scala | agpl-3.0 | 5,510 |
package functors.contravariant
// Placeholder for a contravariant functor abstraction; no operations are
// defined yet (compare the usual `contramap: (B => A) => F[A] => F[B]`).
trait Contravariant {
}
| OpenGenus/cosmos | code/design_pattern/src/functional_patterns/functional_patterns/scala/src/main/scala/functors/contravariant/contravariant.scala | Scala | gpl-3.0 | 57 |
// scalac: -opt-warnings
import scala.math.Ordering
/** The heart of the problem - we want to retain the ordering when
* using `++` on sorted maps.
*
* There are 2 `++` overloads - a generic one in traversables and
* a map-specific one in `MapLike` - which knows about the ordering.
*
* The problem here is that the expected return type for the expression
* in which `++` appears drives the decision of the overload that needs
* to be taken.
* The `collection.SortedMap` does not have `++` overridden to return
* `SortedMap`, but `immutable.Map` instead.
* This is why `collection.SortedMap` used to resort to the generic
* `TraversableLike.++` which knows nothing about the ordering.
*
* To avoid `collection.SortedMap`s resort to the more generic `TraversableLike.++`,
* we override the `MapLike.++` overload in `collection.SortedMap` to return
* the proper type `SortedMap`.
*/
object Test {
  def main(args: Array[String]): Unit = {
    testCollectionSorted()
    testImmutableSorted()
  }
  // Builds two collection.SortedMaps under a REVERSED ordering and checks that
  // `++=`, `++` and concatenation with a plain List all keep that ordering —
  // i.e. the SortedMap-specific overload is selected, not TraversableLike.++.
  // The exact expressions are the point of this regression test; do not
  // "simplify" them, or a different overload may be chosen.
  def testCollectionSorted(): Unit = {
    import collection._
    val order = implicitly[Ordering[Int]].reverse
    var m1: SortedMap[Int, String] = SortedMap.empty[Int, String](order)
    var m2: SortedMap[Int, String] = SortedMap.empty[Int, String](order)
    m1 ++= List(1 -> "World")
    m1 ++= List(2 -> "Hello")
    m2 ++= List(4 -> "Bar")
    m2 ++= List(5 -> "Foo")
    // Ascription to SortedMap forces overload resolution to preserve sortedness.
    val m3: SortedMap[Int, String] = m1 ++ m2
    println(m1)
    println(m2)
    println(m3)
    println(m1 ++ List(3 -> "?"))
  }
  // Same scenario for immutable.SortedMap, using `+=` / `+` for single entries.
  def testImmutableSorted(): Unit = {
    import collection.immutable._
    val order = implicitly[Ordering[Int]].reverse
    var m1: SortedMap[Int, String] = SortedMap.empty[Int, String](order)
    var m2: SortedMap[Int, String] = SortedMap.empty[Int, String](order)
    m1 += (1 -> "World")
    m1 += (2 -> "Hello")
    m2 += (4 -> "Bar")
    m2 += (5 -> "Foo")
    val m3: SortedMap[Int, String] = m1 ++ m2
    println(m1)
    println(m2)
    println(m3)
    println(m1 + (3 -> "?"))
  }
}
| lrytz/scala | test/files/run/t3326.scala | Scala | apache-2.0 | 2,038 |
package com.zhranklin.homepage.notice
import com.zhranklin.homepage.JsoupUtil
import com.zhranklin.homepage.Util._
import org.jsoup.Jsoup
import org.jsoup.nodes.{Document, Element}
/** Extracts notice entries (link + title) from an index page. */
trait UrlService {
  /** Fetches the page at `url` and returns the notice links found on it. */
  def noticeUrlsFromUrl(url: String): Iterable[NoticeEntry]
}
/** Template implementation of [[UrlService]]: fetch the page, let a subclass
  * pick out raw link elements, then convert each one into a [[NoticeEntry]].
  */
trait AbstractUrlService extends UrlService {
  /** Raw element type produced by the extraction step. */
  type E
  /** Selects the raw link elements from a fetched document. */
  protected def rawUrlsFromDoc(doc: Document): Iterable[E]
  /** Converts one raw element into a notice entry. */
  protected def extractFromRawUrl(a: E): NoticeEntry
  def noticeUrlsFromUrl(url: String): Iterable[NoticeEntry] = {
    val document = Jsoup.connect(url).get
    val rawEntries = rawUrlsFromDoc(document)
    rawEntries.map(extractFromRawUrl)
  }
}
/** [[AbstractUrlService]] variant that selects anchors whose href matches a
  * subclass-supplied regex, and uses the anchor text as the notice title.
  */
trait SelectorUrlService
    extends AbstractUrlService {
  // Regex applied to the href attribute via Jsoup's [href~=...] selector.
  val urlPattern: String
  type E = Element
  def rawUrlsFromDoc(doc: Document) = doc.select(s"a[href~=$urlPattern]").asScala
  // "abs:href" resolves the link against the document's base URL.
  def extractFromRawUrl(a: Element) = NoticeEntry(a.attr("abs:href"), Some(a.text))
}
// Default, shared instance of the heuristic URL extractor below.
object UniversalUrlService extends UniversalUrlService
/** Heuristic [[UrlService]]: extracts notice links from an arbitrary index
  * page by grouping anchor URLs on common (prefix, extension) keys and keeping
  * groups that are large enough and whose link texts are long enough to look
  * like article titles. All numeric thresholds are empirical tuning knobs.
  */
trait UniversalUrlService extends UrlService with JsoupUtil {
  def noticeUrlsFromUrl(indexUrl: String) = {
    println(s"index: $indexUrl")
    // File extension of the URL ("" when there is none).
    def getPostFix(url: String) = """(?<=\\.)\\w+$""".r.findFirstIn(url).getOrElse("")
    // Score of a prefix length: positive while grouping at this length still
    // yields several sizeable clusters. NOTE(review): counts.max throws on an
    // empty group set — presumably unreachable here, but worth confirming.
    def properGroup(urls: Seq[Element], pre: Int) = {
      val counts = groupByPreFix(urls, pre).map(_._2.size)
      counts.count(_ > 5) - (urls.size - counts.max) / 18
    }
    // Longest prefix length (up to 200) that still groups "properly".
    // NOTE(review): postfix `last` on an empty takeWhile result throws; the
    // catch below expects UnsupportedOperationException — verify that matches
    // what the Scala version in use actually throws for an empty Range.
    def tryGroup(urls: Seq[Element]) = 1 to 200 takeWhile {properGroup(urls, _) > 0} last
    def groupByPreFix(urls: Seq[Element], pre: Int) = urls.groupBy(e ⇒ (e.href.take(pre), getPostFix(e.href)))
    // Average link-text length > 7: crude filter for title-like anchors.
    def longEnough(urls: Seq[Element]) = urls.map(_.text.length).sum / urls.size.asInstanceOf[Double] > 7
    val doc = Jsoup.connect(indexUrl).get
    doc.body.select("a[href]").asScala.map(a ⇒ a.html(a.text))
    // Strip everything that neither is nor contains a link.
    doc.body select "*:not(:has(a[href]))" select "*:not(a[href])" remove
    // Candidate anchors: elements that are the 5th-or-later last-of-type sibling.
    val urls = doc.select("*:last-of-type:nth-of-type(n+5)").asScala
      .flatMap(_.parent.select("a[href]").asScala)
      .filterNot(_.attr("href").endsWith("/"))
    try {
      val urlsLongEnough = groupByPreFix(urls, tryGroup(urls)).values.filter(longEnough).flatten.toSeq
      val preLen = tryGroup(urlsLongEnough)
      // Shrink the prefix so a trailing numeric id is not part of the key.
      val shrunkenPreLen = "^.*?(?=\\\\d+$)".r.findFirstIn(urlsLongEnough.head.href.take(preLen)).map(_.length).getOrElse(preLen)
      val ret = groupByPreFix(urlsLongEnough, shrunkenPreLen).values
        .filter(_.size > 5).flatten.map(e ⇒ NoticeEntry(e.absHref, Some(e.text)))
      println(s"*****\\nret: $ret\\n*****")
      ret
    } catch {
      // Heuristics failed (e.g. no candidate groups) — return no entries.
      case e: UnsupportedOperationException ⇒ Nil
      case e: Exception ⇒
        e.printStackTrace(Console.out)
        Nil
    }
  }
}
| zhranklin/Private_Blog | server/src/main/scala/com/zhranklin/homepage/notice/UrlService.scala | Scala | gpl-3.0 | 2,619 |
/*
* Copyright (c) 2012, 2013, 2014, 2015, 2016 SURFnet BV
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
* following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this list of conditions and the following
* disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided with the distribution.
* * Neither the name of the SURFnet BV nor the names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package models
import java.net.URI
import java.util.Date
import org.joda.time.DateTime
import org.joda.time.format.ISODateTimeFormat
import org.joda.time.Period
import scala.xml.NodeSeq.Empty
object Reserve {
  // Algorithms accepted for the pathComputationAlgorithm field; the value is
  // upper-cased when rendered into the request body.
  val PathComputationAlgorithms = Seq("Chain", "Sequential", "Tree")
}
/** NSI v2 `reserve` request message.
  *
  * Renders the reservation — schedule, service type and the point-to-point
  * service definition (STPs, bandwidth, ERO, protection, path computation
  * algorithm) — as the SOAP body of an NSI reserve call.
  */
case class Reserve(
    description: Option[String],
    startDate: Option[Date],
    endDate: Date,
    serviceType: String,
    source: Port,
    destination: Port,
    ero: List[String],
    bandwidth: Long,
    version: Int = 1,
    correlationId: String,
    replyTo: Option[URI],
    requesterNsa: String,
    provider: Provider,
    globalReservationId: Option[String] = None,
    unprotected: Boolean = false,
    pathComputationAlgorithm: Option[String] = None
) extends NsiRequest(correlationId, replyTo, requesterNsa, provider, addsTrace = true) {
  import NsiRequest._
  override def soapActionSuffix = "reserve"
  // SOAP body: reservation criteria wrapping the schedule and the P2P service.
  override def nsiV2Body =
    <type:reserve>
      { globalReservationIdField }
      { descriptionField }
      <criteria version={ version.toString }>
        <schedule>
          { startTimeField }
          { endTimeField }
        </schedule>
        <serviceType>{ serviceType }</serviceType>
        { service }
      </criteria>
    </type:reserve>
  // Optional <startTime>; omitted entirely when no start date is given.
  private def startTimeField = startDate match {
    case Some(date) => <startTime>{ ISODateTimeFormat.dateTime().print(new DateTime(date)) }</startTime>
    case None => Empty
  }
  // <globalReservationId> is always present, but empty when no id was given.
  private def globalReservationIdField = globalReservationId match {
    case Some(g) => <globalReservationId>{ g }</globalReservationId>
    case None => <globalReservationId/>
  }
  // Optional <description>; omitted entirely when absent.
  private def descriptionField = description match {
    case Some(d) => <description>{ d }</description>
    case None => Empty
  }
  private def endTimeField =
    <endTime>{ ISODateTimeFormat.dateTime().print(new DateTime(endDate)) }</endTime>
  // True when at least one ERO member is non-blank, i.e. an <ero> element
  // must be emitted.
  private def eroPresent: Boolean = ero.exists(_.nonEmpty)
  // Point-to-point service definition. Blank ERO members are skipped; the
  // remaining members are numbered from 0 in list order.
  private def service =
    <p2p:p2ps xmlns:p2p={ NsiV2Point2PointNamespace }>
      <capacity>{ bandwidth }</capacity>
      <directionality>Bidirectional</directionality>
      <symmetricPath>true</symmetricPath>
      <sourceSTP>{ source.stpId }</sourceSTP>
      <destSTP>{ destination.stpId }</destSTP>
      {
        if (eroPresent)
          <ero>
            {
              for ((member, order) <- ero.filter(_.nonEmpty).zipWithIndex)
                yield <orderedSTP order={ order.toString }><stp>{ member }</stp></orderedSTP>
            }
          </ero>
      }
      {
        if (unprotected)
          <parameter type="protection">UNPROTECTED</parameter>
        else
          <parameter type="protection">PROTECTED</parameter>
      }
      {
        pathComputationAlgorithm.map(x => <parameter type="pathComputationAlgorithm">{x.toUpperCase}</parameter>).orNull
      }
    </p2p:p2ps>
  // NOTE(review): not referenced anywhere in this class — presumably kept for
  // a legacy (NSI v1) request body. Confirm before removing.
  private def possibleUnprotected =
    if (unprotected)
      <serviceAttributes>
        <guaranteed>
          <saml:Attribute
            xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
            NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:basic" Name="sNCP">
            <saml:AttributeValue
              xsi:type="xs:string" xmlns:xs="http://www.w3.org/2001/XMLSchema"
              xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
              Unprotected
            </saml:AttributeValue>
          </saml:Attribute>
        </guaranteed>
      </serviceAttributes>
    else
      Empty
}
| BandwidthOnDemand/nsi-requester | app/models/Reserve.scala | Scala | bsd-3-clause | 5,123 |
/**
* Created by hdd on 5/27/16.
*/
class CutDown {
  // No-op placeholder.
  // NOTE(review): `main` is declared on a class, so it is NOT a JVM entry
  // point; move it to a companion object to make the program runnable.
  // Explicit `: Unit =` replaces the deprecated procedure syntax.
  def main(args: Array[String]): Unit = {
  }
}
| 844348677/IBack | Update/src/main/scala/CutDown.scala | Scala | apache-2.0 | 97 |
package com.github.andr83.scalaconfig.instances
import shapeless.labelled.FieldType
import shapeless.{::, HList, HNil, Witness}
import scala.collection.immutable.Map
/** Type class converting a shapeless record `R` into a `Map[String, Any]`
  * keyed by the record's field names.
  */
abstract class RecordToMap[R <: HList] {
  def apply(r: R): Map[String, Any]
}
object RecordToMap {
  // Base case: the empty record maps to the empty Map.
  implicit val hnilRecordToMap: RecordToMap[HNil] = new RecordToMap[HNil] {
    def apply(r: HNil): Map[String, Any] = Map.empty
  }
  // Inductive case: convert the tail, then add this field under the runtime
  // name of its singleton key type (materialised by the Witness).
  implicit def hconsRecordToMap[K <: Symbol, V, T <: HList](implicit
      wit: Witness.Aux[K],
      rtmT: RecordToMap[T]
  ): RecordToMap[FieldType[K, V] :: T] = new RecordToMap[FieldType[K, V] :: T] {
    def apply(r: FieldType[K, V] :: T): Map[String, Any] = rtmT(r.tail) + ((wit.value.name, r.head))
  }
}
| andr83/scalaconfig | src/main/scala/com/github/andr83/scalaconfig/instances/RecordToMap.scala | Scala | mit | 711 |
package com.faacets.qalg
package algebra
import scala.{specialized => sp}
import spire.algebra._
import spire.syntax.ring._
import spire.syntax.cfor._
import util._
/** Type class for matrix/vector products between matrix type `M` and vector
  * type `V`. The `l`/`r` in the names presumably indicate which side of the
  * matrix the vector multiplies on — confirm against call sites.
  */
trait MatVecProduct[M, V] extends Any { self =>
  def timesl2(v: V, m: M): V
  def timesr2(m: M, v: V): V
}
object MatVecProduct {
  // Summoner: MatVecProduct[M, V] returns the implicit instance in scope.
  def apply[M, V](implicit P: MatVecProduct[M, V]): MatVecProduct[M, V] = P
}
| denisrosset/qalg | core/src/main/scala/qalg/algebra/MatVecProduct.scala | Scala | mit | 377 |
package com.github.wakfudecrypt.types.data
import com.github.wakfudecrypt._
// Generated binary-format record; the @BinaryDecoder macro derives its
// decoder. Field names encode the field's position and wire type
// (e.g. _0_int32 = field 0, a 32-bit integer).
@BinaryDecoder
case class RecycleMachineIeParam(
  _0_int32: Int,
  _1_int32: Int,
  _2_composite: RecycleMachineIeParam_2_composite
)
object RecycleMachineIeParam extends BinaryDataCompanion[RecycleMachineIeParam] {
  // Identifier under which this record type is registered with the decoder.
  override val dataId = 85
}
// Nested composite member of RecycleMachineIeParam (field 2); same generated
// naming scheme as the parent record.
@BinaryDecoder
case class RecycleMachineIeParam_2_composite(
  _0_int8: Byte,
  _1_int32: Int
)
| jac3km4/wakfudecrypt | types/src/main/scala/com/github/wakfudecrypt/types/data/RecycleMachineIeParam.scala | Scala | mit | 422 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.index
import org.geotools.data._
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.TestWithFeatureType
import org.locationtech.geomesa.features.ScalaSimpleFeatureFactory
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
/** Verifies query planning and results for attribute indices with "full" vs
  * "join" coverage: a fully-covered index (name) must answer queries without a
  * join plan, while join indices (age, weight) must fall back to one. Also
  * checks that transforms and secondary ECQL filters return exactly the
  * requested attributes (others come back null).
  */
@RunWith(classOf[JUnitRunner])
class CoveringAttributeIndexTest extends Specification with TestWithFeatureType {
  sequential
  override val spec = "name:String:index=full,age:Integer:index=join,weight:Double:index=join," +
    "height:Double,dtg:Date,*geom:Point:srid=4326"
  val geom = WKTUtils.read("POINT(45.0 49.0)")
  // Ten features: feature i has name "<i>name<i>", age i, weight 2i, height 3i,
  // and a date on Jan 1<i>.
  addFeatures({
    (0 until 10).map { i =>
      val dtg = s"2014-01-1${i}T12:00:00.000Z"
      val attrs = Array(s"${i}name$i", s"$i", s"${i * 2.0}", s"${i * 3.0}", dtg, geom)
      ScalaSimpleFeatureFactory.buildFeature(sft, attrs, i.toString)
    }
  })
  // Marker emitted in the explain output when the planner chooses a join.
  val joinIndicator = "Join Plan:"
  "AttributeIndexStrategy" should {
    "support full coverage of attributes" in {
      val query = new Query(sftName, ECQL.toFilter("name = '3name3'"))
      // Fully-covered index: no join plan expected.
      explain(query).indexOf(joinIndicator) mustEqual(-1)
      val features = SelfClosingIterator(ds.getFeatureSource(sftName).getFeatures(query).features()).toList
      features must haveSize(1)
      features(0).getAttribute("name") mustEqual("3name3")
      features(0).getAttribute("age") mustEqual(3)
      features(0).getAttribute("weight") mustEqual(6.0)
      features(0).getAttribute("height") mustEqual(9.0)
      features(0).getAttribute("dtg").toString must contain("Jan 13")
    }
    "support transforms in fully covered indices" in {
      val query = new Query(sftName, ECQL.toFilter("name = '3name3'"), Array("name", "age", "dtg", "geom"))
      explain(query).indexOf(joinIndicator) mustEqual(-1)
      val features = SelfClosingIterator(ds.getFeatureSource(sftName).getFeatures(query).features()).toList
      features must haveSize(1)
      features(0).getAttribute("name") mustEqual("3name3")
      features(0).getAttribute("age") mustEqual(3)
      // Attributes outside the transform must not be populated.
      features(0).getAttribute("weight") must beNull
      features(0).getAttribute("height") must beNull
      features(0).getAttribute("dtg").toString must contain("Jan 13")
    }
    "support ecql filters in fully covered indices" in {
      val query = new Query(sftName, ECQL.toFilter("name >= '3name3' AND height = '9.0'"))
      explain(query).indexOf(joinIndicator) mustEqual(-1)
      val features = SelfClosingIterator(ds.getFeatureSource(sftName).getFeatures(query).features()).toList
      features must haveSize(1)
      features(0).getAttribute("name") mustEqual("3name3")
      features(0).getAttribute("age") mustEqual(3)
      features(0).getAttribute("weight") mustEqual(6.0)
      features(0).getAttribute("height") mustEqual(9.0)
      features(0).getAttribute("dtg").toString must contain("Jan 13")
    }
    "support ecql filters and covering transforms in fully covered indices" in {
      val query = new Query(sftName, ECQL.toFilter("name >= '3name3' AND height = '9.0'"),
        Array("name", "height", "dtg", "geom"))
      explain(query).indexOf(joinIndicator) mustEqual(-1)
      val features = SelfClosingIterator(ds.getFeatureSource(sftName).getFeatures(query).features()).toList
      features must haveSize(1)
      features(0).getAttribute("name") mustEqual("3name3")
      features(0).getAttribute("age") must beNull
      features(0).getAttribute("weight") must beNull
      features(0).getAttribute("height") mustEqual(9.0)
      features(0).getAttribute("dtg").toString must contain("Jan 13")
    }
    "support ecql filters and non-covering transforms in fully covered indices" in {
      val query = new Query(sftName, ECQL.toFilter("name >= '3name3' AND height = '9.0'"),
        Array("name", "age", "dtg", "geom"))
      explain(query).indexOf(joinIndicator) mustEqual(-1)
      val features = SelfClosingIterator(ds.getFeatureSource(sftName).getFeatures(query).features()).toList
      features must haveSize(1)
      features(0).getAttribute("name") mustEqual("3name3")
      features(0).getAttribute("age") mustEqual(3)
      features(0).getAttribute("weight") must beNull
      features(0).getAttribute("height") must beNull
      features(0).getAttribute("dtg").toString must contain("Jan 13")
    }
    "support join coverage of attributes" in {
      val query = new Query(sftName, ECQL.toFilter("age = '5'"))
      // Join index: a join plan must be present in the explain output.
      explain(query).indexOf(joinIndicator) must beGreaterThan(-1)
      val features = SelfClosingIterator(ds.getFeatureSource(sftName).getFeatures(query).features()).toList
      features must haveSize(1)
      features(0).getAttribute("name") mustEqual("5name5")
      features(0).getAttribute("age") mustEqual(5)
      features(0).getAttribute("weight") mustEqual(10.0)
      features(0).getAttribute("height") mustEqual(15.0)
      features(0).getAttribute("dtg").toString must contain("Jan 15")
    }
    "be backwards compatible with index spec" in {
      val query = new Query(sftName, ECQL.toFilter("weight = '4.0'"))
      explain(query).indexOf(joinIndicator) must beGreaterThan(-1)
      val features = SelfClosingIterator(ds.getFeatureSource(sftName).getFeatures(query).features()).toList
      features must haveSize(1)
      features(0).getAttribute("name") mustEqual("2name2")
      features(0).getAttribute("age") mustEqual(2)
      features(0).getAttribute("weight") mustEqual(4.0)
      features(0).getAttribute("height") mustEqual(6.0)
      features(0).getAttribute("dtg").toString must contain("Jan 12")
    }
  }
}
| aheyne/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/index/CoveringAttributeIndexTest.scala | Scala | apache-2.0 | 6,207 |
package com.github.diegopacheco.sandbox.scala.pkge
package object fruits {
val planted = List(apple, plum, banana)
/** Prints a simple description of the fruit, e.g. "apples are red". */
def showFruit(fruit: Fruit): Unit = {
  // explicit `: Unit =` replaces deprecated procedure syntax (removed in Scala 3)
  println(s"${fruit.name}s are ${fruit.color}")
}
} | diegopacheco/scala-playground | scala-pkge/src/main/scala/com/github/diegopacheco/sandbox/scala/pkge/package.scala | Scala | unlicense | 226 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.internal
import org.apache.flink.annotation.VisibleForTesting
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.DataSet
import org.apache.flink.api.java.operators.DataSink
import org.apache.flink.core.execution.JobClient
import org.apache.flink.table.api._
import org.apache.flink.table.api.internal.TableResultImpl.PrintStyle
import org.apache.flink.table.calcite.{CalciteParser, FlinkPlannerImpl}
import org.apache.flink.table.catalog._
import org.apache.flink.table.catalog.exceptions.{TableNotExistException => _, _}
import org.apache.flink.table.delegation.Parser
import org.apache.flink.table.expressions._
import org.apache.flink.table.expressions.resolver.lookups.TableReferenceLookup
import org.apache.flink.table.factories.{TableFactoryUtil, TableSinkFactoryContextImpl}
import org.apache.flink.table.functions.{AggregateFunction, ScalarFunction, TableFunction, _}
import org.apache.flink.table.module.{Module, ModuleManager}
import org.apache.flink.table.operations.ddl._
import org.apache.flink.table.operations.utils.OperationTreeBuilder
import org.apache.flink.table.operations.{CatalogQueryOperation, TableSourceQueryOperation, _}
import org.apache.flink.table.planner.{ParserImpl, PlanningConfigurationBuilder}
import org.apache.flink.table.sinks.{BatchSelectTableSink, BatchTableSink, OutputFormatTableSink, OverwritableTableSink, PartitionableTableSink, TableSink, TableSinkUtils}
import org.apache.flink.table.sources.TableSource
import org.apache.flink.table.types.{AbstractDataType, DataType}
import org.apache.flink.table.util.JavaScalaConversionUtil
import org.apache.flink.table.utils.PrintUtils
import org.apache.flink.types.Row
import org.apache.calcite.jdbc.CalciteSchemaBuilder.asRootSchema
import org.apache.calcite.sql.parser.SqlParser
import org.apache.calcite.tools.FrameworkConfig
import _root_.java.lang.{Iterable => JIterable, Long => JLong}
import _root_.java.util.function.{Function => JFunction, Supplier => JSupplier}
import _root_.java.util.{Optional, Collections => JCollections, HashMap => JHashMap, List => JList, Map => JMap}
import _root_.scala.collection.JavaConversions._
import _root_.scala.collection.JavaConverters._
import _root_.scala.util.Try
/**
* The abstract base class for the implementation of batch TableEnvironment.
*
* @param config The configuration of the TableEnvironment
*/
abstract class TableEnvImpl(
val config: TableConfig,
private val catalogManager: CatalogManager,
private val moduleManager: ModuleManager,
private val userClassLoader: ClassLoader)
extends TableEnvironmentInternal {
// Table API/SQL function catalog shared between the API layer and the planner.
private[flink] val functionCatalog: FunctionCatalog =
  new FunctionCatalog(config, catalogManager, moduleManager)

// temporary utility until we don't use planner expressions anymore
functionCatalog.setPlannerTypeInferenceUtil(PlannerTypeInferenceUtilImpl.INSTANCE)

// temporary bridge between API and planner: converts API [[Expression]]s
// into legacy [[PlannerExpression]]s
private[flink] val expressionBridge: ExpressionBridge[PlannerExpression] =
  new ExpressionBridge[PlannerExpression](PlannerExpressionConverter.INSTANCE)
/**
 * Lookup used during expression resolution that tries to interpret a plain
 * name as a reference to a registered table. Returns an empty [[Optional]]
 * when the name does not resolve to a table.
 */
private def tableLookup: TableReferenceLookup = {
  new TableReferenceLookup {
    override def lookupTable(name: String): Optional[TableReferenceExpression] = {
      JavaScalaConversionUtil
        .toJava(
          // The TableLookup is used during resolution of expressions and it actually might not
          // be an identifier of a table. It might be a reference to some other object such as
          // column, local reference etc. This method should return empty optional in such cases
          // to fallback for other identifiers resolution.
          Try({
            val unresolvedIdentifier = UnresolvedIdentifier.of(name)
            scanInternal(unresolvedIdentifier)
              .map(t => ApiExpressionUtils.tableRef(name, t))
          })
            .toOption
            .flatten)
    }
  }
}
// Builds the Table API operation tree; identifier parsing is delegated to the
// SQL parser so that quoting/escaping behaves the same as in SQL statements.
private[flink] val operationTreeBuilder = OperationTreeBuilder.create(
  config,
  functionCatalog.asLookup(new JFunction[String, UnresolvedIdentifier] {
    override def apply(t: String): UnresolvedIdentifier = parser.parseIdentifier(t)
  }),
  catalogManager.getDataTypeFactory,
  tableLookup,
  isStreamingMode)

// Bundles the Calcite planner/parser configuration for this environment.
protected val planningConfigurationBuilder: PlanningConfigurationBuilder =
  new PlanningConfigurationBuilder(
    config,
    functionCatalog,
    asRootSchema(new CatalogManagerCalciteSchema(catalogManager, config, isStreamingMode)),
    expressionBridge)

// SQL parser facade used for statements and identifiers.
private val parser: Parser = new ParserImpl(
  catalogManager,
  new JSupplier[FlinkPlannerImpl] {
    override def get(): FlinkPlannerImpl = getFlinkPlanner
  },
  // we do not cache the parser in order to use the most up to
  // date configuration. Users might change parser configuration in TableConfig in between
  // parsing statements
  new JSupplier[CalciteParser] {
    override def get(): CalciteParser = planningConfigurationBuilder.createCalciteParser()
  }
)

// NOTE(review): the boolean presumably means isStreamingMode = false — confirm
// against CatalogTableSchemaResolver's constructor.
catalogManager.setCatalogTableSchemaResolver(new CatalogTableSchemaResolver(parser, false))
/** Returns the configuration of this table environment. */
def getConfig: TableConfig = config

// Error message for statements rejected by sqlUpdate().
private val UNSUPPORTED_QUERY_IN_SQL_UPDATE_MSG =
  "Unsupported SQL query! sqlUpdate() only accepts a single SQL statement of type " +
    "INSERT, CREATE TABLE, DROP TABLE, ALTER TABLE, USE CATALOG, USE [CATALOG.]DATABASE, " +
    "CREATE DATABASE, DROP DATABASE, ALTER DATABASE, CREATE FUNCTION, DROP FUNCTION, " +
    "ALTER FUNCTION, CREATE VIEW, DROP VIEW."

// Error message for statements rejected by executeSql().
private val UNSUPPORTED_QUERY_IN_EXECUTE_SQL_MSG =
  "Unsupported SQL query! executeSql() only accepts a single SQL statement of type " +
    "CREATE TABLE, DROP TABLE, ALTER TABLE, CREATE DATABASE, DROP DATABASE, ALTER DATABASE, " +
    "CREATE FUNCTION, DROP FUNCTION, ALTER FUNCTION, USE CATALOG, USE [CATALOG.]DATABASE, " +
    "SHOW CATALOGS, SHOW DATABASES, SHOW TABLES, SHOW FUNCTIONS, CREATE VIEW, DROP VIEW, " +
    "SHOW VIEWS, INSERT, DESCRIBE."

// Batch implementations extend BatchTableEnvImpl; everything else is streaming.
private def isStreamingMode: Boolean = this match {
  case _: BatchTableEnvImpl => false
  case _ => true
}

private def isBatchTable: Boolean = !isStreamingMode
/** Registers a [[ScalarFunction]] under the given name as a temporary system function. */
override def registerFunction(name: String, function: ScalarFunction): Unit = {
  functionCatalog.registerTempSystemScalarFunction(
    name,
    function)
}

/** Instantiates the given function class and registers it as a temporary system function. */
override def createTemporarySystemFunction(
    name: String,
    functionClass: Class[_ <: UserDefinedFunction])
  : Unit = {
  val functionInstance = UserDefinedFunctionHelper.instantiateFunction(functionClass)
  createTemporarySystemFunction(name, functionInstance)
}

/** Registers an already-instantiated function as a temporary system function. */
override def createTemporarySystemFunction(
    name: String,
    functionInstance: UserDefinedFunction)
  : Unit = {
  // `false`: presumably do NOT ignore if a function already exists — TODO confirm
  functionCatalog.registerTemporarySystemFunction(name, functionInstance, false)
}

/** Drops a temporary system function; returns whether a function existed. */
override def dropTemporarySystemFunction(name: String): Boolean = {
  functionCatalog.dropTemporarySystemFunction(name, true)
}

/** Registers a catalog function; fails if it already exists. */
override def createFunction(
    path: String,
    functionClass: Class[_ <: UserDefinedFunction])
  : Unit = {
  createFunction(path, functionClass, ignoreIfExists = false)
}

/** Registers a catalog function under the (possibly qualified) path. */
override def createFunction(
    path: String,
    functionClass: Class[_ <: UserDefinedFunction],
    ignoreIfExists: Boolean)
  : Unit = {
  val unresolvedIdentifier = parser.parseIdentifier(path)
  functionCatalog.registerCatalogFunction(unresolvedIdentifier, functionClass, ignoreIfExists)
}

/** Drops a catalog function; returns whether a function existed. */
override def dropFunction(path: String): Boolean = {
  val unresolvedIdentifier = parser.parseIdentifier(path)
  functionCatalog.dropCatalogFunction(unresolvedIdentifier, true)
}

/** Instantiates the given class and registers it as a temporary catalog function. */
override def createTemporaryFunction(
    path: String,
    functionClass: Class[_ <: UserDefinedFunction])
  : Unit = {
  val functionInstance = UserDefinedFunctionHelper.instantiateFunction(functionClass)
  createTemporaryFunction(path, functionInstance)
}

/** Registers an already-instantiated function as a temporary catalog function. */
override def createTemporaryFunction(
    path: String,
    functionInstance: UserDefinedFunction)
  : Unit = {
  val unresolvedIdentifier = parser.parseIdentifier(path)
  functionCatalog.registerTemporaryCatalogFunction(unresolvedIdentifier, functionInstance, false)
}

/** Drops a temporary catalog function; returns whether a function existed. */
override def dropTemporaryFunction(path: String): Boolean = {
  val unresolvedIdentifier = parser.parseIdentifier(path)
  functionCatalog.dropTemporaryCatalogFunction(unresolvedIdentifier, true)
}

/**
 * Registers a [[TableFunction]] under a unique name. Replaces already existing
 * user-defined functions under this name.
 */
private[flink] def registerTableFunctionInternal[T: TypeInformation](
    name: String,
    function: TableFunction[T])
  : Unit = {
  // Derive the row type produced by the table function from its implementation
  // and the implicitly available type information.
  val resultTypeInfo = UserDefinedFunctionHelper
    .getReturnTypeOfTableFunction(
      function,
      implicitly[TypeInformation[T]])
  functionCatalog.registerTempSystemTableFunction(
    name,
    function,
    resultTypeInfo)
}

/**
 * Registers an [[AggregateFunction]] under a unique name. Replaces already existing
 * user-defined functions under this name.
 */
private[flink] def registerAggregateFunctionInternal[T: TypeInformation, ACC: TypeInformation](
    name: String,
    function: ImperativeAggregateFunction[T, ACC])
  : Unit = {
  // Result and accumulator types are both required by the function catalog.
  val resultTypeInfo: TypeInformation[T] = UserDefinedFunctionHelper
    .getReturnTypeOfAggregateFunction(
      function,
      implicitly[TypeInformation[T]])
  val accTypeInfo: TypeInformation[ACC] = UserDefinedFunctionHelper
    .getAccumulatorTypeOfAggregateFunction(
      function,
      implicitly[TypeInformation[ACC]])
  functionCatalog.registerTempSystemAggregateFunction(
    name,
    function,
    resultTypeInfo,
    accTypeInfo)
}
/** Registers a [[Catalog]] under the given name. */
override def registerCatalog(catalogName: String, catalog: Catalog): Unit = {
  catalogManager.registerCatalog(catalogName, catalog)
}

/** Returns the catalog registered under the given name, if any. */
override def getCatalog(catalogName: String): Optional[Catalog] = {
  catalogManager.getCatalog(catalogName)
}

/** Loads a [[Module]] under the given name. */
override def loadModule(moduleName: String, module: Module): Unit = {
  moduleManager.loadModule(moduleName, module)
}

/** Unloads the module registered under the given name. */
override def unloadModule(moduleName: String): Unit = {
  moduleManager.unloadModule(moduleName)
}

/** Returns the name of the current default catalog. */
override def getCurrentCatalog: String = {
  catalogManager.getCurrentCatalog
}

/** Returns the name of the current default database. */
override def getCurrentDatabase: String = {
  catalogManager.getCurrentDatabase
}

/** Sets the current default catalog. */
override def useCatalog(catalogName: String): Unit = {
  catalogManager.setCurrentCatalog(catalogName)
}

/** Sets the current default database (within the current catalog). */
override def useDatabase(databaseName: String): Unit = {
  catalogManager.setCurrentDatabase(databaseName)
}

/** Registers the given [[Table]] as a temporary view under a simple name. */
override def registerTable(name: String, table: Table): Unit = {
  createTemporaryView(UnresolvedIdentifier.of(name), table)
}
/** Parses a (possibly qualified and quoted) identifier with the SQL parser. */
protected def parseIdentifier(identifier: String): UnresolvedIdentifier = {
  val parser = planningConfigurationBuilder.createCalciteParser()
  UnresolvedIdentifier.of(parser.parseIdentifier(identifier).names: _*)
}

/** Registers the given [[Table]] as a temporary view under the parsed path. */
override def createTemporaryView(path: String, view: Table): Unit = {
  val identifier = parseIdentifier(path)
  createTemporaryView(identifier, view)
}

/** Registers the view after verifying it originates from this environment. */
private def createTemporaryView(identifier: UnresolvedIdentifier, view: Table): Unit = {
  // check that table belongs to this table environment
  if (view.asInstanceOf[TableImpl].getTableEnvironment != this) {
    throw new TableException(
      "Only table API objects that belong to this TableEnvironment can be registered.")
  }
  val objectIdentifier = catalogManager.qualifyIdentifier(identifier)
  // `false`: do not ignore if a temporary table already exists under the identifier
  catalogManager.createTemporaryTable(
    new QueryOperationCatalogView(view.getQueryOperation),
    objectIdentifier,
    false)
}

/** Wraps an (unregistered) [[TableSource]] directly in a [[Table]]. */
override def fromTableSource(source: TableSource[_]): Table = {
  createTable(new TableSourceQueryOperation(source, isBatchTable))
}
/**
 * Perform batch or streaming specific validations of the [[TableSource]].
 * This method should throw [[ValidationException]] if the [[TableSource]] cannot be used
 * in this [[TableEnvironment]].
 *
 * @param tableSource table source to validate
 */
protected def validateTableSource(tableSource: TableSource[_]): Unit

/**
 * Perform batch or streaming specific validations of the [[TableSink]].
 * This method should throw [[ValidationException]] if the [[TableSink]] cannot be used
 * in this [[TableEnvironment]].
 *
 * @param tableSink table sink to validate
 */
protected def validateTableSink(tableSink: TableSink[_]): Unit
/**
 * Registers a [[TableSource]] as a temporary table. If a sink is already
 * registered under the same name, source and sink are merged into one
 * [[ConnectorCatalogTable]]; an already-registered source is an error.
 */
override def registerTableSourceInternal(
    name: String,
    tableSource: TableSource[_])
  : Unit = {
  validateTableSource(tableSource)
  val unresolvedIdentifier = UnresolvedIdentifier.of(name)
  val objectIdentifier = catalogManager.qualifyIdentifier(unresolvedIdentifier)

  // check if a table (source or sink) is registered
  getTemporaryTable(objectIdentifier) match {
    // a connector table may wrap a source, a sink, or both
    case Some(table: ConnectorCatalogTable[_, _]) =>
      if (table.getTableSource.isPresent) {
        // wrapper contains source
        throw new TableException(s"Table '$name' already exists. " +
          s"Please choose a different name.")
      } else {
        // wrapper contains only sink (not source): merge sink with the new source
        val sourceAndSink = ConnectorCatalogTable.sourceAndSink(
          tableSource,
          table.getTableSink.get,
          isBatchTable)
        // replace the sink-only wrapper by the merged one
        catalogManager.dropTemporaryTable(objectIdentifier, false)
        catalogManager.createTemporaryTable(
          sourceAndSink,
          objectIdentifier,
          false)
      }

    // no table is registered
    case _ =>
      val source = ConnectorCatalogTable.source(tableSource, isBatchTable)
      catalogManager.createTemporaryTable(source, objectIdentifier, false)
  }
}
/**
 * Registers a [[TableSink]] as a temporary table. Mirror image of
 * [[registerTableSourceInternal]]: merges with an already-registered source,
 * fails if a sink is already registered under the name.
 */
override def registerTableSinkInternal(
    name: String,
    tableSink: TableSink[_])
  : Unit = {
  // validate
  if (tableSink.getTableSchema.getFieldNames.length == 0) {
    throw new TableException("Field names must not be empty.")
  }
  validateTableSink(tableSink)
  val unresolvedIdentifier = UnresolvedIdentifier.of(name)
  val objectIdentifier = catalogManager.qualifyIdentifier(unresolvedIdentifier)

  // check if a table (source or sink) is registered
  getTemporaryTable(objectIdentifier) match {
    // table source and/or sink is registered
    case Some(table: ConnectorCatalogTable[_, _]) =>
      if (table.getTableSink.isPresent) {
        // wrapper contains sink
        throw new TableException(s"Table '$name' already exists. " +
          s"Please choose a different name.")
      } else {
        // wrapper contains only source (not sink): merge source with the new sink
        val sourceAndSink = ConnectorCatalogTable.sourceAndSink(
          table.getTableSource.get,
          tableSink,
          isBatchTable)
        // replace the source-only wrapper by the merged one
        catalogManager.dropTemporaryTable(objectIdentifier, false)
        catalogManager.createTemporaryTable(
          sourceAndSink,
          objectIdentifier,
          false)
      }

    // no table is registered
    case _ =>
      val sink = ConnectorCatalogTable.sink(tableSink, isBatchTable)
      catalogManager.createTemporaryTable(sink, objectIdentifier, false)
  }
}
/**
 * Scans a registered table from the given path parts.
 *
 * @throws TableException if no table is registered under the path
 */
@throws[TableException]
override def scan(tablePath: String*): Table = {
  val identifier = UnresolvedIdentifier.of(tablePath: _*)
  scanInternal(identifier)
    .map(createTable)
    .getOrElse(throw new TableException(s"Table '$identifier' was not found."))
}
/**
 * Reads a registered table into a [[Table]]; the path is parsed with the SQL
 * parser so that quoting and escaping are honored.
 */
override def from(path: String): Table = {
  val identifier = parseIdentifier(path)
  scanInternal(identifier)
    .map(createTable)
    .getOrElse(throw new TableException(s"Table '$identifier' was not found."))
}
/**
 * Qualifies the identifier against the current catalog/database and looks the
 * table up; returns a [[CatalogQueryOperation]] if a table exists.
 */
private[flink] def scanInternal(identifier: UnresolvedIdentifier)
  : Option[CatalogQueryOperation] = {
  val objectIdentifier: ObjectIdentifier = catalogManager.qualifyIdentifier(identifier)

  JavaScalaConversionUtil.toScala(catalogManager.getTable(objectIdentifier))
    .map(t => new CatalogQueryOperation(objectIdentifier, t.getResolvedSchema))
}
/** Returns the names of all loaded modules. */
override def listModules(): Array[String] = {
  moduleManager.listModules().asScala.toArray
}

/** Returns the names of all registered catalogs, sorted. */
override def listCatalogs(): Array[String] = {
  catalogManager.listCatalogs
    .asScala
    .toArray
    .sorted
}

/** Returns the names of all databases in the current catalog. */
override def listDatabases(): Array[String] = {
  catalogManager.getCatalog(catalogManager.getCurrentCatalog)
    .get()
    .listDatabases()
    .asScala.toArray
}

/** Returns the names of all tables (and views) in the current database, sorted. */
override def listTables(): Array[String] = {
  catalogManager.listTables().asScala
    .toArray
    .sorted
}

/** Returns the names of all views in the current database, sorted. */
override def listViews(): Array[String] = {
  catalogManager.listViews().asScala
    .toArray
    .sorted
}

/** Returns the names of all temporary tables, sorted. */
override def listTemporaryTables(): Array[String] = {
  catalogManager.listTemporaryTables().asScala
    .toArray
    .sorted
}

/** Returns the names of all temporary views, sorted. */
override def listTemporaryViews(): Array[String] = {
  catalogManager.listTemporaryViews().asScala
    .toArray
    .sorted
}
/**
 * Drops the temporary table registered under the given path.
 *
 * @return true if a table was dropped, false otherwise (including on failure)
 */
override def dropTemporaryTable(path: String): Boolean = {
  val qualified = catalogManager.qualifyIdentifier(parseIdentifier(path))
  try {
    catalogManager.dropTemporaryTable(qualified, false)
    true
  } catch {
    // best effort: report failure as "nothing dropped"
    case _: Exception => false
  }
}
/**
 * Drops the temporary view registered under the given path.
 *
 * @return true if a view was dropped, false otherwise (including on failure)
 */
override def dropTemporaryView(path: String): Boolean = {
  val qualified = catalogManager.qualifyIdentifier(parseIdentifier(path))
  try {
    catalogManager.dropTemporaryView(qualified, false)
    true
  } catch {
    // best effort: report failure as "nothing dropped"
    case _: Exception => false
  }
}
/** Returns the names of all user-defined functions. */
override def listUserDefinedFunctions(): Array[String] = functionCatalog.getUserDefinedFunctions

/** Returns the names of all functions (built-in and user-defined). */
override def listFunctions(): Array[String] = functionCatalog.getFunctions

/** Returns SQL completion hints for the statement at the given cursor position. */
override def getCompletionHints(statement: String, position: Int): Array[String] = {
  val planner = getFlinkPlanner
  planner.getCompletionHints(statement, position)
}
/**
 * Evaluates a single SQL SELECT-style statement and returns it as a [[Table]].
 * Rejects multi-statement input and anything that modifies data.
 */
override def sqlQuery(query: String): Table = {
  val operations = parser.parse(query)

  if (operations.size != 1) throw new ValidationException(
    "Unsupported SQL query! sqlQuery() only accepts a single SQL query.")

  operations.get(0) match {
    // a ModifyOperation is also a QueryOperation, so it must be excluded explicitly
    case op: QueryOperation if !op.isInstanceOf[ModifyOperation] =>
      createTable(op)
    case _ => throw new ValidationException(
      "Unsupported SQL query! sqlQuery() only accepts a single SQL query of type " +
        "SELECT, UNION, INTERSECT, EXCEPT, VALUES, and ORDER_BY.")
  }
}
/**
 * Parses and executes a single SQL statement; multi-statement input is rejected.
 */
override def executeSql(statement: String): TableResult =
  parser.parse(statement) match {
    case ops if ops.size == 1 => executeOperation(ops.get(0))
    case _ => throw new TableException(UNSUPPORTED_QUERY_IN_EXECUTE_SQL_MSG)
  }

/** Creates a new [[StatementSet]] bound to this environment. */
override def createStatementSet = new StatementSetImpl(this)
/**
 * Translates each [[CatalogSinkModifyOperation]] into a [[DataSink]] and runs
 * them as one job. The result has one BIGINT column per sink whose value is -1
 * (the affected row count is unknown for batch sinks).
 */
override def executeInternal(operations: JList[ModifyOperation]): TableResult = {
  val dataSinks = operations.map {
    case catalogSinkModifyOperation: CatalogSinkModifyOperation =>
      writeToSinkAndTranslate(
        catalogSinkModifyOperation.getChild,
        // FIX: pass the static partitions (as the sqlUpdate() path does) rather
        // than the dynamic table options — InsertOptions.staticPartitions is
        // consumed as static partitions by TableSinkUtils.validateSink and
        // PartitionableTableSink.setStaticPartition.
        InsertOptions(
          catalogSinkModifyOperation.getStaticPartitions,
          catalogSinkModifyOperation.isOverwrite),
        catalogSinkModifyOperation.getTableIdentifier)
    case o =>
      throw new TableException("Unsupported operation: " + o)
  }
  // job name lists all target sinks, e.g. "insert-into_cat.db.t1,cat.db.t2"
  val sinkIdentifierNames = extractSinkIdentifierNames(operations)
  val jobName = "insert-into_" + String.join(",", sinkIdentifierNames)
  try {
    val jobClient = execute(dataSinks, jobName)
    val builder = TableSchema.builder()
    val affectedRowCounts = new Array[JLong](operations.size())
    operations.indices.foreach { idx =>
      // use sink identifier name as field name
      builder.field(sinkIdentifierNames(idx), DataTypes.BIGINT())
      // -1 signals "affected row count unknown"
      affectedRowCounts(idx) = -1L
    }
    TableResultImpl.builder()
      .jobClient(jobClient)
      .resultKind(ResultKind.SUCCESS_WITH_CONTENT)
      .tableSchema(builder.build())
      .data(new InsertResultIterator(jobClient, Row.of(affectedRowCounts: _*), userClassLoader))
      .build()
  } catch {
    case e: Exception =>
      throw new TableException("Failed to execute sql", e)
  }
}
/**
 * Executes a SELECT-style [[QueryOperation]] by writing it into a
 * [[BatchSelectTableSink]] and collecting the results client-side.
 */
override def executeInternal(operation: QueryOperation): TableResult = {
  val tableSchema = operation.getTableSchema
  val tableSink = new BatchSelectTableSink(tableSchema)
  val dataSink = writeToSinkAndTranslate(operation, tableSink)
  try {
    val jobClient = execute(JCollections.singletonList(dataSink), "collect")
    // the provider needs the job client to fetch the accumulator-backed results
    val selectResultProvider = tableSink.getSelectResultProvider
    selectResultProvider.setJobClient(jobClient)
    TableResultImpl.builder
      .jobClient(jobClient)
      .resultKind(ResultKind.SUCCESS_WITH_CONTENT)
      .tableSchema(tableSchema)
      .data(selectResultProvider.getResultIterator)
      .setPrintStyle(
        PrintStyle.tableau(PrintUtils.MAX_COLUMN_WIDTH, PrintUtils.NULL_COLUMN, true, false))
      .build
  } catch {
    case e: Exception =>
      throw new TableException("Failed to execute sql", e)
  }
}
/**
 * Legacy single-statement entry point: INSERTs are buffered for later
 * execution, DDL statements are executed eagerly, queries are rejected.
 */
override def sqlUpdate(stmt: String): Unit = {
  val operations = parser.parse(stmt)

  if (operations.size != 1) {
    throw new TableException(UNSUPPORTED_QUERY_IN_SQL_UPDATE_MSG)
  }

  val operation = operations.get(0)

  operation match {
    // INSERT INTO: buffered, executed on TableEnvironment.execute()
    case op: CatalogSinkModifyOperation =>
      insertInto(
        createTable(op.getChild),
        InsertOptions(op.getStaticPartitions, op.isOverwrite),
        op.getTableIdentifier)
    // DDL and session statements are executed immediately
    case _: CreateTableOperation | _: DropTableOperation | _: AlterTableOperation |
         _: CreateViewOperation | _: DropViewOperation |
         _: CreateDatabaseOperation | _: DropDatabaseOperation | _: AlterDatabaseOperation |
         _: CreateCatalogFunctionOperation | _: CreateTempSystemFunctionOperation |
         _: DropCatalogFunctionOperation | _: DropTempSystemFunctionOperation |
         _: AlterCatalogFunctionOperation | _: UseCatalogOperation | _: UseDatabaseOperation =>
      executeOperation(operation)
    case _ => throw new TableException(UNSUPPORTED_QUERY_IN_SQL_UPDATE_MSG)
  }
}
/**
 * Dispatches a parsed [[Operation]] to its execution logic: INSERTs run a job,
 * DDL operations update catalogs/functions, SHOW/DESCRIBE/EXPLAIN build
 * in-memory results, queries are collected.
 */
private def executeOperation(operation: Operation): TableResult = {
  operation match {
    case catalogSinkModifyOperation: CatalogSinkModifyOperation =>
      executeInternal(JCollections.singletonList[ModifyOperation](catalogSinkModifyOperation))
    case createTableOperation: CreateTableOperation =>
      if (createTableOperation.isTemporary) {
        catalogManager.createTemporaryTable(
          createTableOperation.getCatalogTable,
          createTableOperation.getTableIdentifier,
          createTableOperation.isIgnoreIfExists
        )
      } else {
        catalogManager.createTable(
          createTableOperation.getCatalogTable,
          createTableOperation.getTableIdentifier,
          createTableOperation.isIgnoreIfExists)
      }
      TableResultImpl.TABLE_RESULT_OK
    case dropTableOperation: DropTableOperation =>
      if (dropTableOperation.isTemporary) {
        catalogManager.dropTemporaryTable(
          dropTableOperation.getTableIdentifier,
          dropTableOperation.isIfExists)
      } else {
        catalogManager.dropTable(
          dropTableOperation.getTableIdentifier,
          dropTableOperation.isIfExists)
      }
      TableResultImpl.TABLE_RESULT_OK
    case alterTableOperation: AlterTableOperation =>
      val catalog = getCatalogOrThrowException(
        alterTableOperation.getTableIdentifier.getCatalogName)
      val exMsg = getDDLOpExecuteErrorMsg(alterTableOperation.asSummaryString)
      try {
        alterTableOperation match {
          case alterTableRenameOp: AlterTableRenameOperation =>
            catalog.renameTable(
              alterTableRenameOp.getTableIdentifier.toObjectPath,
              alterTableRenameOp.getNewTableIdentifier.getObjectName,
              false)
          case alterTablePropertiesOp: AlterTablePropertiesOperation =>
            catalog.alterTable(
              alterTablePropertiesOp.getTableIdentifier.toObjectPath,
              alterTablePropertiesOp.getCatalogTable,
              false)
        }
        TableResultImpl.TABLE_RESULT_OK
      } catch {
        // user-visible validation error vs. unexpected catalog failure
        case ex: TableNotExistException => throw new ValidationException(exMsg, ex)
        case ex: Exception => throw new TableException(exMsg, ex)
      }
    case createDatabaseOperation: CreateDatabaseOperation =>
      val catalog = getCatalogOrThrowException(createDatabaseOperation.getCatalogName)
      val exMsg = getDDLOpExecuteErrorMsg(createDatabaseOperation.asSummaryString)
      try {
        catalog.createDatabase(
          createDatabaseOperation.getDatabaseName,
          createDatabaseOperation.getCatalogDatabase,
          createDatabaseOperation.isIgnoreIfExists)
        TableResultImpl.TABLE_RESULT_OK
      } catch {
        case ex: DatabaseAlreadyExistException => throw new ValidationException(exMsg, ex)
        case ex: Exception => throw new TableException(exMsg, ex)
      }
    case dropDatabaseOperation: DropDatabaseOperation =>
      val catalog = getCatalogOrThrowException(dropDatabaseOperation.getCatalogName)
      val exMsg = getDDLOpExecuteErrorMsg(dropDatabaseOperation.asSummaryString)
      try {
        catalog.dropDatabase(
          dropDatabaseOperation.getDatabaseName,
          dropDatabaseOperation.isIfExists,
          dropDatabaseOperation.isCascade)
        TableResultImpl.TABLE_RESULT_OK
      } catch {
        case ex: DatabaseNotEmptyException => throw new ValidationException(exMsg, ex)
        case ex: DatabaseNotExistException => throw new ValidationException(exMsg, ex)
        case ex: Exception => throw new TableException(exMsg, ex)
      }
    case alterDatabaseOperation: AlterDatabaseOperation =>
      val catalog = getCatalogOrThrowException(alterDatabaseOperation.getCatalogName)
      val exMsg = getDDLOpExecuteErrorMsg(alterDatabaseOperation.asSummaryString)
      try {
        catalog.alterDatabase(
          alterDatabaseOperation.getDatabaseName,
          alterDatabaseOperation.getCatalogDatabase,
          false)
        TableResultImpl.TABLE_RESULT_OK
      } catch {
        case ex: DatabaseNotExistException => throw new ValidationException(exMsg, ex)
        case ex: Exception => throw new TableException(exMsg, ex)
      }
    // function DDL is delegated to dedicated helpers
    case createFunctionOperation: CreateCatalogFunctionOperation =>
      createCatalogFunction(createFunctionOperation)
    case createTempSystemFunctionOperation: CreateTempSystemFunctionOperation =>
      createSystemFunction(createTempSystemFunctionOperation)
    case dropFunctionOperation: DropCatalogFunctionOperation =>
      dropCatalogFunction(dropFunctionOperation)
    case dropTempSystemFunctionOperation: DropTempSystemFunctionOperation =>
      dropSystemFunction(dropTempSystemFunctionOperation)
    case alterFunctionOperation: AlterCatalogFunctionOperation =>
      alterCatalogFunction(alterFunctionOperation)
    case useCatalogOperation: UseCatalogOperation =>
      catalogManager.setCurrentCatalog(useCatalogOperation.getCatalogName)
      TableResultImpl.TABLE_RESULT_OK
    case useDatabaseOperation: UseDatabaseOperation =>
      // USE [catalog.]database switches both the current catalog and database
      catalogManager.setCurrentCatalog(useDatabaseOperation.getCatalogName)
      catalogManager.setCurrentDatabase(useDatabaseOperation.getDatabaseName)
      TableResultImpl.TABLE_RESULT_OK
    case _: ShowCatalogsOperation =>
      buildShowResult("catalog name", listCatalogs())
    case _: ShowCurrentCatalogOperation =>
      buildShowResult("current catalog name", Array(catalogManager.getCurrentCatalog))
    case _: ShowDatabasesOperation =>
      buildShowResult("database name", listDatabases())
    case _: ShowCurrentDatabaseOperation =>
      buildShowResult("current database name", Array(catalogManager.getCurrentDatabase))
    case _: ShowTablesOperation =>
      buildShowResult("table name", listTables())
    case _: ShowFunctionsOperation =>
      buildShowResult("function name", listFunctions())
    case createViewOperation: CreateViewOperation =>
      if (createViewOperation.isTemporary) {
        catalogManager.createTemporaryTable(
          createViewOperation.getCatalogView,
          createViewOperation.getViewIdentifier,
          createViewOperation.isIgnoreIfExists)
      } else {
        catalogManager.createTable(
          createViewOperation.getCatalogView,
          createViewOperation.getViewIdentifier,
          createViewOperation.isIgnoreIfExists)
      }
      TableResultImpl.TABLE_RESULT_OK
    case dropViewOperation: DropViewOperation =>
      if (dropViewOperation.isTemporary) {
        catalogManager.dropTemporaryView(
          dropViewOperation.getViewIdentifier,
          dropViewOperation.isIfExists)
      } else {
        catalogManager.dropView(
          dropViewOperation.getViewIdentifier,
          dropViewOperation.isIfExists)
      }
      TableResultImpl.TABLE_RESULT_OK
    case _: ShowViewsOperation =>
      buildShowResult("view name", listViews())
    case explainOperation: ExplainOperation =>
      val explanation = explainInternal(JCollections.singletonList(explainOperation.getChild))
      TableResultImpl.builder.
        resultKind(ResultKind.SUCCESS_WITH_CONTENT)
        .tableSchema(TableSchema.builder.field("result", DataTypes.STRING).build)
        .data(JCollections.singletonList(Row.of(explanation)))
        .setPrintStyle(PrintStyle.rawContent())
        .build
    case descOperation: DescribeTableOperation =>
      val result = catalogManager.getTable(descOperation.getSqlIdentifier)
      if (result.isPresent) {
        buildDescribeResult(result.get.getTable.getSchema)
      } else {
        throw new ValidationException(String.format(
          "Table or view with identifier '%s' doesn't exist",
          descOperation.getSqlIdentifier.asSummaryString()))
      }
    case queryOperation: QueryOperation =>
      executeInternal(queryOperation)

    case _ =>
      throw new TableException(UNSUPPORTED_QUERY_IN_EXECUTE_SQL_MSG)
  }
}
/** Builds a single-column SHOW-style result with one row per entry. */
private def buildShowResult(columnName: String, objects: Array[String]): TableResult = {
  val rows: Array[Array[Object]] = objects.map(obj => Array[Object](obj))
  buildResult(Array(columnName), Array(DataTypes.STRING), rows)
}
/**
 * Builds the DESCRIBE result: one row per column with name, type, nullability,
 * primary-key membership, computed-column expression and watermark expression.
 */
private def buildDescribeResult(schema: TableSchema): TableResult = {
  // rowtime attribute -> watermark expression
  val fieldToWatermark =
    schema
      .getWatermarkSpecs
      .map(w => (w.getRowtimeAttribute, w.getWatermarkExpr)).toMap
  // every PK column is annotated with the full key, e.g. "PRI(a, b)"
  val fieldToPrimaryKey = new JHashMap[String, String]()
  if (schema.getPrimaryKey.isPresent) {
    val columns = schema.getPrimaryKey.get.getColumns.asScala
    columns.foreach(c => fieldToPrimaryKey.put(c, s"PRI(${columns.mkString(", ")})"))
  }
  // six output columns: name, type, null, key, extras, watermark
  val data = Array.ofDim[Object](schema.getFieldCount, 6)
  schema.getTableColumns.asScala.zipWithIndex.foreach {
    case (c, i) => {
      val logicalType = c.getType.getLogicalType
      data(i)(0) = c.getName
      // copy(true) renders the type without the NOT NULL suffix
      data(i)(1) = logicalType.copy(true).asSummaryString()
      data(i)(2) = Boolean.box(logicalType.isNullable)
      data(i)(3) = fieldToPrimaryKey.getOrDefault(c.getName, null)
      data(i)(4) = c.explainExtras().orElse(null)
      data(i)(5) = fieldToWatermark.getOrDefault(c.getName, null)
    }
  }
  buildResult(
    Array("name", "type", "null", "key", "extras", "watermark"),
    Array(DataTypes.STRING, DataTypes.STRING, DataTypes.BOOLEAN, DataTypes.STRING,
      DataTypes.STRING, DataTypes.STRING),
    data)
}
/** Wraps the given header/type/row triple in an in-memory [[TableResult]]. */
private def buildResult(
    headers: Array[String],
    types: Array[DataType],
    rows: Array[Array[Object]]): TableResult = {
  val schema = TableSchema.builder().fields(headers, types).build()
  val data = rows.map(row => Row.of(row: _*)).toList
  TableResultImpl.builder()
    .resultKind(ResultKind.SUCCESS_WITH_CONTENT)
    .tableSchema(schema)
    .data(data)
    .build()
}
/** Get catalog from catalogName or throw a ValidationException if the catalog not exists. */
private def getCatalogOrThrowException(catalogName: String): Catalog = {
  getCatalog(catalogName)
    .orElseThrow(
      // anonymous class instead of a lambda to stay compatible with older Scala versions
      new JSupplier[Throwable] {
        override def get() = new ValidationException(
          String.format("Catalog %s does not exist", catalogName))
      })
}
/** Builds the error message reported when a DDL operation fails. */
private def getDDLOpExecuteErrorMsg(action: String): String =
  String.format("Could not execute %s", action)
/** Wraps a [[QueryOperation]] in a [[TableImpl]] bound to this environment. */
protected def createTable(tableOperation: QueryOperation): TableImpl = {
  TableImpl.createTable(
    this,
    tableOperation,
    operationTreeBuilder,
    // identifiers are parsed with the SQL parser for consistent quoting rules
    functionCatalog.asLookup(new JFunction[String, UnresolvedIdentifier] {
      override def apply(t: String): UnresolvedIdentifier = parser.parseIdentifier(t)
    }))
}
/**
 * extract sink identifier names from [[ModifyOperation]]s.
 *
 * <p>If there are multiple ModifyOperations have same name,
 * an index suffix will be added at the end of the name to ensure each name is unique.
 */
private def extractSinkIdentifierNames(operations: JList[ModifyOperation]): JList[String] = {
  // first pass: count occurrences per fully-qualified sink name
  val tableNameToCount = new JHashMap[String, Int]()
  val tableNames = operations.map {
    case catalogSinkModifyOperation: CatalogSinkModifyOperation =>
      val fullName = catalogSinkModifyOperation.getTableIdentifier.asSummaryString()
      tableNameToCount.put(fullName, tableNameToCount.getOrDefault(fullName, 0) + 1)
      fullName
    case o =>
      throw new UnsupportedOperationException("Unsupported operation: " + o)
  }
  // second pass: suffix duplicated names with a running index ("name_1", "name_2", ...)
  val tableNameToIndex = new JHashMap[String, Int]()
  tableNames.map { tableName =>
    if (tableNameToCount.get(tableName) == 1) {
      tableName
    } else {
      val index = tableNameToIndex.getOrDefault(tableName, 0) + 1
      tableNameToIndex.put(tableName, index)
      tableName + "_" + index
    }
  }
}
/**
 * Triggers the program execution.
 *
 * @param dataSinks the translated sinks to run
 * @param jobName   name under which the job is submitted
 */
protected def execute(dataSinks: JList[DataSink[_]], jobName: String): JobClient
/**
 * Writes a [[QueryOperation]] to the registered TableSink with insert options,
 * and translates them into a [[DataSink]].
 *
 * Internally, the [[QueryOperation]] is translated into a [[DataSet]]
 * and handed over to the [[TableSink]] to write it.
 *
 * @param queryOperation The [[QueryOperation]] to translate.
 * @param insertOptions  The insert options for executing sql insert.
 * @param sinkIdentifier The name of the registered TableSink.
 * @return [[DataSink]] which represents the plan.
 */
private def writeToSinkAndTranslate(
    queryOperation: QueryOperation,
    insertOptions: InsertOptions,
    sinkIdentifier: ObjectIdentifier): DataSink[_] = {
  getTableSink(sinkIdentifier) match {

    case None =>
      throw new TableException(s"No table was registered under the name $sinkIdentifier.")

    case Some(tableSink) =>
      // validate schema of source table and table sink
      TableSinkUtils.validateSink(
        insertOptions.staticPartitions,
        queryOperation,
        sinkIdentifier,
        tableSink)
      // set static partitions if it is a partitioned table sink
      tableSink match {
        case partitionableSink: PartitionableTableSink =>
          partitionableSink.setStaticPartition(insertOptions.staticPartitions)
        case _ =>
      }
      // set whether to overwrite if it's an OverwritableTableSink
      tableSink match {
        case overwritableTableSink: OverwritableTableSink =>
          overwritableTableSink.setOverwrite(insertOptions.overwrite)
        case _ =>
          // a non-overwritable sink combined with INSERT OVERWRITE is a user error
          require(!insertOptions.overwrite, "INSERT OVERWRITE requires " +
            s"${classOf[OverwritableTableSink].getSimpleName} but actually got " +
            tableSink.getClass.getName)
      }
      // emit the table to the configured table sink
      writeToSinkAndTranslate(queryOperation, tableSink)
  }
}
  /**
   * Writes a [[QueryOperation]] to a [[TableSink]],
   * and translates it into a [[DataSink]]. Implemented per execution mode.
   *
   * Internally, the [[QueryOperation]] is translated into a [[DataSet]]
   * and handed over to the [[TableSink]] to write it.
   *
   * @param queryOperation The [[QueryOperation]] to write.
   * @param tableSink The [[TableSink]] to write the [[Table]] to.
   * @return [[DataSink]] which represents the plan.
   */
  protected def writeToSinkAndTranslate[T](
    queryOperation: QueryOperation,
    tableSink: TableSink[T]): DataSink[_]
  /**
   * Adds the given [[ModifyOperation]] to the buffer of pending operations,
   * to be translated and executed later. Implemented per execution mode.
   *
   * @param modifyOperation The [[ModifyOperation]] to buffer.
   */
  protected def addToBuffer[T](modifyOperation: ModifyOperation): Unit
override def insertInto(path: String, table: Table): Unit = {
val parser = planningConfigurationBuilder.createCalciteParser()
val unresolvedIdentifier = UnresolvedIdentifier.of(parser.parseIdentifier(path).names: _*)
val objectIdentifier: ObjectIdentifier = catalogManager.qualifyIdentifier(unresolvedIdentifier)
insertInto(
table,
InsertOptions(new JHashMap[String, String](), overwrite = false),
objectIdentifier)
}
override def insertInto(
table: Table,
sinkPath: String,
sinkPathContinued: String*): Unit = {
val unresolvedIdentifier = UnresolvedIdentifier.of(sinkPath +: sinkPathContinued: _*)
val objectIdentifier = catalogManager.qualifyIdentifier(unresolvedIdentifier)
insertInto(
table,
InsertOptions(new JHashMap[String, String](), overwrite = false),
objectIdentifier)
}
  /** Insert options for executing sql insert.
   *
   * @param staticPartitions static partition spec (partition column -> value)
   * @param overwrite whether existing data should be overwritten
   */
  case class InsertOptions(staticPartitions: JMap[String, String], overwrite: Boolean)
/**
* Writes the [[Table]] to a [[TableSink]] that was registered under the specified name.
*
* @param table The table to write to the TableSink.
* @param sinkIdentifier The name of the registered TableSink.
*/
private def insertInto(
table: Table,
insertOptions: InsertOptions,
sinkIdentifier: ObjectIdentifier): Unit = {
val operation = new CatalogSinkModifyOperation(
sinkIdentifier,
table.getQueryOperation,
insertOptions.staticPartitions,
insertOptions.overwrite,
new JHashMap[String, String]())
addToBuffer(operation)
}
  /** Returns the SQL [[Parser]] of this environment. */
  override def getParser: Parser = parser
  /** Returns the [[CatalogManager]] tracking catalogs, databases and tables. */
  override def getCatalogManager: CatalogManager = catalogManager
  /** Resolves and configures the [[TableSink]] targeted by a [[ModifyOperation]].
   *
   * Only [[CatalogSinkModifyOperation]] is supported. The resolved sink must be
   * a [[BatchTableSink]] or [[OutputFormatTableSink]]; it is validated against
   * the operation's child schema and configured with the operation's static
   * partitions and overwrite flag before being returned.
   */
  protected def getTableSink(modifyOperation: ModifyOperation): TableSink[_] = {
    modifyOperation match {
      case s: CatalogSinkModifyOperation =>
        getTableSink(s.getTableIdentifier) match {
          case None =>
            throw new TableException(
              s"No table was registered under the name ${s.getTableIdentifier}.")
          case Some(tableSink) =>
            // only batch-capable sinks are acceptable here
            tableSink match {
              case _: BatchTableSink[_] => // do nothing
              case _: OutputFormatTableSink[_] => // do nothing
              case _ =>
                throw new TableException(
                  "BatchTableSink or OutputFormatTableSink required to emit batch Table.")
            }
            // validate schema of source table and table sink
            TableSinkUtils.validateSink(
              s.getStaticPartitions,
              s.getChild,
              s.getTableIdentifier,
              tableSink)
            // set static partitions if it is a partitioned table sink
            tableSink match {
              case partitionableSink: PartitionableTableSink =>
                partitionableSink.setStaticPartition(s.getStaticPartitions)
              case _ =>
            }
            // set whether to overwrite if it's an OverwritableTableSink;
            // otherwise INSERT OVERWRITE is rejected
            tableSink match {
              case overwritableTableSink: OverwritableTableSink =>
                overwritableTableSink.setOverwrite(s.isOverwrite)
              case _ =>
                require(!s.isOverwrite, "INSERT OVERWRITE requires " +
                  s"${classOf[OverwritableTableSink].getSimpleName} but actually got " +
                  tableSink.getClass.getName)
            }
            tableSink
        }
      case o =>
        throw new TableException("Unsupported Operation: " + o.asSummaryString())
    }
  }
  /** Looks up a [[TableSink]] for the given identifier, if one can be created.
   *
   * [[ConnectorCatalogTable]]s expose their sink directly. [[CatalogTable]]s
   * are turned into a sink via the owning catalog's table factory when one is
   * present, falling back to generic factory discovery. Returns None when the
   * table does not exist or is of an unsupported kind.
   */
  protected def getTableSink(objectIdentifier: ObjectIdentifier): Option[TableSink[_]] = {
    val lookupResult = JavaScalaConversionUtil.toScala(catalogManager.getTable(objectIdentifier))
    lookupResult
      .map(_.getTable) match {
      case Some(s) if s.isInstanceOf[ConnectorCatalogTable[_, _]] =>
        JavaScalaConversionUtil
          .toScala(s.asInstanceOf[ConnectorCatalogTable[_, _]].getTableSink)
      case Some(s) if s.isInstanceOf[CatalogTable] =>
        val catalog = catalogManager.getCatalog(objectIdentifier.getCatalogName)
        val catalogTable = s.asInstanceOf[CatalogTable]
        val context = new TableSinkFactoryContextImpl(
          objectIdentifier, catalogTable, config.getConfiguration, true,
          lookupResult.get.isTemporary)
        // prefer the catalog's own factory; early return short-circuits the fallback
        if (catalog.isPresent && catalog.get().getTableFactory.isPresent) {
          val sink = TableFactoryUtil.createTableSinkForCatalogTable(catalog.get(), context)
          if (sink.isPresent) {
            return Option(sink.get())
          }
        }
        Option(TableFactoryUtil.findAndCreateTableSink(context))
      case _ => None
    }
  }
protected def getTemporaryTable(identifier: ObjectIdentifier): Option[CatalogBaseTable] = {
JavaScalaConversionUtil.toScala(catalogManager.getTable(identifier))
.filter(_.isTemporary)
.map(_.getTable)
}
private def createCatalogFunction(
createFunctionOperation: CreateCatalogFunctionOperation): TableResult = {
val exMsg = getDDLOpExecuteErrorMsg(createFunctionOperation.asSummaryString)
try {
val function = createFunctionOperation.getCatalogFunction
if (createFunctionOperation.isTemporary) {
val exist = functionCatalog.hasTemporaryCatalogFunction(
createFunctionOperation.getFunctionIdentifier);
if (!exist) {
functionCatalog.registerTemporaryCatalogFunction(
UnresolvedIdentifier.of(createFunctionOperation.getFunctionIdentifier.toList),
createFunctionOperation.getCatalogFunction,
false)
} else if (!createFunctionOperation.isIgnoreIfExists) {
throw new ValidationException(
String.format("Temporary catalog function %s is already defined",
createFunctionOperation.getFunctionIdentifier.asSerializableString))
}
} else {
val catalog = getCatalogOrThrowException(
createFunctionOperation.getFunctionIdentifier.getCatalogName)
catalog.createFunction(
createFunctionOperation.getFunctionIdentifier.toObjectPath,
createFunctionOperation.getCatalogFunction,
createFunctionOperation.isIgnoreIfExists)
}
TableResultImpl.TABLE_RESULT_OK
} catch {
case ex: ValidationException => throw ex
case ex: FunctionAlreadyExistException => throw new ValidationException(ex.getMessage, ex)
case ex: Exception => throw new TableException(exMsg, ex)
}
}
private def alterCatalogFunction(
alterFunctionOperation: AlterCatalogFunctionOperation): TableResult = {
val exMsg = getDDLOpExecuteErrorMsg(alterFunctionOperation.asSummaryString)
try {
val function = alterFunctionOperation.getCatalogFunction
if (alterFunctionOperation.isTemporary) {
throw new ValidationException("Alter temporary catalog function is not supported")
} else {
val catalog = getCatalogOrThrowException(
alterFunctionOperation.getFunctionIdentifier.getCatalogName)
catalog.alterFunction(
alterFunctionOperation.getFunctionIdentifier.toObjectPath,
alterFunctionOperation.getCatalogFunction,
alterFunctionOperation.isIfExists)
}
TableResultImpl.TABLE_RESULT_OK
} catch {
case ex: ValidationException => throw ex
case ex: FunctionNotExistException => throw new ValidationException(ex.getMessage, ex)
case ex: Exception => throw new TableException(exMsg, ex)
}
}
private def dropCatalogFunction(
dropFunctionOperation: DropCatalogFunctionOperation): TableResult = {
val exMsg = getDDLOpExecuteErrorMsg(dropFunctionOperation.asSummaryString)
try {
if (dropFunctionOperation.isTemporary) {
functionCatalog.dropTempCatalogFunction(
dropFunctionOperation.getFunctionIdentifier, dropFunctionOperation.isIfExists)
} else {
val catalog = getCatalogOrThrowException(
dropFunctionOperation.getFunctionIdentifier.getCatalogName)
catalog.dropFunction(
dropFunctionOperation.getFunctionIdentifier.toObjectPath,
dropFunctionOperation.isIfExists)
}
TableResultImpl.TABLE_RESULT_OK
} catch {
case ex: ValidationException => throw ex
case ex: FunctionNotExistException => throw new ValidationException(ex.getMessage, ex)
case ex: Exception => throw new TableException(exMsg, ex)
}
}
  /** Executes a CREATE TEMPORARY SYSTEM FUNCTION DDL operation.
   *
   * @throws ValidationException if the function is already defined and
   *         IF NOT EXISTS was not specified
   * @throws TableException if registration fails for any other reason
   */
  private def createSystemFunction(
    createFunctionOperation: CreateTempSystemFunctionOperation): TableResult = {
    val exMsg = getDDLOpExecuteErrorMsg(createFunctionOperation.asSummaryString)
    try {
      val exist = functionCatalog.hasTemporarySystemFunction(
        createFunctionOperation.getFunctionName)
      if (!exist) {
        functionCatalog.registerTemporarySystemFunction(
          createFunctionOperation.getFunctionName,
          createFunctionOperation.getFunctionClass,
          createFunctionOperation.getFunctionLanguage,
          false)
      } else if (!createFunctionOperation.isIgnoreIfExists) {
        throw new ValidationException(
          String.format("Temporary system function %s is already defined",
            createFunctionOperation.getFunctionName))
      }
      TableResultImpl.TABLE_RESULT_OK
    } catch {
      case e: ValidationException =>
        throw e
      case e: Exception =>
        throw new TableException(exMsg, e)
    }
  }
private def dropSystemFunction(
dropFunctionOperation: DropTempSystemFunctionOperation): TableResult = {
val exMsg = getDDLOpExecuteErrorMsg(dropFunctionOperation.asSummaryString)
try {
functionCatalog.dropTemporarySystemFunction(
dropFunctionOperation.getFunctionName, dropFunctionOperation.isIfExists)
TableResultImpl.TABLE_RESULT_OK
} catch {
case e: ValidationException =>
throw e
case e: Exception =>
throw new TableException(exMsg, e)
}
}
override def explainSql(statement: String, extraDetails: ExplainDetail*): String = {
val operations = parser.parse(statement)
if (operations.size != 1) {
throw new TableException(
"Unsupported SQL query! explainSql() only accepts a single SQL query.")
}
explainInternal(operations, extraDetails: _*)
}
  /** Renders an explanation of the given operations; implemented per execution mode. */
  protected def explainInternal(operations: JList[Operation], extraDetails: ExplainDetail*): String
  /** Creates a table from the given row expressions; the row type is inferred. */
  override def fromValues(values: Expression*): Table = {
    createTable(operationTreeBuilder.values(values: _*))
  }
  /** Creates a table from the given row expressions, coerced to `rowType`. */
  override def fromValues(rowType: AbstractDataType[_], values: Expression*): Table = {
    val resolvedDataType = catalogManager.getDataTypeFactory.createDataType(rowType)
    createTable(operationTreeBuilder.values(resolvedDataType, values: _*))
  }
override def fromValues(values: JIterable[_]): Table = {
val exprs = values.asScala
.map(ApiExpressionUtils.objectToExpression)
.toArray
fromValues(exprs: _*)
}
override def fromValues(rowType: AbstractDataType[_], values: JIterable[_]): Table = {
val exprs = values.asScala
.map(ApiExpressionUtils.objectToExpression)
.toArray
fromValues(rowType, exprs: _*)
}
  /** Returns a [[FlinkRelBuilder]] scoped to the current catalog and database. */
  private[flink] def getRelBuilder = {
    val currentCatalogName = catalogManager.getCurrentCatalog
    val currentDatabase = catalogManager.getCurrentDatabase
    planningConfigurationBuilder.createRelBuilder(currentCatalogName, currentDatabase)
  }
  /** Returns the [[FunctionCatalog]] backing this environment. */
  private[flink] def getFunctionCatalog: FunctionCatalog = {
    functionCatalog
  }
  /** Returns the SQL parser configuration used by the planner. */
  private[flink] def getParserConfig: SqlParser.Config = planningConfigurationBuilder
    .getSqlParserConfig
/** Returns the Calcite [[FrameworkConfig]] of this TableEnvironment. */
@VisibleForTesting
private[flink] def getFlinkPlanner: FlinkPlannerImpl = {
val currentCatalogName = catalogManager.getCurrentCatalog
val currentDatabase = catalogManager.getCurrentDatabase
planningConfigurationBuilder.createFlinkPlanner(currentCatalogName, currentDatabase)
}
}
| aljoscha/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/api/internal/TableEnvImpl.scala | Scala | apache-2.0 | 51,032 |
/*
Copyright 2015 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.serialization
import java.io._
/** Enrichments adding allocation-free `DataInput`/`DataOutput`-style primitives
 * directly to `java.io` streams, plus array-backed stream wrappers.
 * All multi-byte encodings are big-endian, matching the formats used by
 * `java.io.DataInputStream` and `java.io.DataOutputStream`.
 */
object JavaStreamEnrichments {
  /** Always throws EOFException; used to signal unexpected end of stream. */
  def eof: Nothing = throw new EOFException()
  // We use this to avoid allocating a closure to make
  // a lazy parameter to require
  private def illegal(s: String): Nothing =
    throw new IllegalArgumentException(s)
  /**
   * Note this is only recommended for testing. You may want to use ByteArrayInputOutputStream for performance
   * critical concerns
   */
  implicit class RichByteArrayOutputStream(val baos: ByteArrayOutputStream) extends AnyVal {
    def toInputStream: ByteArrayInputStream = new ByteArrayInputStream(baos.toByteArray)
  }
  /**
   * Enrichment to treat an Array like an OutputStream.
   */
  implicit class RichByteArray(val bytes: Array[Byte]) extends AnyVal {
    def wrapAsOutputStream: ArrayWrappingOutputStream = wrapAsOutputStreamAt(0)
    def wrapAsOutputStreamAt(pos: Int): ArrayWrappingOutputStream =
      new ArrayWrappingOutputStream(bytes, pos)
  }
  /**
   * Wraps an Array so that you can write into it as a stream without reallocations or copying at the end.
   * Useful if you know an upper bound on the number of bytes you will write.
   * Writing past the end of the array throws (no bounds check beyond the array's own).
   */
  class ArrayWrappingOutputStream(val buffer: Array[Byte], initPos: Int) extends OutputStream {
    if (buffer.length < initPos) {
      illegal(s"Initial position cannot be more than length: $initPos > ${buffer.length}")
    }
    private[this] var pos = initPos
    // Next write offset into buffer.
    def position: Int = pos
    override def write(b: Int): Unit = {
      buffer(pos) = b.toByte
      pos += 1
    }
    override def write(b: Array[Byte], off: Int, len: Int): Unit = {
      Array.copy(b, off, buffer, pos, len)
      pos += len
    }
  }
  /** Number of bytes `writePosVarInt` emits for `i`:
   * 1 for 0..254, 3 for 255..65534, 7 otherwise.
   * Throws IllegalArgumentException for negative input.
   */
  def posVarIntSize(i: Int): Int = {
    if (i < 0) illegal(s"negative numbers not allowed: $i")
    if (i < ((1 << 8) - 1)) 1
    else {
      if (i < ((1 << 16) - 1)) {
        3
      } else {
        7
      }
    }
  }
  /**
   * This has a lot of methods from DataInputStream without having to allocate to get them This code is
   * similar to those algorithms
   */
  implicit class RichInputStream(val s: InputStream) extends AnyVal {
    /**
     * If s supports marking, we mark it. Otherwise we read the needed bytes out into a ByteArrayStream and
     * return that. This is intended for the case where you need possibly read size bytes but may stop early,
     * then skip this exact number of bytes. Intended use is: {code} val size = 100 val marked =
     * s.markOrBuffer(size) val y = fn(marked) marked.reset marked.skipFully(size) {/code}
     */
    def markOrBuffer(size: Int): InputStream = {
      val ms =
        if (s.markSupported) s
        else {
          val buf = new Array[Byte](size)
          s.readFully(buf)
          new ByteArrayInputStream(buf)
        }
      // Make sure we can reset after we read this many bytes
      ms.mark(size)
      ms
    }
    // Any nonzero byte is true, matching the write side (writeBoolean emits 0 or 1).
    def readBoolean: Boolean = readUnsignedByte != 0
    /**
     * Like read, but throws eof on error
     */
    def readByte: Byte = readUnsignedByte.toByte
    def readUnsignedByte: Int = {
      // Note that Java, when you read a byte, returns a Int holding an unsigned byte.
      // if the value is < 0, you hit EOF.
      val c1 = s.read
      if (c1 < 0) eof else c1
    }
    def readUnsignedShort: Int = {
      // Big-endian: first byte is the high byte. A negative read means EOF.
      val c1 = s.read
      val c2 = s.read
      if ((c1 | c2) < 0) eof else ((c1 << 8) | c2)
    }
    final def readFully(bytes: Array[Byte]): Unit = readFully(bytes, 0, bytes.length)
    // Reads exactly len bytes or throws EOFException; loops because a single
    // InputStream.read may return fewer bytes than requested.
    final def readFully(bytes: Array[Byte], offset: Int, len: Int): Unit = {
      if (len < 0) throw new IndexOutOfBoundsException()
      @annotation.tailrec
      def go(o: Int, l: Int): Unit =
        if (l == 0) ()
        else {
          val count = s.read(bytes, o, l)
          if (count < 0) eof
          else go(o + count, l - count)
        }
      go(offset, len)
    }
    def readDouble: Double = java.lang.Double.longBitsToDouble(readLong)
    def readFloat: Float = java.lang.Float.intBitsToFloat(readInt)
    /**
     * This is the algorithm from DataInputStream it was also benchmarked against the approach used in
     * readLong and found to be faster
     */
    def readInt: Int = {
      val c1 = s.read
      val c2 = s.read
      val c3 = s.read
      val c4 = s.read
      if ((c1 | c2 | c3 | c4) < 0) eof else ((c1 << 24) | (c2 << 16) | (c3 << 8) | c4)
    }
    /*
     * This is the algorithm from DataInputStream
     * it was also benchmarked against the same approach used
     * in readInt (buffer-less) and found to be faster.
     */
    def readLong: Long = {
      val buf = new Array[Byte](8)
      readFully(buf)
      (buf(0).toLong << 56) +
        ((buf(1) & 255).toLong << 48) +
        ((buf(2) & 255).toLong << 40) +
        ((buf(3) & 255).toLong << 32) +
        ((buf(4) & 255).toLong << 24) +
        ((buf(5) & 255) << 16) +
        ((buf(6) & 255) << 8) +
        (buf(7) & 255)
    }
    def readChar: Char = {
      val c1 = s.read
      val c2 = s.read
      // This is the algorithm from DataInputStream
      if ((c1 | c2) < 0) eof else ((c1 << 8) | c2).toChar
    }
    def readShort: Short = {
      val c1 = s.read
      val c2 = s.read
      // This is the algorithm from DataInputStream
      if ((c1 | c2) < 0) eof else ((c1 << 8) | c2).toShort
    }
    /**
     * This reads a varInt encoding that only encodes non-negative numbers. It uses: 1 byte for values 0 -
     * 255, 3 bytes for 256 - 65535, 7 bytes for 65536 - Int.MaxValue
     */
    final def readPosVarInt: Int = {
      // 0xFF in the first byte (and 0xFFFF in the next two) act as escape
      // markers for the wider encodings; see writePosVarInt.
      val c1 = readUnsignedByte
      if (c1 < ((1 << 8) - 1)) c1
      else {
        val c2 = readUnsignedShort
        if (c2 < ((1 << 16) - 1)) c2
        else readInt
      }
    }
    // Skips exactly count bytes or throws; loops because InputStream.skip may
    // skip fewer bytes than requested.
    final def skipFully(count: Long): Unit = {
      @annotation.tailrec
      def go(c: Long): Unit = {
        val skipped = s.skip(c)
        if (skipped == c) ()
        else if (skipped == 0L)
          throw new IOException(s"could not skipFully: count, c, skipped = ${(count, c, skipped)}")
        else go(c - skipped)
      }
      if (count != 0L) go(count) else ()
    }
  }
  /** Write-side counterparts of RichInputStream; same big-endian formats. */
  implicit class RichOutputStream(val s: OutputStream) extends AnyVal {
    def writeBoolean(b: Boolean): Unit = if (b) s.write(1: Byte) else s.write(0: Byte)
    def writeBytes(b: Array[Byte], off: Int, len: Int): Unit =
      s.write(b, off, len)
    def writeByte(b: Byte): Unit = s.write(b)
    def writeBytes(b: Array[Byte]): Unit = writeBytes(b, 0, b.length)
    /**
     * This reads a varInt encoding that only encodes non-negative numbers. It uses: 1 byte for values 0 -
     * 255, 3 bytes for 256 - 65535, 7 bytes for 65536 - Int.MaxValue
     */
    def writePosVarInt(i: Int): Unit = {
      if (i < 0) illegal(s"must be non-negative: $i")
      if (i < ((1 << 8) - 1)) s.write(i)
      else {
        // 0xFF escapes to the 2-byte form; 0xFF 0xFF 0xFF escapes to the 4-byte form
        s.write(-1: Byte)
        if (i < ((1 << 16) - 1)) {
          s.write(i >> 8)
          s.write(i)
        } else {
          // the linter does not like us repeating ourselves here
          s.write(-1) // linter:ignore
          s.write(-1) // linter:ignore
          writeInt(i)
        }
      }
    }
    def writeDouble(d: Double): Unit = writeLong(java.lang.Double.doubleToLongBits(d))
    def writeFloat(f: Float): Unit = writeInt(java.lang.Float.floatToIntBits(f))
    def writeLong(l: Long): Unit = {
      s.write((l >>> 56).toInt)
      s.write((l >>> 48).toInt)
      s.write((l >>> 40).toInt)
      s.write((l >>> 32).toInt)
      s.write((l >>> 24).toInt)
      s.write((l >>> 16).toInt)
      s.write((l >>> 8).toInt)
      s.write(l.toInt)
    }
    def writeInt(i: Int): Unit = {
      s.write(i >>> 24)
      s.write(i >>> 16)
      s.write(i >>> 8)
      s.write(i)
    }
    def writeChar(sh: Char): Unit = {
      s.write(sh >>> 8)
      s.write(sh.toInt)
    }
    def writeShort(sh: Short): Unit = {
      s.write(sh >>> 8)
      s.write(sh.toInt)
    }
  }
}
| twitter/scalding | scalding-serialization/src/main/scala/com/twitter/scalding/serialization/JavaStreamEnrichments.scala | Scala | apache-2.0 | 8,501 |
package scala.lms
package epfl
package test7
import common._
import test1._
import util.OverloadHack
import java.io.{PrintWriter,StringWriter,FileOutputStream}
/** Code-motion test program: nested lambdas whose generated output is diffed
 * against a golden check file, so the program itself must not change.
 */
trait NestLambdaProg1 extends BooleanOps with PrimitiveOps with Functions with Print { // also used by TestLambdaLift
  def test(x: Rep[Unit]) = {
    val f = doLambda { x: Rep[Double] =>
      val g = doLambda { y: Rep[Double] =>
        print("yo")
        // unit(4.0) * unit(3.0) is loop-invariant relative to g's parameter
        y + (unit(4.0) * unit(3.0))
      }
      g
    }
    f
  }
}
/** Code-motion test program: a lambda referenced from one branch of a
 * conditional inside another lambda. Output is diffed against a check file.
 */
trait NestCondProg2 extends BooleanOps with PrimitiveOps with Functions with IfThenElse with Print {
  /* Previously this program exhibited behavior that is likely undesired in many
   cases. The definition of f was moved *into* g and into the conditional.
   The doLambda in the else branch would not be hoisted out of g either.
   Although there are situations where this particular kind of code motion
   is an improvement (namely, if the probability of y == true is very low
   and the else branch would be cheap).
   */
  def test(x: Rep[Unit]) = {
    val f = doLambda { x: Rep[Double] => 2 * x }
    val g = doLambda { y: Rep[Boolean] =>
      print("yo")
      if (y)
        f
      else
        doLambda { x: Rep[Double] => x + 1 }
    }
    g
  }
}
/** Code-motion test program: conditionals choosing between lambdas at both
 * definition and use sites. Output is diffed against a check file.
 */
trait NestCondProg3 extends BooleanOps with PrimitiveOps with Functions with IfThenElse with Print {
  def test(x: Rep[Unit]) = {
    val f = if (unit(true)) doLambda { x: Rep[Double] => 2 * x } else doLambda { x: Rep[Double] => 4 * x }
    val g = doLambda { y: Rep[Boolean] =>
      print("yo")
      if (y) {
        print("then")
        f
      } else {
        print("else")
        if (unit(false)) doLambda { x: Rep[Double] => x + 1 } else doLambda { x: Rep[Double] => x + 2 }
      }
    }
    g
  }
}
/** Code-motion test program: effectful computation confined to one branch of a
 * conditional inside a lambda. Output is diffed against a check file.
 */
trait NestCondProg4 extends BooleanOps with PrimitiveOps with Functions with IfThenElse with Print {
  def test(x: Rep[Unit]) = {
    val g = doLambda { y: Rep[Double] =>
      if (unit(true)) {
        val x = y + 1.0
        print(x)
        ()
      } else {
      }
    }
    g
  }
}
/** Code-motion test program: a pure expression used only inside a lambda that
 * itself sits in one conditional branch. Output is diffed against a check file.
 */
trait NestCondProg5 extends BooleanOps with PrimitiveOps with Functions with IfThenElse with Print {
  def test(x: Rep[Unit]) = {
    if (unit(true)) {
      // should place 7 + 9 here
      doLambda { y: Rep[Double] =>
        print(unit(7.0) + unit(9.0))
      }
    } else {
      doLambda { u: Rep[Double] => } // dummy
    }
  }
}
/** Code-motion test program documenting a KNOWN LIMITATION (see FIXME below);
 * its pending check file must not be regenerated by a code change here.
 */
trait NestCondProg6 extends BooleanOps with PrimitiveOps with Functions with IfThenElse with Print {
  // FIXME: this one doesn't work yet!!!
  def test(x: Rep[Unit]) = {
    val z = unit(7.0) + unit(9.0) // should move into the conditional (but isn't currently)
    val x = if (unit(true)) {
      print(z)
    } else {
    }
    doLambda { y: Rep[Boolean] =>
      print(x)
    }
  }
}
/** Code-motion test program: a value used both directly in a branch and by a
 * nested lambda; regression test from a Delite issue (see suite). Output is
 * diffed against a check file.
 */
trait NestCondProg7 extends LiftAll with BooleanOps with PrimitiveOps with OrderingOps with Functions with IfThenElse with Print {
  def test(x: Rep[Unit]) = {
    doLambda { y: Rep[Double] =>
      if (y < 100.0) {
        val z = y + unit(9.0) // should stay inside conditional:
        // apparently z was moved up because it is also used in the lambda (z+u)
        doLambda { u: Rep[Double] =>
          z + u
        }
      } else {
        doLambda { u: Rep[Double] => u} // dummy
      }
    }
  }
}
/*
seems to be another incarnation of test6
trait NestCondProg8 extends PrimitiveOps with OrderingOps with Functions with IfThenElse with Print {
// FIXME
def test(x: Rep[Unit]) = {
doLambda { y: Rep[Double] =>
if (y < 100) {
val z = y + unit(9.0) // should stay inside conditional
z + unit(1.0)
} else {
val z = y + unit(9.0) // should stay inside conditional, although used again
z + unit(2.0)
}
}
}
}
*/
/** Golden-file test suite: each test generates Scala code for one of the
 * NestLambdaProg/NestCondProg programs and diffs the output against a check
 * file under `prefix`. Do not change the generated programs or mixin stacks
 * without regenerating the check files.
 */
class TestCodemotion extends FileDiffSuite {
  val prefix = home + "test-out/epfl/test7-"
  def testCodemotion1 = {
    // test loop hoisting (should use loops but lambdas will do for now)
    withOutFile(prefix+"codemotion1") {
      new NestLambdaProg1 with FunctionsExp with PrintExp
        with CoreOpsPkgExp { self =>
        val codegen = new ScalaGenPrimitiveOps with ScalaGenFunctions with ScalaGenPrint { val IR: self.type = self }
        codegen.emitSource(test, "Test", new PrintWriter(System.out))
      }
    }
    assertFileEqualsCheck(prefix+"codemotion1")
  }
  def testCodemotion2 = {
    // test loop hoisting (should use loops but lambdas will do for now)
    withOutFile(prefix+"codemotion2") {
      new NestCondProg2 with FunctionsExp with PrintExp with IfThenElseExp
        with CoreOpsPkgExp { self =>
        val codegen = new ScalaGenPrimitiveOps with ScalaGenFunctions with ScalaGenIfThenElse with ScalaGenPrint { val IR: self.type = self }
        codegen.emitSource(test, "Test", new PrintWriter(System.out))
      }
    }
    assertFileEqualsCheck(prefix+"codemotion2")
  }
  def testCodemotion3 = {
    // test loop hoisting (should use loops but lambdas will do for now)
    withOutFile(prefix+"codemotion3") {
      new NestCondProg3 with FunctionsExp with PrintExp with IfThenElseExp
        with CoreOpsPkgExp { self =>
        val codegen = new ScalaGenPrimitiveOps with ScalaGenFunctions with ScalaGenIfThenElse with ScalaGenPrint { val IR: self.type = self }
        codegen.emitSource(test, "Test", new PrintWriter(System.out))
      }
    }
    assertFileEqualsCheck(prefix+"codemotion3")
  }
  def testCodemotion4 = {
    // test loop hoisting (should use loops but lambdas will do for now)
    withOutFile(prefix+"codemotion4") {
      new NestCondProg4 with FunctionsExp with PrintExp with IfThenElseExp
        with CoreOpsPkgExp { self =>
        val codegen = new ScalaGenPrimitiveOps with ScalaGenFunctions with ScalaGenIfThenElse with ScalaGenPrint { val IR: self.type = self }
        codegen.emitSource(test, "Test", new PrintWriter(System.out))
      }
    }
    assertFileEqualsCheck(prefix+"codemotion4")
  }
  def testCodemotion5 = {
    // test loop hoisting (should use loops but lambdas will do for now)
    withOutFile(prefix+"codemotion5") {
      new NestCondProg5 with FunctionsExp with PrintExp with IfThenElseExp
        with CoreOpsPkgExp { self =>
        val codegen = new ScalaGenPrimitiveOps with ScalaGenFunctions with ScalaGenIfThenElse with ScalaGenPrint { val IR: self.type = self }
        codegen.emitSource(test, "Test", new PrintWriter(System.out))
      }
    }
    assertFileEqualsCheck(prefix+"codemotion5")
  }
  def testCodemotion6 = {
    // test loop hoisting (should use loops but lambdas will do for now)
    withOutFile(prefix+"codemotion6") {
      new NestCondProg6 with FunctionsExp with IfThenElseExp with PrintExp
        with CoreOpsPkgExp { self =>
        val codegen = new ScalaGenPrimitiveOps with ScalaGenFunctions with ScalaGenIfThenElse with ScalaGenPrint { val IR: self.type = self }
        codegen.emitSource(test, "Test", new PrintWriter(System.out))
        println("// NOTE: generated code is not ideal yet (x1=7+9 should be moved inside conditional). see source for discussion.")
      }
    }
    // PENDING TEST
    // assertFileEqualsCheck(prefix+"codemotion6")
  }
  def testCodemotion7 = {
    // test loop hoisting (should use loops but lambdas will do for now)
    withOutFile(prefix+"codemotion7") {
      new NestCondProg7 with OrderingOpsExp with FunctionsExp with IfThenElseExp with PrintExp
        with CoreOpsPkgExp { self =>
        val codegen = new ScalaGenPrimitiveOps with ScalaGenOrderingOps with ScalaGenFunctions with ScalaGenIfThenElse with ScalaGenPrint { val IR: self.type = self }
        codegen.emitSource(test, "Test", new PrintWriter(System.out))
        println("// was a Delite issue (Scratchpad in optiml-beta).")
      }
    }
    assertFileEqualsCheck(prefix+"codemotion7")
  }
}
| astojanov/virtualization-lms-core | test-src/epfl/test7-analysis/TestCodeMotion.scala | Scala | bsd-3-clause | 7,905 |
//
// Token.scala -- Scala class Token
// Project OrcScala
//
// $Id: Token.scala 2933 2011-12-15 16:26:02Z jthywissen $
//
// Created by dkitchin on Aug 12, 2011.
//
// Copyright (c) 2011 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.run.core
import orc.{ Schedulable, OrcRuntime, OrcEvent, CaughtEvent }
import orc.ast.oil.nameless.{ Variable, UnboundVariable, Stop, Sequence, Prune, Parallel, Otherwise, Hole, HasType, Expression, Def, DeclareType, DeclareDefs, Constant, Call, Argument }
import orc.error.runtime.{ TokenException, StackLimitReachedError, ArityMismatchException, ArgumentTypeMismatchException }
import orc.error.OrcException
import orc.lib.time.{ Vtime, Vclock, Vawait }
import orc.util.BlockableMapExtension.addBlockableMapToList
import orc.values.sites.TotalSite
import orc.values.{ Signal, OrcRecord, Field }
/** @author dkitchin
*/
class Token protected (
protected var node: Expression,
protected var stack: Frame = EmptyFrame,
protected var env: List[Binding] = Nil,
protected var group: Group,
protected var clock: Option[VirtualClock] = None,
protected var state: TokenState = Live)
extends GroupMember with Schedulable {
  // Count of FunctionFrames currently on this token's stack; compared against
  // options.stackSize in push() to enforce the configured stack limit.
  var functionFramesPushed: Int = 0
  val runtime: OrcRuntime = group.runtime
  val options = group.root.options
  /** Execution of a token cannot indefinitely block the executing thread. */
  override val nonblocking = true
  /** Public constructor */
  def this(start: Expression, g: Group) = {
    this(node = start, group = g, stack = GroupFrame(EmptyFrame))
  }
  /** Copy constructor with defaults; used by fork() to clone this token. */
  private def copy(
    node: Expression = node,
    stack: Frame = stack,
    env: List[Binding] = env,
    group: Group = group,
    clock: Option[VirtualClock] = clock,
    state: TokenState = state): Token = {
    new Token(node, stack, env, group, clock, state)
  }
  /*
   * On creation: Add a token to its group if it is not halted or killed.
   * (Constructor code: runs for every Token instance, including copies.)
   */
  state match {
    case Publishing(_) | Live | Blocked(_) | Suspending(_) | Suspended(_) => group.add(this)
    case Halted | Killed => {}
  }
/** Change this token's state.
*
* Return true if the token's state was successfully set
* to the requested state.
*
* Return false if the token's state was already Killed.
*/
protected def setState(newState: TokenState): Boolean = synchronized {
/*
* Make sure that the token has not been killed.
* If it has been killed, return false immediately.
*/
if (state != Killed) { state = newState; true } else false
}
  //@volatile
  //var scheduledBy: Throwable = null //FIXME: Remove "scheduledBy" debug facility
  /* When a token is scheduled, notify its clock accordingly */
  override def onSchedule() {
    //scheduledBy = new Throwable("Task scheduled by")
    //if (runtime.asInstanceOf[OrcWithThreadPoolScheduler].executor.asInstanceOf[OrcThreadPoolExecutor].getQueue().contains(this) && !group.isKilled()) Console.err.println("Token scheduled, in queue, && group alive! state="+state+"\\n"+scheduledBy.toString())
    // a scheduled token is active work, so the clock must not treat it as quiescent
    clock foreach { _.unsetQuiescent() }
    super.onSchedule()
  }
  /* When a token is finished running, notify its clock accordingly */
  override def onComplete() {
    clock foreach { _.setQuiescent() }
    super.onComplete()
  }
  /** Pass an event to this token's enclosing group.
   *
   * This method is asynchronous:
   * it may be called from a thread other than
   * the thread currently running this token.
   */
  def notifyOrc(event: OrcEvent) { group.notifyOrc(event) }
  /** Kill this token.
   *
   * Recursively collapses Suspending/Suspended wrappers to find the underlying
   * state; if the token is blocked on a site call, the call itself is killed.
   *
   * This method is asynchronous:
   * it may be called from a thread other than
   * the thread currently running this token.
   */
  def kill() {
    def collapseState(victimState: TokenState) {
      victimState match {
        case Suspending(s) => collapseState(s)
        case Suspended(s) => collapseState(s)
        case Blocked(handle: SiteCallHandle) => { handle.kill() }
        case Live | Publishing(_) | Blocked(_) | Halted | Killed => {}
      }
    }
    synchronized {
      collapseState(state)
      if (setState(Killed)) {
        /* group.remove(this) conceptually runs here, but as an optimization,
         * this is unnecessary. Note that the current Group.remove implementation
         * relies on this optimization for correctness of the tokenCount. */
      }
    }
  }
/** Make this token block on some resource.
*
* This method is synchronous:
* it must only be called from a thread that is currently
* executing the run() method of this token.
*/
def blockOn(blocker: Blocker) {
state match {
case Live => setState(Blocked(blocker))
case Killed => {}
case _ => throw new AssertionError("Only live tokens may be blocked: state=" + state)
}
}
/** Unblock a token that is currently blocked on some resource.
* Schedule the token to run.
*
* This method is synchronous:
* it must only be called from a thread that is currently
* executing the run() method of this token.
*/
def unblock() {
state match {
case Blocked(_) => {
if (setState(Live)) { schedule() }
}
case Suspending(Blocked(_: OtherwiseGroup)) => {
if (setState(Suspending(Live))) { schedule() }
}
case Suspended(Blocked(_: OtherwiseGroup)) => {
setState(Suspended(Live))
}
case Killed => {}
case _ => { throw new AssertionError("unblock on a Token that is not Blocked/Killed: state=" + state) }
}
}
  /** Suspend the token in preparation for a program rewrite.
   *
   * This method is asynchronous:
   * it may be called from a thread other than
   * the thread currently running this token.
   */
  def suspend() {
    state match {
      // Wrap the current state so resume() can restore it later.
      case Live | Blocked(_) | Publishing(_) => setState(Suspending(state))
      // Already suspending/suspended or terminated: nothing to do.
      case Suspending(_) | Suspended(_) | Halted | Killed => {}
    }
  }
  /** Resume the token from suspension after a program rewrite.
   *
   * This method is asynchronous:
   * it may be called from a thread other than
   * the thread currently running this token.
   */
  def resume() {
    state match {
      // Still on the scheduler queue: just restore the wrapped state.
      case Suspending(prevState) => setState(prevState)
      // Parked: restore the wrapped state and put the token back on the queue.
      case Suspended(prevState) => {
        if (setState(prevState)) { schedule() }
      }
      case Publishing(_) | Live | Blocked(_) | Halted | Killed => {}
    }
  }
  /** Place this token on the runtime's scheduler queue. */
  def schedule() = runtime.schedule(this)

  /** Split execution: returns this token paired with a fresh copy of it. */
  protected def fork() = synchronized { (this, copy()) }

  /** Reposition the token at expression `e` (mutates and returns this token). */
  def move(e: Expression) = { node = e; this }

  /** Replace the token's environment with `context` (mutates and returns this token). */
  def jump(context: List[Binding]) = { env = context; this }
  /** Push a frame onto this token's continuation stack and return this token.
   *
   * Function frames are counted so that a configured stack-size limit
   * (options.stackSize > 0) can be enforced; exceeding it reports a
   * StackLimitReachedError through the usual exception path.
   */
  protected def push(newStack: Frame) = {
    if (newStack.isInstanceOf[FunctionFrame]) {
      functionFramesPushed = functionFramesPushed + 1
      if (options.stackSize > 0 && functionFramesPushed > options.stackSize) {
        this !! new StackLimitReachedError(options.stackSize)
      }
    }
    stack = newStack
    this
  }
  /** Remove the top frame of this token's stack.
   *
   * This method is synchronous:
   * it must only be called from a thread that is currently
   * executing the run() method of this token.
   */
  def pop() = {
    if (stack.isInstanceOf[FunctionFrame]) {
      functionFramesPushed = functionFramesPushed - 1
    }
    // NOTE(review): assumes the top frame is always a CompositeFrame here,
    // i.e. pop() is never called when only the bottom frame remains -- confirm with callers.
    stack = stack.asInstanceOf[CompositeFrame].previous
  }
def getGroup(): Group = { group }
def getNode(): Expression = { node }
def getEnv(): List[Binding] = { env }
def getStack(): Frame = { stack }
def getClock(): Option[VirtualClock] = { clock }
  /** Move this token from its current group into `newGroup`.
   *
   * The token is added to the new group before being removed from the
   * old one, so it is never unaccounted for in between.
   */
  def migrate(newGroup: Group) = {
    require(newGroup != group)
    val oldGroup = group
    newGroup.add(this); oldGroup.remove(this)
    group = newGroup
    this
  }
  /** Enter `newGroup`, first pushing a GroupFrame so the token can
   * restore its previous group membership when the frame is popped.
   */
  protected def join(newGroup: Group) = {
    push(GroupFrame(stack))
    migrate(newGroup)
    this
  }
  /** Prepend binding `b` to the environment, recording the addition on the
   * stack so the binding can be unwound later.
   */
  def bind(b: Binding) = {
    env = b :: env
    stack match {
      // Merge into an existing binding frame rather than pushing a new one.
      case BindingFrame(n, previous) => { stack = BindingFrame(n + 1, previous) }

      /* Tail call optimization (part 1 of 2) */
      case _: FunctionFrame if (!options.disableTailCallOpt) => { /* Do not push a binding frame over a tail call.*/ }

      case _ => { push(BindingFrame(1, stack)) }
    }
    this
  }
def unbind(n: Int) = { env = env.drop(n); this }
protected def lookup(a: Argument): Binding = {
a match {
case Constant(v) => BoundValue(v)
case Variable(n) => env(n)
case UnboundVariable(x) => BoundStop //TODO: Also report the presence of an unbound variable.
}
}
/** Attempt to resolve a binding to a value.
* When the binding resolves to v, call k(v).
* (If it is already resolved, k is called immediately)
*
* If the binding resolves to a halt, halt this token.
*/
protected def resolve(b: Binding)(k: AnyRef => Unit) {
resolveOptional(b) {
case Some(v) => k(v)
case None => halt()
}
}
  /** Attempt to resolve a binding to a value.
   * When the binding resolves to v, call k(Some(v)).
   * (If it is already resolved, k is called immediately)
   *
   * If the binding resolves to a halt, call k(None).
   *
   * Note that resolving a closure also encloses its context.
   */
  protected def resolveOptional(b: Binding)(k: Option[AnyRef] => Unit): Unit = {
    b match {
      case BoundValue(v) =>
        v match {
          // A closure must first have its lexical context resolved, producing
          // a context-independent copy of the closure to hand to k.
          case c: Closure =>
            enclose(c.lexicalContext) { newContext: List[Binding] =>
              k(Some(Closure(c.defs, c.pos, newContext)))
            }
          case u => k(Some(u))
        }
      case BoundStop => k(None)
      // An unresolved future: save the continuation in a FutureFrame and
      // register this token as a reader of the future.
      case BoundFuture(g) => {
        push(FutureFrame(k, stack))
        g read this
      }
    }
  }
  /** Create a new Closure object whose lexical bindings are all resolved and replaced.
   * Such a closure will have no references to any group.
   * This object is then passed to the continuation.
   */
  protected def enclose(bs: List[Binding])(k: List[Binding] => Unit): Unit = {
    // Resolve a single binding; a halted binding becomes BoundStop rather
    // than halting the whole token.
    def resolveBound(b: Binding)(k: Binding => Unit) =
      resolveOptional(b) {
        case Some(v) => k(BoundValue(v))
        case None => k(BoundStop)
      }
    bs.blockableMap(resolveBound)(k)
  }
  /** Begin executing the body of definition `d` in the given closure
   * context with the given (possibly unresolved) parameter bindings.
   *
   * Halts the token with an ArityMismatchException if the parameter
   * count does not match the definition's arity.
   */
  protected def functionCall(d: Def, context: List[Binding], params: List[Binding]) {
    if (params.size != d.arity) {
      this !! new ArityMismatchException(d.arity, params.size) /* Arity mismatch. */
    } else {

      /* 1) If this is not a tail call, push a function frame referring to the current environment.
       * 2) Change the current environment to the closure's saved environment.
       * 3) Add bindings for the arguments to the new current environment.
       *
       * Caution: The ordering of these operations is very important;
       * do not permute them.
       */

      /* Tail call optimization (part 2 of 2) */
      /*
       * Push a new FunctionFrame
       * only if the call is not a tail call.
       */
      if (!stack.isInstanceOf[FunctionFrame] || options.disableTailCallOpt) {
        push(FunctionFrame(node, env, stack))
      }

      /* Jump into the function context */
      jump(context)

      /* Bind the args */
      for (p <- params) { bind(p) }

      /* Move into the function body */
      move(d.body)
      schedule()
    }
  }
  /** Handle a call to one of the virtual-clock operations (Vclock,
   * Vawait, Vtime) inline, without going through a site call handle.
   *
   * Throws ArityMismatchException if the actual argument list does not
   * match the operation's expected shape.
   */
  protected def clockCall(vc: VirtualClockOperation, actuals: List[AnyRef]): Unit = {
    (vc, actuals) match {
      // Vclock(f): install a new virtual clock ordered by total site f.
      case (`Vclock`, List(f)) => {
        f match {
          case totalf: TotalSite => {
            def ordering(x: AnyRef, y: AnyRef) = {
              // TODO: Add error handling, either here or in the scheduler.
              // A comparator error should kill the engine.
              val i = totalf.evaluate(List(x, y)).asInstanceOf[Int]
              assert(i == -1 || i == 0 || i == 1)
              i
            }
            clock = Some(new VirtualClock(clock, ordering, runtime))
            publish()
          }
          case _ => {
            this !! (new ArgumentTypeMismatchException(0, "TotalSite", f.toString()))
          }
        }
      }
      // Vawait(t): wait on the current clock, or halt if there is none.
      case (`Vawait`, List(t)) => {
        clock match {
          case Some(cl) => cl.await(this, t)
          case None => halt()
        }
      }
      // Vtime(): publish the clock's current time, or halt if unavailable.
      case (`Vtime`, Nil) => {
        clock flatMap { _.now() } match {
          case Some(t) => publish(t)
          case None => halt()
        }
      }
      case _ => throw new ArityMismatchException(vc.arity, actuals.size)
    }
  }
  /** Invoke site `s` with already-resolved arguments.
   *
   * Virtual-clock operations are handled inline; for any other site the
   * token blocks on a new SiteCallHandle, which is then scheduled.
   */
  protected def siteCall(s: AnyRef, actuals: List[AnyRef]): Unit = {
    s match {
      case vc: VirtualClockOperation => {
        clockCall(vc, actuals)
      }
      case _ => {
        val sh = new SiteCallHandle(this, s, actuals)
        blockOn(sh)
        runtime.schedule(sh)
      }
    }
  }
  /** Dispatch a call on `target`: closures become function calls, records
   * with an "apply" member delegate to it (unless the call is a plain
   * member access), and anything else becomes a site call.
   */
  protected def makeCall(target: AnyRef, params: List[Binding]): Unit = {
    target match {
      case c: Closure => {
        functionCall(c.code, c.context, params)
      }

      /* I wish this didn't need a special case...
       * but if the record element is a closure,
       * it can't be handled by an invocation trait.
       * -dkitchin
       */
      case r @ OrcRecord(entries) if entries contains "apply" => {
        params.blockableMap(resolve) {
          case args @ List(Field(_)) => siteCall(r, args) // apply isn't allowed to supersede other member accesses
          case _ => makeCall(entries("apply"), params)
        }
      }

      case s => {
        params.blockableMap(resolve) { siteCall(s, _) }
      }
    }
  }
  /* Debugging aid (used only by the commented-out assertion in run()):
   * checks that the current thread's call stack looks like a scheduler
   * worker invocation.
   * NOTE(review): '&&' binds tighter than '||', so this parses as
   * (length-check && "runTask") || ("eval" && "run" && recurse) --
   * confirm that grouping is intended. */
  def stackOK(testStack: Array[java.lang.StackTraceElement], offset: Int): Boolean =
    testStack.length == 4 + offset && testStack(1 + offset).getMethodName() == "runTask" ||
      testStack(1 + offset).getMethodName() == "eval" && testStack(2 + offset).getMethodName() == "run" && stackOK(testStack, offset + 2)
  /** One scheduler step for this token: dispatch on the current state.
   * Invoked by the runtime's worker threads.
   */
  def run() {
    //val ourStack = new Throwable("Entering Token.run").getStackTrace()
    //assert(stackOK(ourStack, 0), "Token run not in ThreadPoolExecutor.Worker! sl="+ourStack.length+", m1="+ourStack(1).getMethodName()+", state="+state)
    try {
      // A token whose group died while it was queued must die as well.
      if (group.isKilled()) { kill() }
      state match {
        case Live => eval(node)
        case Suspending(prevState) => setState(Suspended(prevState))
        case Blocked(b) => b.check(this)
        case Publishing(v) => if (setState(Live)) { stack(this, v) }
        case Killed => {} // This token was killed while it was on the schedule queue; ignore it
        case Suspended(_) => throw new AssertionError("suspended token scheduled")
        case Halted => throw new AssertionError("halted token scheduled")
      }
    } catch {
      case e: OrcException => this !! e
      case e: InterruptedException => { halt(); Thread.currentThread().interrupt() } //Thread interrupt causes halt without notify
      // NOTE(review): this arm catches every remaining Throwable (pre-2.10
      // catch-all), including fatal errors; presumably deliberate so the
      // error is reported to the engine -- confirm.
      case e => { notifyOrc(CaughtEvent(e)); halt() }
    }
  }
  /** Evaluate one OIL expression node: the core small-step interpreter
   * for a live token. Each case either halts, publishes, forks, or
   * repositions this token and reschedules it.
   */
  protected def eval(node: orc.ast.oil.nameless.Expression) {
    node match {
      case Stop() => halt()
      case Hole(_, _) => halt()
      // A bare argument: resolve its binding and publish the value.
      case (a: Argument) => resolve(lookup(a)) { publish }

      case Call(target, args, _) => {
        val params = args map lookup
        lookup(target) match {
          // Fast path: target is already a bound closure.
          case BoundValue(c: Closure) => functionCall(c.code, c.context, params)
          case b => resolve(b) { makeCall(_, params) }
        }
      }

      // Fork: both halves run independently.
      case Parallel(left, right) => {
        val (l, r) = fork()
        l.move(left)
        r.move(right)
        runtime.schedule(l, r)
      }

      // Sequencing: run left; the SequenceFrame continues with right.
      case Sequence(left, right) => {
        push(SequenceFrame(right, stack))
        move(left)
        schedule()
      }

      // Pruning: left reads a future that right's group will resolve.
      case Prune(left, right) => {
        val (l, r) = fork()
        val pg = new PruningGroup(group)
        l.bind(BoundFuture(pg))
        r.join(pg)
        l.move(left)
        r.move(right)
        runtime.schedule(l, r)
      }

      // Otherwise: right is held in reserve and runs only if left halts silently.
      case Otherwise(left, right) => {
        val (l, r) = fork
        r.move(right)
        val region = new OtherwiseGroup(group, r)
        l.join(region)
        l.move(left)
        runtime.schedule(l)
      }

      case decldefs @ DeclareDefs(openvars, defs, body) => {
        /* Closure compaction: Bind only the free variables
         * of the defs in this lexical context.
         */
        val lexicalContext = openvars map { i: Int => lookup(Variable(i)) }
        for (i <- defs.indices) {
          bind(BoundValue(Closure(defs, i, lexicalContext)))
        }
        move(body)
        schedule()
      }

      // Type information is erased at runtime: evaluate the inner expression.
      case HasType(expr, _) => {
        move(expr)
        run()
      }

      case DeclareType(_, expr) => {
        move(expr)
        run()
      }
    }
  }
  /** Publish value `v` from this token, transitioning to Publishing so
   * the value is propagated on the token's next scheduler step.
   */
  def publish(v: AnyRef) {
    state match {
      case Blocked(_: OtherwiseGroup) => throw new AssertionError("publish on a pending Token")
      case Live | Blocked(_) => {
        setState(Publishing(v))
        schedule()
      }
      // Suspending/Suspended: remember the pending publication inside the wrapper.
      case Suspending(_) => {
        setState(Suspending(Publishing(v)))
        schedule()
      }
      case Suspended(_) => {
        setState(Suspended(Publishing(v)))
      }
      case Publishing(_) => throw new AssertionError("Already publishing!")
      case Halted | Killed => {}
    }
  }

  /** Publish the unit value, Signal. */
  def publish() { publish(Signal) }
  /** Halt this token and notify its group; idempotent for tokens that
   * are already Halted or Killed.
   */
  def halt() {
    state match {
      case Publishing(_) | Live | Blocked(_) | Suspending(_) => {
        setState(Halted)
        group.halt(this)
      }
      case Suspended(_) => throw new AssertionError("halt on a suspended Token")
      case Halted | Killed => {}
    }
  }
  /** Report an OrcException raised at this token: attach the token's
   * source position and (for TokenExceptions without one) a backtrace of
   * the function call points on the stack, then notify Orc and halt.
   */
  def !!(e: OrcException) {
    e.setPosition(node.pos)
    e match {
      case te: TokenException if (te.getBacktrace() == null || te.getBacktrace().length == 0) => {
        val callPoints = stack.toList collect { case f: FunctionFrame => f.callpoint.pos }
        te.setBacktrace(callPoints.toArray)
      }
      case _ => {} // Not a TokenException; no need to collect backtrace
    }
    notifyOrc(CaughtEvent(e))
    halt()
  }
}
/** Base trait for the lifecycle states of a Token.
 * `isLive` is true exactly for states from which the token can still
 * make progress.
 * NOTE(review): all implementations live in this file; the trait could
 * likely be `sealed` -- confirm no external subclasses exist.
 */
trait TokenState {
  val isLive: Boolean
}

/** Token is ready to make progress */
case object Live extends TokenState {
  val isLive = true
}

/** Token is propagating a published value */
case class Publishing(v: AnyRef) extends TokenState {
  val isLive = true
}

/** Token is waiting on another task */
case class Blocked(blocker: Blocker) extends TokenState {
  val isLive = true
}

/** Token has been told to suspend, but it's still in the scheduler queue */
case class Suspending(prevState: TokenState) extends TokenState {
  val isLive = prevState.isLive
}

/** Suspended Tokens must be re-scheduled upon resume */
case class Suspended(prevState: TokenState) extends TokenState {
  val isLive = prevState.isLive
}

/** Token halted itself */
case object Halted extends TokenState {
  val isLive = false
}

/** Token killed by engine */
case object Killed extends TokenState {
  val isLive = false
}
| laurenyew/cOrcS | src/orc/run/core/Token.scala | Scala | bsd-3-clause | 19,037 |
/* ___ _ ___ _ _ *\\
** / __| |/ (_) | | The SKilL Generator **
** \\__ \\ ' <| | | |__ (c) 2013-16 University of Stuttgart **
** |___/_|\\_\\_|_|____| see LICENSE **
\\* */
package de.ust.skill.generator.sidl
import scala.collection.JavaConversions.asScalaBuffer
import de.ust.skill.ir.ConstantLengthArrayType
import de.ust.skill.ir.Declaration
import de.ust.skill.ir.Field
import de.ust.skill.ir.FieldLike
import de.ust.skill.ir.GroundType
import de.ust.skill.ir.ListType
import de.ust.skill.ir.MapType
import de.ust.skill.ir.SetType
import de.ust.skill.ir.Type
import de.ust.skill.ir.VariableLengthArrayType
/**
 * Fake Main implementation required to make trait stacking work.
 * The concrete output is produced entirely by the stacked maker traits;
 * this base deliberately generates nothing itself.
 */
abstract class FakeMain extends GeneralOutputMaker { def make {} }
/**
 * Skill Specification pretty printing.
 *
 * @author Timm Felden
 */
class Main extends FakeMain
    with SIDLMaker {

  lineLength = 80

  /** Format a declaration's comment as a block comment wrapped at lineLength. */
  override def comment(d : Declaration) : String = d.getComment.format("/**\\n", " * ", lineLength, " */\\n")
  /** Format a field's comment, indented one level deeper than declarations. */
  override def comment(f : FieldLike) : String = f.getComment.format("/**\\n", " * ", lineLength, " */\\n  ")

  override def packageDependentPathPostfix = ""
  override def defaultCleanMode = "none"

  /**
   * Translates IR types into SIDL type notation.
   */
  override protected def mapType(t : Type) : String = t match {
    case t : GroundType              ⇒ t.getSkillName
    case t : ConstantLengthArrayType ⇒ s"${mapType(t.getBaseType)}[${t.getLength}]"
    case t : VariableLengthArrayType ⇒ s"${mapType(t.getBaseType)}[]"
    case t : ListType                ⇒ s"list<${mapType(t.getBaseType)}>"
    case t : SetType                 ⇒ s"set<${mapType(t.getBaseType)}>"
    case t : MapType                 ⇒ t.getBaseTypes.mkString("map<", ", ", ">")
    case t                           ⇒ t.getName.capital
  }

  /**
   * Provides the package prefix.
   */
  override protected def packagePrefix() : String = _packagePrefix
  private var _packagePrefix = ""

  override def setPackage(names : List[String]) {
    // NOTE(review): foldRight leaves a trailing '.' (e.g. "a.b."), which is
    // presumably intended for direct concatenation with type names -- confirm.
    _packagePrefix = names.foldRight("")(_ + "." + _)
  }

  override def setOption(option : String, value : String) {
    option match {
      case "drop" ⇒ value match {
        case "interfaces" ⇒ droppedKinds += Interfaces
        // Previously any other value crashed with a bare MatchError, even
        // though helpText advertises more kinds; report a clear error instead.
        case other        ⇒ sys.error(s"unsupported drop kind: $other (only 'interfaces' is implemented)")
      }
      case unknown ⇒ sys.error(s"unknown Argument: $unknown")
    }
  }

  /**
   * stats do not require any escaping
   */
  override def escaped(target : String) : String = target

  override def helpText : String = """
  drop = (interfaces|enums|typedefs|views|all)
    drops the argument kind from the specification, defaults is none
"""

  override def customFieldManual : String = "will keep all custom fields as-is"

  // unused: pretty printing never needs a default field value
  override protected def defaultValue(f : Field) = throw new NoSuchMethodError
}
| skill-lang/skill | src/main/scala/de/ust/skill/generator/sidl/Main.scala | Scala | bsd-3-clause | 3,034 |
package com.microsoft.partnercatalyst.fortis.spark.transforms.locations.client
import com.microsoft.partnercatalyst.fortis.spark.dto.Geofence
import com.microsoft.partnercatalyst.fortis.spark.logging.Loggable
import com.microsoft.partnercatalyst.fortis.spark.transforms.locations.dto.{FeatureServiceFeature, FeatureServiceResponse}
import net.liftweb.json
import scala.io.Source
import scala.util.{Failure, Success, Try}
@SerialVersionUID(100L)
class FeatureServiceClient(apiUrlBase: String, namespace: Option[String]) extends Serializable with Loggable {
  /** Fetch the features intersecting `geofence`, optionally restricted to `layers`. */
  def bbox(geofence: Geofence, layers: Seq[String] = List()): Iterable[FeatureServiceFeature] = {
    unpack(fetchBboxResponse(geofence, layers), "bbox")
  }

  /** Fetch the features containing the given point. */
  def point(latitude: Double, longitude: Double): Iterable[FeatureServiceFeature] = {
    unpack(fetchPointResponse(latitude = latitude, longitude = longitude), "point")
  }

  /** Fetch the features matching any of the given names. */
  def name(names: Iterable[String]): Iterable[FeatureServiceFeature] = {
    unpack(fetchNameResponse(names), "name")
  }

  /** Parse a raw response; on any fetch/parse failure log it and return no features. */
  private def unpack(responseBody: Try[String], endpointName: String): Iterable[FeatureServiceFeature] = {
    val parsedResponse = responseBody.flatMap(parseResponse)
    parsedResponse match {
      case Success(domainObject) =>
        domainObject
      case Failure(err) =>
        logError(s"Error fetching feature service $endpointName", err)
        List()
    }
  }

  private def parseResponse(response: String): Try[Iterable[FeatureServiceFeature]] = {
    implicit val formats = json.DefaultFormats
    Try(json.parse(response).extract[FeatureServiceResponse].features)
  }

  protected def fetchBboxResponse(geofence: Geofence, layers: Seq[String]): Try[String] = {
    val fetch = s"$apiUrlBase/features/bbox/${geofence.north}/${geofence.west}/${geofence.south}/${geofence.east}"
    fetchResponse(addQueryParameters(fetch, layers))
  }

  protected def fetchPointResponse(latitude: Double, longitude: Double): Try[String] = {
    val fetch = s"$apiUrlBase/features/point/$latitude/$longitude"
    fetchResponse(addQueryParameters(fetch))
  }

  protected def fetchNameResponse(names: Iterable[String]): Try[String] = {
    val fetch = s"$apiUrlBase/features/name/${names.mkString(",")}"
    fetchResponse(addQueryParameters(fetch))
  }

  /** Append the query string: always request centroids; filter by layers/namespace when given. */
  private def addQueryParameters(baseUrl: String, layers: Seq[String] = List()): String = {
    var url = baseUrl
    url += "?include=centroid"
    if (layers.nonEmpty) {
      url += s"&filter_layer=${layers.mkString(",")}"
    }
    if (namespace.nonEmpty) {
      url += s"&filter_namespace=${namespace.get}"
    }
    url
  }

  /** Perform the HTTP GET, ensuring the underlying stream is always closed.
   * (The previous implementation never closed the Source, leaking the
   * connection's input stream.)
   */
  private def fetchResponse(url: String): Try[String] = {
    Try {
      val source = Source.fromURL(url)("UTF-8")
      try source.mkString finally source.close()
    }
  }
}
| CatalystCode/project-fortis-spark | src/main/scala/com/microsoft/partnercatalyst/fortis/spark/transforms/locations/client/FeatureServiceClient.scala | Scala | mit | 2,716 |
package visceljs.render
import org.scalajs.dom
import org.scalajs.dom.MouseEvent
import org.scalajs.dom.html.Body
import rescala.default._
import scalatags.JsDom
import scalatags.JsDom.all.{
HtmlTag, Modifier, Tag, a, bindJsAnyLike, body, href, id, onclick, p, rel, stringAttr, stringFrag, title, span
}
import scalatags.JsDom.attrs.{disabled, style}
import scalatags.JsDom.tags2.{article, main}
import viscel.shared.{Bookmark, Contents, Vid}
import visceljs.Definitions.lcButton
import visceljs.Navigation._
import visceljs.{Actions, Definitions, Icons}
import rescala.extra.Tags._
/** Renders the single-image reader page: the current gallery asset plus a
 * navigation bar (prev/next, front page, fullscreen, fit-mode cycling,
 * bookmarking, and a link to the original page).
 */
class ImagePage(act: Actions) {

  /** Left-click handler for the main image area: clicks in the left border
   * strip (10% of the width, at least 100px) navigate to the previous page,
   * all other left clicks go to the next page. Clicks while text is
   * selected are ignored so selection is not disturbed.
   */
  def onLeftClickPrevNext(handler: Navigate => Unit): Modifier =
    onclick := { (e: MouseEvent) =>
      val node = e.currentTarget.asInstanceOf[dom.html.Element]
      if (e.button == 0 && dom.document.getSelection().isCollapsed && dom.window.getSelection().isCollapsed) {
        e.preventDefault()
        val relx = e.clientX - node.offsetLeft
        val border = math.max(node.offsetWidth / 10, 100)
        if (relx < border) handler(Prev)
        else handler(Next)
      }
    }

  /** Build the page body for asset `position.cur` of `contents`.
   * `fitType` drives the image CSS reactively; navigation intents are
   * fired into `navigate`.
   */
  def gen(
      vid: Vid,
      position: Position,
      bookmark: Bookmark,
      contents: Contents,
      fitType: Signal[FitType],
      navigate: Evt[Navigate]
  ): JsDom.TypedTag[Body] = {

    // The displayed asset, or a placeholder when the position is out of range.
    val mainPart: HtmlTag = {
      contents.gallery.lift(position.cur).fold[HtmlTag](p(s"invalid position")) { asst =>
        article(Snippets.asset(asst, style := fitType.map(Snippets.imageStyle))(
          asst.data.get("title").fold[Option[Tag]](None)(t => Some(p(t))).toSeq: _*
        ))
      }
    }

    // Navigation bar; prev/next render as disabled at the gallery edges.
    val navigation: HtmlTag = {
      val prev = position.mov(-1)
      val next = position.mov(1)
      Snippets.navigation(
        a(Icons.prev, rel := "prev", title := "previous page")(if (prev.cur == position.cur) disabled
        else href := Definitions.path_asset(vid, prev.cur)),
        a(href := Definitions.path_front(vid), Icons.front, title := "back to front page"),
        Snippets.fullscreenToggle(Icons.maximize, Icons.minimize, title := "toggle fullscreen"),
        lcButton(
          navigate.fire(Mode(fitType.now.next)),
          Icons.modus,
          fitType.map(ft => span(s" $ft")).asModifier,
          title := "cycle image display mode"
        ),
        act.postBookmark(
          vid,
          position.cur + 1,
          bookmark.position,
          contents.gallery.lift(position.cur),
          Icons.bookmark,
          title := "save bookmark"
        ),
        a(href := contents.gallery.lift(position.cur).fold("")(_.origin), rel := "noreferrer")(
          Icons.externalLink,
          title := "visit original page"
        ),
        a(Icons.next, rel := "next", title := "next")(if (next.cur == position.cur) disabled
        else href := Definitions.path_asset(vid, next.cur))
      )
    }

    val mainSection = main(mainPart, onLeftClickPrevNext(navigate.fire))

    body(id := "view", mainSection, navigation)
  }
}
| rmgk/viscel | code/js/src/main/scala/visceljs/render/ImagePage.scala | Scala | agpl-3.0 | 2,994 |
package step4
/** Demo driver: builds Peano-style "plus" and "minus" number chains out of
 * Item values and prints the result of consuming them against each other.
 * The `// (((...` comments record the expected console output of the
 * `println` directly above them.
 */
object Runner extends App {

  val itemX = Item("ItemX")

  val item01 = Item("Item01")
  val item02 = Item("Item02")
  val item03 = Item("Item03")
  val item04 = Item("Item04")
  val item05 = Item("Item05")
  val item06 = Item("Item06")
  val item07 = Item("Item07")
  val item08 = Item("Item08")
  val item09 = Item("Item09")
  val item10 = Item("Item10")
  val item11 = Item("Item11")
  val item12 = Item("Item12")

  val number1 = PlusNumberPositive(PlusNumberPositive(PlusNumberPositive(PlusNumberZero, item01), item02), item03)
  // + 3
  val number2 = PlusNumberPositive(
    PlusNumberPositive(PlusNumberPositive(PlusNumberPositive(PlusNumberPositive(PlusNumberZero, item04), item05), item06), item07),
    item08
  )
  // + 5
  val number3 =
    PlusNumberPositive(PlusNumberPositive(PlusNumberPositive(PlusNumberPositive(PlusNumberZero, item09), item10), item11), item12)
  // + 4
  val number4 = MinusNumberPositive(MinusNumberPositive(MinusNumberPositive(MinusNumberZero, itemX), itemX), itemX)
  // - 3
  val number5 =
    MinusNumberPositive(
      MinusNumberPositive(MinusNumberPositive(MinusNumberPositive(MinusNumberPositive(MinusNumberZero, itemX), itemX), itemX), itemX),
      itemX
    )
  // - 5
  // NOTE(review): number6 is never used below.
  val number6 =
    MinusNumberPositive(MinusNumberPositive(MinusNumberPositive(MinusNumberPositive(MinusNumberZero, itemX), itemX), itemX), itemX)
  // - 4

  val count1 = 被动消耗正(被动消耗正(被动消耗正(被动消耗零, number1), number2), number3)
  // 3 + 5 + 4 = 12
  println(count1.向左无害计算)
  // ((((((((((((Zero, Item(Item01)), Item(Item02)), Item(Item03)), Item(Item04)), Item(Item05)), Item(Item06)), Item(Item07)), Item(Item08)), Item(Item09)), Item(Item10)), Item(Item11)), Item(Item12))
  val count2 = 被动消耗正(被动消耗正(被动消耗正(被动消耗正(被动消耗零, number1), number2), number3), number4)
  // 3 + 5 + 4 - 3 = 9
  println(count2.向左无害计算)
  // (((((((((Zero, Item(Item01)), Item(Item02)), Item(Item03)), Item(Item04)), Item(Item05)), Item(Item06)), Item(Item07)), Item(Item08)), Item(Item09))
  val count6 = 被动消耗正(被动消耗正(被动消耗正(被动消耗零, number1), number2), number5)
  // 3 + 5 - 5 = 3
  println(count6.向左无害计算)
  // (((((((Zero, Item(Item01)), Item(Item02)), Item(Item03)), Item(Item04)), Item(Item05)), Item(Item09))
  // NOTE(review): the recorded output above lists six items for a count of 3 -- verify.
}
| djx314/ubw | a42-stream/src/main/scala/step4/Runner.scala | Scala | bsd-3-clause | 2,389 |
package com.tirthal.learning.scala.features
/*
* What is the use of "apply" method in Scala?
* - The apply method provides a short-hand way of calling a method on a class instance i.e. a way for faking function calls.
* - Syntax: instance(a) ---> internally calls ---> instance.apply(a)
* - Can use it for making APIs more expressive and factory style creation method
*
* What is the use of "update" method in Scala?
* - The update method provides a short-hand way of class instance assignment operator
* - Syntax: instance(a) = b ---> internally calls ---> instance.update(a, b)
* - Can use it to make class API to feel more like language syntax
*/
/** Demonstrates the `apply`/`update` shorthand syntax on arrays and on the
 * custom classes defined below (Multiplier, Book, PaymentTypes).
 */
object ApplyUpdateMethodUsage {

  def main(args: Array[String]) {

    // ---> Scala Array has expressive syntax to access value using apply() and assign it using update()
    // (fixed spelling: "quarters")
    val quarters = Array("Q1", "Q2", "Q3", "Q4")

    // Syntax to call apply method of a class?
    println("First quarter code: " + quarters.apply(0)) // access array value using Array's inbuilt apply()
    println("Second quarter code: " + quarters(1)) // This is shorthand for quarters.apply(1)

    // Syntax to call update method of a class?
    quarters.update(0, "q1") // update array value using Array's inbuilt update()
    quarters(1) = "q2" // This is shorthand for quarters.update(1, "q2")

    // ---> Example of how to make APIs more expressive using apply
    val multiply = new Multiplier()
    multiply(2, 3) // This is instance variable, still syntax looks like a function call - shorthand for multiply.apply(2, 3) syntax

    // ---> Example for offering factory style creation method using apply
    val b1 = Book(1, "The Well-Grounded Java Developer", "Benjamin J. Evans & Martijn Verburg") // calls Book.apply(id: Int, name: String, author: String)
    println(b1)
    val b2 = Book(2, "Microsoft Application Architecture Guide - Patterns and Practices") // calls Book.apply(id: Int, name: String)
    println(b2)

    // ---> Example for expressive API using apply and update in custom class
    val ptypes = new PaymentTypes()
    println(ptypes)
    println("Description on paypal:" + ptypes("paypal")) // calls PaymentTypes.apply(name: String)
    ptypes("paypal") = "Paypal Payment Gateway" // calls PaymentTypes.update(name: String, description: String)
    println(ptypes)
  }
}
// ---> Custom class example of how to make APIs more expressive using apply
class Multiplier {
  /** Invoked by the shorthand call syntax `instance(n1, n2)`; prints the product. */
  def apply(n1: Int, n2: Int): Unit = println(n1 * n2)
}
// ---> Custom class example for offering factory style creation method using apply
class Book(id: Int, name: String, author: String) {
  override def toString: String = s"$id:$name:$author"
}

object Book { // Book's companion: overloaded "apply" factory methods
  /** Factory with all fields known. */
  def apply(id: Int, name: String, author: String) = new Book(id, name, author)
  /** Factory for books whose author is not known. */
  def apply(id: Int, name: String) = new Book(id, name, "Author is unknown")
}
// ---> Custom class example having apply and update methods
class PaymentTypes {

  // Backing store mapping payment-type codes to human-readable descriptions.
  private val types = scala.collection.mutable.Map(
    "cc" -> "Credit Card",
    "paypal" -> "Paypal",
    "cash" -> "Cash")

  /** Shorthand lookup: `instance(name)` returns the description, if any. */
  def apply(name: String): Option[String] = types.get(name)

  /** Shorthand assignment: `instance(name) = description`. */
  def update(name: String, description: String): Unit = {
    types.update(name, description)
  }

  override def toString = types.toString()
} | tirthalpatel/Learning-Scala | ScalaQuickStart/src/main/scala/com/tirthal/learning/scala/features/ApplyUpdateMethodUsage.scala | Scala | mit | 3,438 |
package mesosphere.marathon
package state
case class ResourceLimits(cpus: Option[Double], mem: Option[Double])

object ResourceLimits {
  /** Convert to the protobuf representation; absent fields are simply not set. */
  def resourceLimitsToProto(limits: ResourceLimits): Protos.ResourceLimits = {
    val builder = Protos.ResourceLimits.newBuilder()
    limits.cpus.foreach(builder.setCpus)
    limits.mem.foreach(builder.setMem)
    builder.build
  }

  /** Convert from the protobuf representation; unset fields become None. */
  def resourceLimitsFromProto(proto: Protos.ResourceLimits): ResourceLimits = {
    val cpus = if (proto.hasCpus) Some(proto.getCpus) else None
    val mem = if (proto.hasMem) Some(proto.getMem) else None
    ResourceLimits(cpus = cpus, mem = mem)
  }
}
| mesosphere/marathon | src/main/scala/mesosphere/marathon/state/ResourceLimits.scala | Scala | apache-2.0 | 582 |
// https://github.com/epfl-lara/stainless/issues/400
// Regression benchmark: checks that the Stainless imperative phase handles a
// `val` alias of a mutable object correctly (a mutation through the alias
// must be visible through the original reference). The exact syntactic form
// is deliberate; do not "simplify" it.
object LetAliasing {

  case class MutableBox(var value: Boolean)

  // Mutates the box in place through the given reference.
  def mutate(b: MutableBox): Unit = {
    b.value = true
  }

  def prop = {
    val box1 = MutableBox(false)
    val box2 = box1 // alias: box2 and box1 refer to the same object
    mutate(box2)
    // Both views must observe the mutation.
    assert(box1.value)
    assert(box2.value)
    box2
  }
}
| epfl-lara/stainless | frontends/benchmarks/imperative/valid/LetAliasing.scala | Scala | apache-2.0 | 328 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.graphx
/**
 * The direction of a directed edge relative to a vertex.
 */
class EdgeDirection private (private val name: String) extends Serializable {
  /**
   * Reverse the direction of an edge: In and Out swap, while Either and
   * Both map to themselves.
   */
  def reverse: EdgeDirection = this match {
    case EdgeDirection.In => EdgeDirection.Out
    case EdgeDirection.Out => EdgeDirection.In
    case EdgeDirection.Either => EdgeDirection.Either
    case EdgeDirection.Both => EdgeDirection.Both
  }

  override def toString: String = s"EdgeDirection.$name"

  // Equality and hashing are both delegated to the direction's name.
  override def equals(o: Any): Boolean = o match {
    case other: EdgeDirection => other.name == name
    case _ => false
  }

  override def hashCode: Int = name.hashCode
}

/**
 * A set of [[EdgeDirection]]s.
 */
object EdgeDirection {
  /** Edges arriving at a vertex. */
  final val In: EdgeDirection = new EdgeDirection("In")

  /** Edges originating from a vertex. */
  final val Out: EdgeDirection = new EdgeDirection("Out")

  /** Edges originating from *or* arriving at a vertex of interest. */
  final val Either: EdgeDirection = new EdgeDirection("Either")

  /** Edges originating from *and* arriving at a vertex of interest. */
  final val Both: EdgeDirection = new EdgeDirection("Both")
}
| sjtu-iiot/graphx-algorithm | src/main/scala/org/apache/spark/graphx/EdgeDirection.scala | Scala | gpl-2.0 | 2,047 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and returns a sample of code snippets matching specific criteria, offering a surface-level overview of the dataset's contents rather than deeper analysis.