column     dtype           range / cardinality
code       stringlengths   5–1M
repo_name  stringlengths   5–109
path       stringlengths   6–208
language   stringclasses   1 value
license    stringclasses   15 values
size       int64           5–1M
package chandu0101.scalajs.rn.apis

import scala.scalajs.js

class Fetch extends js.Object {
  // def this(request : js.Object) : Promise[js.Dynamic] = this()
}

//object Fetch
beni55/scalajs-react-native
core/src/main/scala/chandu0101/scalajs/rn/apis/Fetch.scala
Scala
apache-2.0
179
package twitter4s import com.typesafe.config.ConfigFactory import http.client.connection.impl.PlayWSHttpConnection import http.client.method.GetMethod import http.client.response.HttpHeader import org.scalatest._ import twitter4s.request.{TwitterAuthorizationHeader, TwitterTimelineRequest} import scala.concurrent.duration._ import scala.concurrent.Await import scala.concurrent.ExecutionContext.Implicits.global class TwitterRequestSpec extends FlatSpec with Matchers with OptionValues with Inside with Inspectors { val config = ConfigFactory.load("test.conf") val _baseUrl = config.getString("twitter4s.test.base-url") val _relativeUrl = config.getString("twitter4s.test.relative-url") val _headers = Seq.empty[HttpHeader] val _queryString = Map("screen_name" → Seq("codewarrior")) val oauthConsumerSecret = config.getString("twitter4s.test.oauth-consumer-secret") val oauthConsumerKey = config.getString("twitter4s.test.oauth-consumer-key") val oauthToken = config.getString("twitter4s.test.oauth-token") val oauthTokenSecret = config.getString("twitter4s.test.oauth-token-secret") val twAuthHeaderGen = TwitterAuthorizationHeader.generate( oauthConsumerKey = oauthConsumerKey, oauthToken = oauthToken, oauthConsumerSecret = oauthConsumerSecret, oauthTokenSecret = oauthTokenSecret)(_) val request = TwitterTimelineRequest( baseUrl = _baseUrl, relativeUrl = _relativeUrl, headers = _headers, method = GetMethod, queryString = _queryString, body = None, paginated = false, authHeaderGen = twAuthHeaderGen) "Twitter request" should "properly fetch a user's timeline" in { val authHeader = twAuthHeaderGen(request) val authRequest = request.copy( headers = request.headers ++ Seq(authHeader)) val conn = new PlayWSHttpConnection val respF = conn.makeRequest(authRequest) val resp = Await.result(respF, 10.seconds) assert(resp.status.equals(200)) assert(resp.json.toString().contains("created_at")) } }
SocialOrra/social4s
twitter4s/src/test/scala/twitter4s/TwitterRequestSpec.scala
Scala
apache-2.0
2,024
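The spec above builds twAuthHeaderGen by partially applying TwitterAuthorizationHeader.generate to the OAuth credentials and eta-expanding with (_), leaving only the request argument open. A minimal, self-contained sketch of that partial-application pattern (all names below are illustrative, not the twitter4s API):

object PartialApplicationSketch extends App {
  case class Request(url: String)

  def generate(consumerKey: String, token: String)(request: Request): String =
    s"OAuth key=$consumerKey, token=$token, url=${request.url}"

  // `(_)` eta-expands the partially applied call into a Request => String
  val headerGen: Request => String = generate("ck", "tok")(_)

  println(headerGen(Request("https://example.org/timeline")))
}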
package com.twitter.finagle.dispatch import com.twitter.finagle.stats.InMemoryStatsReceiver import com.twitter.finagle.transport.Transport import com.twitter.finagle.{Failure, WriteException} import com.twitter.util.{Future, Promise, Return, Throw} import org.junit.runner.RunWith import org.mockito.Matchers._ import org.mockito.Mockito.{times, verify, when} import org.scalatest.FunSuite import org.scalatest.junit.JUnitRunner import org.scalatest.mock.MockitoSugar @RunWith(classOf[JUnitRunner]) class ClientDispatcherTest extends FunSuite with MockitoSugar { class DispatchHelper { val stats = new InMemoryStatsReceiver() val trans = mock[Transport[String, String]] val disp = new SerialClientDispatcher[String, String](trans, stats) } test("ClientDispatcher should dispatch requests") { val h = new DispatchHelper import h._ when(trans.write("one")) thenReturn Future.value(()) val p = new Promise[String] when(trans.read()) thenReturn p val f = disp("one") verify(trans).write("one") verify(trans).read() assert(!f.isDefined) p.setValue("ok: one") assert(f.poll == Some(Return("ok: one"))) } test("ClientDispatcher should dispatch requests one-at-a-time") { val h = new DispatchHelper import h._ when(trans.write(any[String])) thenReturn Future.value(()) val p0, p1 = new Promise[String] when(trans.read()) thenReturn p0 val f0 = disp("one") verify(trans).write(any[String]) verify(trans).read() val f1 = disp("two") verify(trans).write(any[String]) verify(trans).read() assert(!f0.isDefined) assert(!f1.isDefined) when(trans.read()) thenReturn p1 p0.setValue("ok: one") assert(f0.poll == Some(Return("ok: one"))) verify(trans, times(2)).write(any[String]) verify(trans, times(2)).read() assert(!f1.isDefined) p1.setValue("ok: two") assert(p1.poll == Some(Return("ok: two"))) } test("ClientDispatcher should interrupt when close transport and cancel pending requests") { val h = new DispatchHelper import h._ when(trans.write(any[String])) thenReturn Future.value(()) val p0 = new Promise[String] when(trans.read()) thenReturn p0 val f0 = disp("zero") val f1 = disp("one") verify(trans).write("zero") verify(trans).read() assert(!f0.isDefined) assert(!f1.isDefined) val intr = new Exception f0.raise(intr) verify(trans).close() assert(f0.poll == Some(Throw(intr))) } test("ClientDispatcher should interrupt when ignore pending") { val h = new DispatchHelper import h._ when(trans.write(any[String])) thenReturn Future.value(()) val p0 = new Promise[String] when(trans.read()) thenReturn p0 val f0 = disp("zero") val f1 = disp("one") verify(trans).write("zero") verify(trans).read() assert(!f0.isDefined) assert(!f1.isDefined) val intr = new Exception f1.raise(intr) verify(trans, times(0)).close() assert(!f0.isDefined) assert(!f1.isDefined) p0.setValue("ok") assert(f0.poll == Some(Return("ok"))) assert(f1.poll == Some(Throw(Failure(intr, Failure.Interrupted)))) verify(trans).write(any[String]) } test("ClientDispatcher should rewrite WriteExceptions") { val h = new DispatchHelper import h._ val exc = mock[Exception] when(trans.write(any[String])) thenReturn Future.exception(exc) val resultOpt = disp("hello").poll assert(resultOpt.isDefined) assert(resultOpt.get.isThrow) val result: Throwable = resultOpt.get.asInstanceOf[Throw[String]].e assert(result.isInstanceOf[WriteException]) assert(result.getCause == exc) } test("ClientDispatcher queue_size gauge") { val h = new DispatchHelper import h._ def assertGaugeSize(size: Int): Unit = assert(stats.gauges(Seq("serial", "queue_size"))() == size) assertGaugeSize(0) val p = new 
Promise[String]() when(trans.write(any[String])).thenReturn(Future.Done) when(trans.read()).thenReturn(p) disp("0") assertGaugeSize(0) // 1 issued, but none pending disp("1") disp("2") assertGaugeSize(2) // 1 issued, now 2 pending p.setValue("done") assertGaugeSize(0) } }
liamstewart/finagle
finagle-core/src/test/scala/com/twitter/finagle/dispatch/ClientDispatcherTest.scala
Scala
apache-2.0
4,254
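These dispatcher tests control timing by handing the mocked transport a Promise and fulfilling it only when the test is ready, so the caller's Future stays pending until then. A small sketch of the same pattern using scala.concurrent.Promise (rather than com.twitter.util.Promise, which the test uses):

import scala.concurrent.{Await, Future, Promise}
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

object PromiseControlSketch extends App {
  // the "transport read" the dispatcher would be waiting on
  val readResult = Promise[String]()

  // the caller's Future is tied to the still-pending read
  val response: Future[String] = readResult.future.map(r => s"ok: $r")

  assert(!response.isCompleted)     // nothing resolved yet, like assert(!f.isDefined)
  readResult.success("one")         // the test decides when the read completes
  assert(Await.result(response, 1.second) == "ok: one")
  println("dispatch completed only after the promise was fulfilled")
}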
object Macros {
  import scala.language.implicitConversions
  implicit def foo(x: String): Option[Int] = macro Impls.foo
}

object Test extends App {
  import Macros._
  println("2": Option[Int])
  val s: Int = "2" getOrElse 0
  println(s)
}
som-snytt/dotty
tests/disabled/macro/run/macro-expand-implicit-macro-is-implicit/Macros_Test_2.scala
Scala
apache-2.0
242
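The test relies on an implicit macro (Impls.foo, defined in a separate file not shown here) to convert a String into an Option[Int], which is what makes the ascription "2": Option[Int] and the call "2" getOrElse 0 compile. A non-macro sketch of the same shape, with an ordinary implicit conversion standing in for the macro:

object ImplicitConversionSketch extends App {
  import scala.language.implicitConversions
  import scala.util.Try

  // ordinary implicit conversion standing in for the Impls.foo macro
  implicit def stringToOptionInt(x: String): Option[Int] = Try(x.toInt).toOption

  println("2": Option[Int])    // Some(2)
  val s: Int = "2" getOrElse 0 // the String is converted, then getOrElse applies
  println(s)                   // 2
}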
package org.jetbrains.plugins.scala package lang package psi package impl package expr package xml import _root_.org.jetbrains.plugins.scala.lang.psi.types.{ScType, ScalaType} import com.intellij.lang.ASTNode import org.jetbrains.plugins.scala.lang.psi.api.expr.xml._ import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScObject import org.jetbrains.plugins.scala.lang.psi.types.api.{Any, Nothing} import org.jetbrains.plugins.scala.lang.psi.types.result._ /** * @author Alexander Podkhalyuzin */ class ScXmlExprImpl(node: ASTNode) extends ScalaPsiElementImpl (node) with ScXmlExpr{ override def toString: String = "XmlExpression" protected override def innerType: TypeResult = { def getType(s: String): ScType = { val typez = ScalaPsiManager.instance(getProject).getCachedClasses(getResolveScope, s).filter(!_.isInstanceOf[ScObject]) if (typez.length != 0) ScalaType.designator(typez(0)) else Nothing } Right(getElements.length match { case 0 => Any case 1 => getElements.head match { case _: ScXmlElement => getType("scala.xml.Elem") case _: ScXmlComment => getType("scala.xml.Comment") case _: ScXmlCDSect => getType("scala.xml.Text") case _: ScXmlPI => getType("scala.xml.ProcInstr") } case _ => getType("scala.xml.NodeBuffer") }) } }
jastice/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/impl/expr/xml/ScXmlExprImpl.scala
Scala
apache-2.0
1,379
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.openwhisk.core.entity object Annotations { val FinalParamsAnnotationName = "final" val WebActionAnnotationName = "web-export" val WebCustomOptionsAnnotationName = "web-custom-options" val RawHttpAnnotationName = "raw-http" val RequireWhiskAuthAnnotation = "require-whisk-auth" val ProvideApiKeyAnnotationName = "provide-api-key" val InvokerResourcesAnnotationName = "invoker-resources" val InvokerResourcesStrictPolicyAnnotationName = "invoker-resources-strict-policy" }
style95/openwhisk
common/scala/src/main/scala/org/apache/openwhisk/core/entity/Annotations.scala
Scala
apache-2.0
1,310
import scala.tools.partest.Util.ArrayDeep import scala.reflect.runtime.universe._ import scala.reflect.{ClassTag, classTag} object Test extends App { Test1 Test2 } class Foo[T](x: T) trait Bar[T] { def f: T } object Test1 extends TestUtil { print(()) print(true) print('a') print(1) print("abc") print('abc) println() print(List(())) print(List(true)) print(List(1)) print(List("abc")) print(List('abc)) println() //print(Array(())) //Illegal class name "[V" in class file Test$ print(Array(true)) print(Array('a')) print(Array(1)) print(Array("abc")) print(Array('abc)) println() print(((), ())) print((true, false)) print((1, 2)) print(("abc", "xyz")) print(('abc, 'xyz)) println() print(Test) print(List) println() print(new Foo(2)) print(new Foo(List(2))) print(new Foo(new Foo(2))) print(new Foo(List(new Foo(2)))) println() print(new Bar[String] { def f = "abc" }); {print(new Bar[String] { def f = "abc" })} println() } object Test2 { import Marshal._ println("()="+load[Unit](dump(()))) println("true="+load[Boolean](dump(true))) println("a="+load[Char](dump('a'))) println("1="+load[Int](dump(1))) println("'abc="+load[scala.Symbol](dump('abc))) println() println("List(())="+load[List[Unit]](dump(List(())))) println("List(true)="+load[List[Boolean]](dump(List(true)))) println("List('abc)="+load[List[scala.Symbol]](dump(List('abc)))) println() def loadArray[T](x: Array[Byte])(implicit t: reflect.ClassTag[Array[T]]) = load[Array[T]](x)(t).deep.toString println("Array()="+loadArray[Int](dump(Array(): Array[Int]))) println("Array(true)="+loadArray[Boolean](dump(Array(true)))) println("Array(a)="+loadArray[Char](dump(Array('a')))) println("Array(1)="+loadArray[Int](dump(Array(1)))) println() println("((),())="+load[(Unit, Unit)](dump(((), ())))) println("(true,false)="+load[(Boolean, Boolean)](dump((true, false)))) println() println("List(List(1), List(2))="+load[List[List[Int]]](dump(List(List(1), List(2))))) println() println("Array(Array(1), Array(2))="+loadArray[Array[Int]](dump(Array(Array(1), Array(2))))) println() } object Marshal { import java.io._ import scala.reflect.ClassTag def dump[A](o: A)(implicit t: ClassTag[A]): Array[Byte] = { val ba = new ByteArrayOutputStream(512) val out = new ObjectOutputStream(ba) out.writeObject(t) out.writeObject(o) out.close() ba.toByteArray() } @throws(classOf[IOException]) @throws(classOf[ClassCastException]) @throws(classOf[ClassNotFoundException]) def load[A](buffer: Array[Byte])(implicit expected: ClassTag[A]): A = { val in = new ObjectInputStream(new ByteArrayInputStream(buffer)) val found = in.readObject.asInstanceOf[ClassTag[_]] try { found.runtimeClass.asSubclass(expected.runtimeClass) in.readObject.asInstanceOf[A] } catch { case _: ClassCastException => in.close() throw new ClassCastException("type mismatch;"+ "\\n found : "+found+ "\\n required: "+expected) } } } trait TestUtil { import java.io._ def write[A](o: A): Array[Byte] = { val ba = new ByteArrayOutputStream(512) val out = new ObjectOutputStream(ba) out.writeObject(o) out.close() ba.toByteArray() } def read[A](buffer: Array[Byte]): A = { val in = new ObjectInputStream(new ByteArrayInputStream(buffer)) in.readObject().asInstanceOf[A] } def print[T](x: T)(implicit t: TypeTag[T]): Unit = { // todo. type tags are not yet serializable // val t1: TypeTag[T] = read(write(t)) val t1: TypeTag[T] = t val x1 = x.toString.replaceAll("@[0-9a-z]+$", "") println("x="+x1+", t="+t1+", k="+t1.tpe.asInstanceOf[Product].productPrefix+", s="+t1.tpe.typeSymbol.toString) } }
lrytz/scala
test/files/jvm/manifests-new.scala
Scala
apache-2.0
3,839
package ru.mipt.acsl.decode.idea.plugin

import com.intellij.openapi.actionSystem.DefaultActionGroup

/**
 * @author Artem Shein
 */
class DecodeActionGroup extends DefaultActionGroup
acsl-mipt/decode-idea-plugin
src/main/scala/ru/mipt/acsl/decode/idea/plugin/DecodeActionGroup.scala
Scala
mit
184
package jobs import com.quantifind.sumac.validation.Required import com.quantifind.sumac.{ArgMain, FieldArgs} import core.utils.SparkUtils import org.apache.accumulo.core.client.ZooKeeperInstance import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat import org.apache.accumulo.core.client.mapreduce.lib.util.{ConfiguratorBase => CB, InputConfigurator => IC} import org.apache.accumulo.core.client.security.tokens.PasswordToken import org.apache.accumulo.core.data.{Key, Range => ARange, Value} import org.apache.spark._ import scala.collection.JavaConversions._ class AccumuloTestArgs extends FieldArgs { @Required var zookeeper: String = _ @Required var instance : String = _ @Required var user : String = _ @Required var password : String = _ @Required var table : String = _ } object AccumuloTest extends ArgMain[AccumuloTestArgs] with Logging { def main(args: AccumuloTestArgs) { val sc = SparkUtils.createSparkContext("AccumuloTest") val conf = sc.hadoopConfiguration val user = args.user val authToken = new PasswordToken(args.password) val instance = new ZooKeeperInstance(args.instance, args.zookeeper) val table = args.table CB.setConnectorInfo(classOf[AccumuloInputFormat], conf, user, authToken) CB.setZooKeeperInstance(classOf[AccumuloInputFormat], conf, instance.getInstanceName, instance.getZooKeepers) IC.setInputTableName(classOf[AccumuloInputFormat], conf, table) IC.setRanges(classOf[AccumuloInputFormat], conf, List(new ARange())) val rdd = sc.newAPIHadoopRDD(conf, classOf[AccumuloInputFormat], classOf[Key], classOf[Value]) println(s"RECORD COUNT IS: ${rdd.count()}") } }
pomadchin/accumulo-spark-sample
src/main/scala/jobs/AccumuloTest.scala
Scala
apache-2.0
1,686
/** * Licensed to Big Data Genomics (BDG) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The BDG licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.bdgenomics.avocado.genotyping import org.bdgenomics.adam.models.{ ReferencePosition, SequenceDictionary, SequenceRecord, VariantContext } import org.bdgenomics.avocado.models.{ Observation, AlleleObservation } import org.bdgenomics.formats.avro.Contig import org.scalatest.FunSuite import scala.collection.JavaConversions._ import scala.math.{ abs, sqrt } class BiallelicGenotyperSuite extends FunSuite { val ba = new BiallelicGenotyper(SequenceDictionary(SequenceRecord("ctg", 1000L))) val floatingPointingThreshold = 1e-6 def assertAlmostEqual(a: Double, b: Double, epsilon: Double = floatingPointingThreshold) { if (!(a * 0.99 < b && a * 1.01 > b) && !(abs(a - b) < epsilon)) { throw new AssertionError(a + " != " + b) } } test("score genotype for single sample, all bases ref") { val observed = Iterable( AlleleObservation(ReferencePosition("ctg", 0L), 1, "C", 30, 30, true, "mySample"), AlleleObservation(ReferencePosition("ctg", 0L), 1, "C", 40, 40, true, "mySample"), AlleleObservation(ReferencePosition("ctg", 0L), 1, "C", 30, 40, true, "mySample")) val expected = Array(8.0 * (0.001 * 0.0001 * sqrt(0.001 * 0.0001)) / 8.0, 0.125, 8.0 * (0.999 * 0.9999 * (1.0 - sqrt(0.001 * 0.0001))) / 8.0) val scored = ba.scoreGenotypeLikelihoods("C", "A", observed) assertAlmostEqual(expected(0), scored._2(0)) assertAlmostEqual(expected(1), scored._2(1)) assertAlmostEqual(expected(2), scored._2(2)) assert(scored._2.max == scored._2(2)) } test("score genotype for single sample, mix of ref/non-ref bases") { val observed = Iterable( AlleleObservation(ReferencePosition("ctg", 0L), 1, "C", 30, 30, true, "mySample"), AlleleObservation(ReferencePosition("ctg", 0L), 1, "C", 40, 40, true, "mySample"), AlleleObservation(ReferencePosition("ctg", 0L), 1, "A", 30, 40, true, "mySample")) val expected = List(8.0 * (0.001 * 0.0001 * (1.0 - sqrt(0.001 * 0.0001))) / 8.0, 0.125, 8.0 * (0.999 * 0.9999 * sqrt(0.001 * 0.0001)) / 8.0) val scored = ba.scoreGenotypeLikelihoods("C", "A", observed) assertAlmostEqual(expected(0), scored._2(0)) assertAlmostEqual(expected(1), scored._2(1)) assertAlmostEqual(expected(2), scored._2(2)) assert(scored._2.max == scored._2(1)) } test("score genotype for single sample, all bases non-ref") { val observed = Iterable( AlleleObservation(ReferencePosition("ctg", 0L), 1, "A", 30, 30, true, "mySample"), AlleleObservation(ReferencePosition("ctg", 0L), 1, "A", 40, 40, true, "mySample"), AlleleObservation(ReferencePosition("ctg", 0L), 1, "A", 30, 40, true, "mySample")) val expected = List(8.0 * (0.999 * 0.9999 * (1.0 - sqrt(0.001 * 0.0001))) / 8.0, 0.125, 8.0 * (0.001 * 0.0001 * sqrt(0.001 * 0.0001)) / 8.0) val scored = ba.scoreGenotypeLikelihoods("C", "A", observed) assertAlmostEqual(expected(0), scored._2(0)) assertAlmostEqual(expected(1), 
scored._2(1)) assertAlmostEqual(expected(2), scored._2(2)) assert(scored._2.max == scored._2(0)) } }
tdanford/avocado
avocado-core/src/test/scala/org/bdgenomics/avocado/genotyping/BiallelicGenotyperSuite.scala
Scala
apache-2.0
4,430
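assertAlmostEqual above accepts a result if it lies within roughly 1% of the expected value or within an absolute epsilon of it. A standalone sketch of that tolerance rule (written, as in the suite, for positive expected values):

object AlmostEqualSketch extends App {
  val epsilon = 1e-6

  // pass when b sits inside the 1% relative band around a, or |a - b| is below epsilon
  def almostEqual(a: Double, b: Double): Boolean =
    (a * 0.99 < b && a * 1.01 > b) || math.abs(a - b) < epsilon

  println(almostEqual(0.125, 0.1251))  // true: within the 1% relative tolerance
  println(almostEqual(1e-9, 2e-9))     // true: within the absolute epsilon
  println(almostEqual(0.125, 0.2))     // false
}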
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.admin import java.util.Properties import joptsimple._ import kafka.common.Config import kafka.common.InvalidConfigException import kafka.log.LogConfig import kafka.server.{ConfigEntityName, ConfigType, DynamicConfig} import kafka.utils.CommandLineUtils import kafka.utils.Implicits._ import kafka.zk.{AdminZkClient, KafkaZkClient} import kafka.zookeeper.ZooKeeperClient import org.apache.kafka.common.security.JaasUtils import org.apache.kafka.common.security.scram._ import org.apache.kafka.common.utils.{Sanitizer, Utils} import scala.collection._ import scala.collection.JavaConverters._ /** * This script can be used to change configs for topics/clients/brokers dynamically * This script can be used to change configs for topics/clients/users/brokers dynamically * An entity described or altered by the command may be one of: * <ul> * <li> topic: --entity-type topics --entity-name <topic> * <li> client: --entity-type clients --entity-name <client-id> * <li> user: --entity-type users --entity-name <user-principal> * <li> <user, client>: --entity-type users --entity-name <user-principal> --entity-type clients --entity-name <client-id> * <li> broker: --entity-type brokers --entity-name <broker> * </ul> * --entity-default may be used instead of --entity-name when describing or altering default configuration for users and clients. 
* */ object ConfigCommand extends Config { val DefaultScramIterations = 4096 def main(args: Array[String]): Unit = { val opts = new ConfigCommandOptions(args) if(args.length == 0) CommandLineUtils.printUsageAndDie(opts.parser, "Add/Remove entity config for a topic, client, user or broker") opts.checkArgs() val zooKeeperClient = new ZooKeeperClient(opts.options.valueOf(opts.zkConnectOpt), 30000, 30000, Int.MaxValue) val zkClient = new KafkaZkClient(zooKeeperClient, JaasUtils.isZkSecurityEnabled()) val adminZkClient = new AdminZkClient(zkClient) try { if (opts.options.has(opts.alterOpt)) alterConfig(zkClient, opts, adminZkClient) else if (opts.options.has(opts.describeOpt)) describeConfig(zkClient, opts, adminZkClient) } catch { case e: Throwable => println("Error while executing config command " + e.getMessage) println(Utils.stackTrace(e)) } finally { zkClient.close() } } private[admin] def alterConfig(zkClient: KafkaZkClient, opts: ConfigCommandOptions, adminZkClient: AdminZkClient) { val configsToBeAdded = parseConfigsToBeAdded(opts) val configsToBeDeleted = parseConfigsToBeDeleted(opts) val entity = parseEntity(opts) val entityType = entity.root.entityType val entityName = entity.fullSanitizedName if (entityType == ConfigType.User) preProcessScramCredentials(configsToBeAdded) // compile the final set of configs val configs = adminZkClient.fetchEntityConfig(entityType, entityName) // fail the command if any of the configs to be deleted does not exist val invalidConfigs = configsToBeDeleted.filterNot(configs.containsKey(_)) if (invalidConfigs.nonEmpty) throw new InvalidConfigException(s"Invalid config(s): ${invalidConfigs.mkString(",")}") configs ++= configsToBeAdded configsToBeDeleted.foreach(configs.remove(_)) adminZkClient.changeConfigs(entityType, entityName, configs) println(s"Completed Updating config for entity: $entity.") } private def preProcessScramCredentials(configsToBeAdded: Properties) { def scramCredential(mechanism: ScramMechanism, credentialStr: String): String = { val pattern = "(?:iterations=([0-9]*),)?password=(.*)".r val (iterations, password) = credentialStr match { case pattern(iterations, password) => (if (iterations != null) iterations.toInt else DefaultScramIterations, password) case _ => throw new IllegalArgumentException(s"Invalid credential property $mechanism=$credentialStr") } if (iterations < mechanism.minIterations()) throw new IllegalArgumentException(s"Iterations $iterations is less than the minimum ${mechanism.minIterations()} required for $mechanism") val credential = new ScramFormatter(mechanism).generateCredential(password, iterations) ScramCredentialUtils.credentialToString(credential) } for (mechanism <- ScramMechanism.values) { configsToBeAdded.getProperty(mechanism.mechanismName) match { case null => case value => configsToBeAdded.setProperty(mechanism.mechanismName, scramCredential(mechanism, value)) } } } private def describeConfig(zkClient: KafkaZkClient, opts: ConfigCommandOptions, adminZkClient: AdminZkClient) { val configEntity = parseEntity(opts) val describeAllUsers = configEntity.root.entityType == ConfigType.User && !configEntity.root.sanitizedName.isDefined && !configEntity.child.isDefined val entities = configEntity.getAllEntities(zkClient) for (entity <- entities) { val configs = adminZkClient.fetchEntityConfig(entity.root.entityType, entity.fullSanitizedName) // When describing all users, don't include empty user nodes with only <user, client> quota overrides. 
if (!configs.isEmpty || !describeAllUsers) { println("Configs for %s are %s" .format(entity, configs.asScala.map(kv => kv._1 + "=" + kv._2).mkString(","))) } } } private[admin] def parseConfigsToBeAdded(opts: ConfigCommandOptions): Properties = { val props = new Properties if (opts.options.has(opts.addConfig)) { //split by commas, but avoid those in [], then into KV pairs val pattern = "(?=[^\\\\]]*(?:\\\\[|$))" val configsToBeAdded = opts.options.valueOf(opts.addConfig) .split("," + pattern) .map(_.split("""\\s*=\\s*""" + pattern)) require(configsToBeAdded.forall(config => config.length == 2), "Invalid entity config: all configs to be added must be in the format \\"key=val\\".") //Create properties, parsing square brackets from values if necessary configsToBeAdded.foreach(pair => props.setProperty(pair(0).trim, pair(1).replaceAll("\\\\[?\\\\]?", "").trim)) if (props.containsKey(LogConfig.MessageFormatVersionProp)) { println(s"WARNING: The configuration ${LogConfig.MessageFormatVersionProp}=${props.getProperty(LogConfig.MessageFormatVersionProp)} is specified. " + s"This configuration will be ignored if the version is newer than the inter.broker.protocol.version specified in the broker.") } } props } private[admin] def parseConfigsToBeDeleted(opts: ConfigCommandOptions): Seq[String] = { if (opts.options.has(opts.deleteConfig)) { val configsToBeDeleted = opts.options.valuesOf(opts.deleteConfig).asScala.map(_.trim()) val propsToBeDeleted = new Properties configsToBeDeleted.foreach(propsToBeDeleted.setProperty(_, "")) configsToBeDeleted } else Seq.empty } case class Entity(entityType: String, sanitizedName: Option[String]) { val entityPath = sanitizedName match { case Some(n) => entityType + "/" + n case None => entityType } override def toString: String = { val typeName = entityType match { case ConfigType.User => "user-principal" case ConfigType.Client => "client-id" case ConfigType.Topic => "topic" case t => t } sanitizedName match { case Some(ConfigEntityName.Default) => "default " + typeName case Some(n) => val desanitized = if (entityType == ConfigType.User || entityType == ConfigType.Client) Sanitizer.desanitize(n) else n s"$typeName '$desanitized'" case None => entityType } } } case class ConfigEntity(root: Entity, child: Option[Entity]) { val fullSanitizedName = root.sanitizedName.getOrElse("") + child.map(s => "/" + s.entityPath).getOrElse("") def getAllEntities(zkClient: KafkaZkClient) : Seq[ConfigEntity] = { // Describe option examples: // Describe entity with specified name: // --entity-type topics --entity-name topic1 (topic1) // Describe all entities of a type (topics/brokers/users/clients): // --entity-type topics (all topics) // Describe <user, client> quotas: // --entity-type users --entity-name user1 --entity-type clients --entity-name client2 (<user1, client2>) // --entity-type users --entity-name userA --entity-type clients (all clients of userA) // --entity-type users --entity-type clients (all <user, client>s)) // Describe default quotas: // --entity-type users --entity-default (Default user) // --entity-type users --entity-default --entity-type clients --entity-default (Default <user, client>) (root.sanitizedName, child) match { case (None, _) => val rootEntities = zkClient.getAllEntitiesWithConfig(root.entityType) .map(name => ConfigEntity(Entity(root.entityType, Some(name)), child)) child match { case Some(s) => rootEntities.flatMap(rootEntity => ConfigEntity(rootEntity.root, Some(Entity(s.entityType, None))).getAllEntities(zkClient)) case None => rootEntities } case 
(_, Some(childEntity)) => childEntity.sanitizedName match { case Some(_) => Seq(this) case None => zkClient.getAllEntitiesWithConfig(root.entityPath + "/" + childEntity.entityType) .map(name => ConfigEntity(root, Some(Entity(childEntity.entityType, Some(name))))) } case (_, None) => Seq(this) } } override def toString: String = { root.toString + child.map(s => ", " + s.toString).getOrElse("") } } private[admin] def parseEntity(opts: ConfigCommandOptions): ConfigEntity = { val entityTypes = opts.options.valuesOf(opts.entityType).asScala if (entityTypes.head == ConfigType.User || entityTypes.head == ConfigType.Client) parseQuotaEntity(opts) else { // Exactly one entity type and at-most one entity name expected for other entities val name = if (opts.options.has(opts.entityName)) Some(opts.options.valueOf(opts.entityName)) else None ConfigEntity(Entity(entityTypes.head, name), None) } } private def parseQuotaEntity(opts: ConfigCommandOptions): ConfigEntity = { val types = opts.options.valuesOf(opts.entityType).asScala val namesIterator = opts.options.valuesOf(opts.entityName).iterator val names = opts.options.specs.asScala .filter(spec => spec.options.contains("entity-name") || spec.options.contains("entity-default")) .map(spec => if (spec.options.contains("entity-name")) namesIterator.next else "") if (opts.options.has(opts.alterOpt) && names.size != types.size) throw new IllegalArgumentException("--entity-name or --entity-default must be specified with each --entity-type for --alter") val reverse = types.size == 2 && types(0) == ConfigType.Client val entityTypes = if (reverse) types.reverse else types val sortedNames = (if (reverse && names.length == 2) names.reverse else names).iterator def sanitizeName(entityType: String, name: String) = { if (name.isEmpty) ConfigEntityName.Default else { entityType match { case ConfigType.User | ConfigType.Client => Sanitizer.sanitize(name) case _ => throw new IllegalArgumentException("Invalid entity type " + entityType) } } } val entities = entityTypes.map(t => Entity(t, if (sortedNames.hasNext) Some(sanitizeName(t, sortedNames.next)) else None)) ConfigEntity(entities.head, if (entities.size > 1) Some(entities(1)) else None) } class ConfigCommandOptions(args: Array[String]) { val parser = new OptionParser(false) val zkConnectOpt = parser.accepts("zookeeper", "REQUIRED: The connection string for the zookeeper connection in the form host:port. " + "Multiple URLS can be given to allow fail-over.") .withRequiredArg .describedAs("urls") .ofType(classOf[String]) val alterOpt = parser.accepts("alter", "Alter the configuration for the entity.") val describeOpt = parser.accepts("describe", "List configs for the given entity.") val entityType = parser.accepts("entity-type", "Type of entity (topics/clients/users/brokers)") .withRequiredArg .ofType(classOf[String]) val entityName = parser.accepts("entity-name", "Name of entity (topic name/client id/user principal name/broker id)") .withRequiredArg .ofType(classOf[String]) val entityDefault = parser.accepts("entity-default", "Default entity name for clients/users (applies to corresponding entity type in command line)") val nl = System.getProperty("line.separator") val addConfig = parser.accepts("add-config", "Key Value pairs of configs to add. Square brackets can be used to group values which contain commas: 'k1=v1,k2=[v1,v2,v2],k3=v3'. 
The following is a list of valid configurations: " + "For entity_type '" + ConfigType.Topic + "': " + LogConfig.configNames.map("\\t" + _).mkString(nl, nl, nl) + "For entity_type '" + ConfigType.Broker + "': " + DynamicConfig.Broker.names.asScala.map("\\t" + _).mkString(nl, nl, nl) + "For entity_type '" + ConfigType.User + "': " + DynamicConfig.User.names.asScala.map("\\t" + _).mkString(nl, nl, nl) + "For entity_type '" + ConfigType.Client + "': " + DynamicConfig.Client.names.asScala.map("\\t" + _).mkString(nl, nl, nl) + s"Entity types '${ConfigType.User}' and '${ConfigType.Client}' may be specified together to update config for clients of a specific user.") .withRequiredArg .ofType(classOf[String]) val deleteConfig = parser.accepts("delete-config", "config keys to remove 'k1,k2'") .withRequiredArg .ofType(classOf[String]) .withValuesSeparatedBy(',') val helpOpt = parser.accepts("help", "Print usage information.") val forceOpt = parser.accepts("force", "Suppress console prompts") val options = parser.parse(args : _*) val allOpts: Set[OptionSpec[_]] = Set(alterOpt, describeOpt, entityType, entityName, addConfig, deleteConfig, helpOpt) def checkArgs() { // should have exactly one action val actions = Seq(alterOpt, describeOpt).count(options.has _) if(actions != 1) CommandLineUtils.printUsageAndDie(parser, "Command must include exactly one action: --describe, --alter") // check required args CommandLineUtils.checkRequiredArgs(parser, options, zkConnectOpt, entityType) CommandLineUtils.checkInvalidArgs(parser, options, alterOpt, Set(describeOpt)) CommandLineUtils.checkInvalidArgs(parser, options, describeOpt, Set(alterOpt, addConfig, deleteConfig)) val entityTypeVals = options.valuesOf(entityType).asScala if(options.has(alterOpt)) { if (entityTypeVals.contains(ConfigType.User) || entityTypeVals.contains(ConfigType.Client)) { if (!options.has(entityName) && !options.has(entityDefault)) throw new IllegalArgumentException("--entity-name or --entity-default must be specified with --alter of users/clients") } else if (!options.has(entityName)) throw new IllegalArgumentException(s"--entity-name must be specified with --alter of ${entityTypeVals}") val isAddConfigPresent: Boolean = options.has(addConfig) val isDeleteConfigPresent: Boolean = options.has(deleteConfig) if(! isAddConfigPresent && ! isDeleteConfigPresent) throw new IllegalArgumentException("At least one of --add-config or --delete-config must be specified with --alter") } entityTypeVals.foreach(entityTypeVal => if (!ConfigType.all.contains(entityTypeVal)) throw new IllegalArgumentException(s"Invalid entity-type ${entityTypeVal}, --entity-type must be one of ${ConfigType.all}") ) if (entityTypeVals.isEmpty) throw new IllegalArgumentException("At least one --entity-type must be specified") else if (entityTypeVals.size > 1 && !entityTypeVals.toSet.equals(Set(ConfigType.User, ConfigType.Client))) throw new IllegalArgumentException(s"Only '${ConfigType.User}' and '${ConfigType.Client}' entity types may be specified together") } } }
themarkypantz/kafka
core/src/main/scala/kafka/admin/ConfigCommand.scala
Scala
apache-2.0
17,391
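preProcessScramCredentials in the command above parses credential values of the form iterations=<n>,password=<pw>, with the iterations prefix optional and defaulting to 4096. A self-contained sketch of just that parsing step, reusing the same regex:

object ScramCredentialParseSketch extends App {
  val DefaultScramIterations = 4096
  val pattern = "(?:iterations=([0-9]*),)?password=(.*)".r

  def parse(credentialStr: String): (Int, String) = credentialStr match {
    case pattern(iterations, password) =>
      (if (iterations != null) iterations.toInt else DefaultScramIterations, password)
    case _ =>
      throw new IllegalArgumentException(s"Invalid credential property $credentialStr")
  }

  println(parse("iterations=8192,password=alice-secret"))  // (8192,alice-secret)
  println(parse("password=alice-secret"))                  // (4096,alice-secret)
}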
/* * Copyright (c) 2016 Frank S. Thomas * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package shapeless import org.junit.Assert._ import org.junit.Test import scala.concurrent.duration.Duration import shapeless.test.compileTime class CompileTimeTests { @Test def testCompileTime: Unit = { assertTrue(compileTime(""" val x = 42 """) > Duration.Zero) } }
isaka/shapeless
core/src/test/scala/shapeless/compiletime.scala
Scala
apache-2.0
884
/**
 * Utility object representing sets of tweets on Google and Apple topics,
 * together with a Tendencia object holding all of them.
 */
object TerminosGoogleApple {
  // List of terms of interest for Google
  val google = List("android", "Android", "galaxy", "Galaxy", "nexus", "Nexus")

  // List of terms of interest for Apple
  val apple = List("ios", "iOS", "iphone", "iPhone", "ipad", "iPad")

  // Tweet sets for both term lists (filter)
  val mensajesGoogle: ConjuntoTweet = LectorTweets.obtenerConjuntoConTerminos(google)
  val mensajesApple: ConjuntoTweet = LectorTweets.obtenerConjuntoConTerminos(apple)

  // Build the complete list of messages for both topics (union)
  val tendencia: Tendencia = mensajesApple.union(mensajesGoogle).ordenacionAscendentePorRetweet
}

/**
 * Object used to exercise the functionality
 */
object Main extends App {
  // messages common to both sets
  val mensajesComunes = TerminosGoogleApple.mensajesGoogle.interseccion(TerminosGoogleApple.mensajesApple)

  // recursive helper returning the retweet count of the tweet with the most RTs
  // in a trend, i.e. the last one
  def obtenerRTUltimoMensajeDeTendencia(tendencia: Tendencia): Integer =
    if (tendencia.tail.isEmpty) tendencia.head.retweets
    else obtenerRTUltimoMensajeDeTendencia(tendencia.tail)

  print("1. Numero de mensajes en:\n")
  print("Google: ")
  print(TerminosGoogleApple.mensajesGoogle.numeroMensajes)
  print("\nApple: ")
  print(TerminosGoogleApple.mensajesApple.numeroMensajes)
  print("\n\n")
  print("2. Numero de mensajes en la tendencia: ")
  print(TerminosGoogleApple.tendencia.length)
  print("\n\n")
  print("3. Numero de mensajes comunes: ")
  print(mensajesComunes.numeroMensajes)
  print("\n\n")
  print("4. Orden de influencia de los mensajes comunes:\n")
  mensajesComunes.ordenacionAscendentePorRetweet.foreach(t => println(t))
  print("\n")
  print("5. Maximo y minimo numero de retweets en los mensajes comunes:\n")
  print("Maximo: ")
  print(obtenerRTUltimoMensajeDeTendencia(mensajesComunes.ordenacionAscendentePorRetweet))
  print("\nMinimo: ")
  print(mensajesComunes.ordenacionAscendentePorRetweet.head.retweets)
  print("\n\n")
  print("6. Maximo y minimo de retweets en toda la coleccion de tendencia:\n")
  print("Maximo: ")
  print(obtenerRTUltimoMensajeDeTendencia(TerminosGoogleApple.tendencia))
  print("\nMinimo: ")
  print(TerminosGoogleApple.tendencia.head.retweets)
  print("\n\n")
}
fblupi/grado_informatica-NTP
Practicas/P4/src/Main.scala
Scala
gpl-2.0
2,510
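obtenerRTUltimoMensajeDeTendencia walks the trend recursively and returns the retweet count of its last element; since the trend is sorted ascending by retweets, that is the maximum. A trimmed-down sketch of the same walk over a plain List[Int] (Tendencia and the tweet type are not reproduced here):

object LastElementSketch extends App {
  // same recursion: stop when only one element remains, otherwise recurse on the tail
  @scala.annotation.tailrec
  def lastRetweetCount(trend: List[Int]): Int =
    if (trend.tail.isEmpty) trend.head else lastRetweetCount(trend.tail)

  val ascendingByRetweet = List(1, 3, 7, 42)
  println(lastRetweetCount(ascendingByRetweet))  // 42, i.e. the maximum
}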
package sample.model.account

import scalikejdbc._
import sample._
import sample.context._
import sample.context.orm.SkinnyORMMapper

/**
 * Represents a financial-institution account tied to an account.
 * <p>Used for deposits and withdrawals where this account is the counterparty.
 * low: As this is a sample, information that would normally be required (branch,
 * name, account holder, etc.) is largely omitted. (Normally the Zengin format is followed.)
 */
case class FiAccount(
  /** ID */
  id: Long,
  /** Account ID */
  accountId: String,
  /** Usage category */
  category: String,
  /** Currency */
  currency: String,
  /** Financial institution code */
  fiCode: String,
  /** Financial institution account ID */
  fiAccountId: String) extends Entity

object FiAccount extends SkinnyORMMapper[FiAccount] {
  override def extract(rs: WrappedResultSet, rn: ResultName[FiAccount]): FiAccount =
    autoConstruct(rs, rn)

  def load(accountId: String, category: String, currency: String)(implicit s: DBSession): FiAccount =
    withAlias { m =>
      findBy(
        sqls.eq(m.accountId, accountId)
          .and(sqls.eq(m.category, category))
          .and(sqls.eq(m.currency, currency)))
        .getOrElse(
          throw ValidationException(ErrorKeys.EntityNotFound))
    }
}
jkazama/sample-boot-scala
src/main/scala/sample/model/account/FiAccount.scala
Scala
mit
1,239
package bad.robot.radiate

import scalaz.{-\/, \/-, \/}

object Sequence {
  def sequence[E, A](list: List[E \/ A]): List[E] \/ List[A] = {
    list.partition(_.isLeft) match {
      case (Nil, successes) => \/-(for (\/-(success) <- successes) yield success)
      case (errors, _)      => -\/(for (-\/(error) <- errors) yield error)
    }
  }
}
tobyweston/radiate
src/main/scala/bad/robot/radiate/Sequence.scala
Scala
apache-2.0
340
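Sequence.sequence collapses a list of scalaz disjunctions into a single disjunction: when every element is a right it returns a right of all the values, otherwise it returns a left of all the errors. A usage sketch, assuming scalaz is on the classpath:

import scalaz.{-\/, \/-, \/}
import bad.robot.radiate.Sequence

object SequenceSketch extends App {
  val allOk: List[String \/ Int] = List(\/-(1), \/-(2), \/-(3))
  val mixed: List[String \/ Int] = List(\/-(1), -\/("boom"), \/-(3))

  println(Sequence.sequence(allOk))  // \/-(List(1, 2, 3))
  println(Sequence.sequence(mixed))  // -\/(List(boom))
}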
/*********************************************************************** * Copyright (c) 2013-2016 Commonwealth Computer Research, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, Version 2.0 * which accompanies this distribution and is available at * http://www.opensource.org/licenses/apache2.0.php. *************************************************************************/ package org.locationtech.geomesa.kafka import java.util.Date import com.typesafe.scalalogging.LazyLogging import org.geotools.factory.CommonFactoryFinder import org.geotools.feature.simple.{SimpleFeatureBuilder, SimpleFeatureTypeBuilder} import org.joda.time.{Duration, Instant} import org.locationtech.geomesa.filter._ import org.locationtech.geomesa.utils.geotools.Conversions._ import org.locationtech.geomesa.utils.index.{SpatialIndex, WrappedQuadtree} import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType} import org.opengis.filter._ import org.opengis.filter.expression.PropertyName import scala.collection.JavaConverters._ import scala.collection.mutable object ReplayTimeHelper { val ff = CommonFactoryFinder.getFilterFactory2 val AttributeName: String = "KafkaLogTime" val AttributeProp: PropertyName = ff.property(AttributeName) def addReplayTimeAttribute(builder: SimpleFeatureTypeBuilder): Unit = builder.add(AttributeName, classOf[Date]) def toFilter(time: Instant): Filter = ff.equals(AttributeProp, ff.literal(time.toDate)) def fromFilter(filter: PropertyIsEqualTo): Option[Long] = { checkOrder(filter.getExpression1, filter.getExpression2) .filter(pl => pl.name == AttributeName && pl.literal.getValue.isInstanceOf[Date]) .map(pl => new Instant(pl.literal.getValue.asInstanceOf[Date]).getMillis) } } /** @param sft the [[SimpleFeatureType]] - must contain the replay time attribute * @param replayTime the current replay time */ class ReplayTimeHelper(sft: SimpleFeatureType, replayTime: Long) { import ReplayTimeHelper._ private val replayDate = new java.util.Date(replayTime) private val builder = new SimpleFeatureBuilder(sft) private val attrIndex = sft.indexOf(AttributeName) require(attrIndex >= 0, s"Invalid SFT. The $AttributeName attribute is missing.") /** Copy the given ``sf`` and add a value for the replay time attribute. */ def reType(sf: SimpleFeature): SimpleFeature = { builder.init(sf) builder.set(attrIndex, replayDate) builder.buildFeature(sf.getID) } } /** Represents the state at a specific point in time. 
* * @param sft the SFT * @param events must be ordered, with the most recent first; must consist of only [[CreateOrUpdate]] and * [[Delete]] messages */ case class ReplaySnapshotFeatureCache(override val sft: SimpleFeatureType, replayTime: Long, events: Seq[GeoMessage]) extends KafkaConsumerFeatureCache { override lazy val (spatialIndex, features) = processMessages private def processMessages: (SpatialIndex[SimpleFeature], mutable.Map[String, FeatureHolder]) = { val features = new mutable.HashMap[String, FeatureHolder]() val qt = new WrappedQuadtree[SimpleFeature] val seen = new mutable.HashSet[String] val timeHelper = new ReplayTimeHelper(sft, replayTime) events.foreach { case CreateOrUpdate(ts, sf) => val id = sf.getID // starting with the most recent so if haven't seen it yet, add it, otherwise keep newer version if (!seen(id)) { val env = sf.geometry.getEnvelopeInternal val modSF = timeHelper.reType(sf) qt.insert(env, modSF) features.put(id, FeatureHolder(modSF, env)) seen.add(id) } case Delete(ts, id) => seen.add(id) case unknown => // clear messages should not get here throw new IllegalStateException(s"Unexpected message: '$unknown'") } (qt, features) } } /** Configuration for replaying a Kafka DataStore. * * @param start the instant at which to start the replay * @param end the instant at which to end the replay; must be >= ``start`` * @param readBehind the additional time to pre-read */ case class ReplayConfig(start: Instant, end: Instant, readBehind: Duration) { require(start.getMillis <= end.getMillis, "The start time must not be after the end time.") /** The starting time to read from kafka, accounting for read behind. */ val realStartTime: Instant = start.minus(readBehind) /** * @param msg the [[GeoMessage]] to check * @return true if the ``message`` is before the ``realStartTime`` [[Instant]] */ def isBeforeRealStart(msg: GeoMessage): Boolean = msg.timestamp.isBefore(realStartTime) /** * @param msg the [[GeoMessage]] to check * @return true if the ``message`` is not after the ``end`` [[Instant]] */ def isNotAfterEnd(msg: GeoMessage): Boolean = !msg.timestamp.isAfter(end) def isInWindow(time: Long): Boolean = !(start.isAfter(time) || end.isBefore(time)) } object ReplayConfig extends LazyLogging { def apply(start: Long, end: Long, readBehind: Long): ReplayConfig = ReplayConfig(new Instant(start), new Instant(end), Duration.millis(readBehind)) def encode(conf: ReplayConfig): String = s"${conf.start.getMillis.toHexString}-${conf.end.getMillis.toHexString}-${conf.readBehind.getMillis.toHexString}" def decode(rcString: String): Option[ReplayConfig] = { try { val values = rcString.split('-').map(java.lang.Long.valueOf(_, 16)) if (values.length != 3) { logger.error("Unable to decode ReplayConfig. Wrong number of tokens splitting " + rcString) None } else { val start = new Instant(values(0)) val end = new Instant(values(1)) val duration = Duration.millis(values(2)) Some(ReplayConfig(start, end, duration)) } } catch { case e: IllegalArgumentException => logger.error("Exception thrown decoding ReplayConfig.", e) None } } } /** Splits a [[Filter]] into the requested Kafka Message Timestamp and the remaining filters */ case class TimestampFilterSplit(ts: Option[Long], filter: Option[Filter]) object TimestampFilterSplit { import ReplayTimeHelper.ff /** Look for a Kafka message timestamp filter in ``filter`` and if found, extract the requested timestamp * and return that timestamp and the remaining filters. * * Any operand (or none) of an 'and' may specify a timestamp. 
If multiple operands of the 'and' * specify a timestamp then all timestamps must be the same. * * For an 'or' the requirement is that either all operands specify the same timestamp or none specify a * timestamp. * * A timestamp may not be specified within a 'not'. */ def split(filter: Filter): Option[TimestampFilterSplit] = filter match { case eq: PropertyIsEqualTo => val ts = ReplayTimeHelper.fromFilter(eq) val f = ts.map(_ => None).getOrElse(Some(filter)) Some(TimestampFilterSplit(ts, f)) case a: And => // either no child specifies a timestamp, one child specifies a timestamp or multiple children specify // the same timestamp split(a, buildAnd) case o: Or => // either all children specify the same timestamp or none specify a timestamp split(o, buildOr) case n: Not => // the filter being inverted may not contain a timestamp val s = split(n.getFilter) s.flatMap(split => split.ts.map(_ => None) .getOrElse(Some(TimestampFilterSplit(None, split.filter.map(ff.not))))) case _ => Some(TimestampFilterSplit(None, Some(filter))) } type SplitCombiner = Seq[TimestampFilterSplit] => Option[TimestampFilterSplit] def split(op: BinaryLogicOperator, combiner: SplitCombiner): Option[TimestampFilterSplit] = { val children = op.getChildren.asScala val childSplits = children.flatMap(c => split(c)) if (childSplits.size != children.size) { // one or more children are invalid None } else { combiner(childSplits) } } def buildAnd(childSplits: Seq[TimestampFilterSplit]): Option[TimestampFilterSplit] = { val tsList = childSplits.flatMap(_.ts) val ts = tsList.headOption if (tsList.nonEmpty && tsList.tail.exists(_ != tsList.head)) { // inconsistent timestamps None } else { val filters = childSplits.flatMap(_.filter) val filter = combine(filters, ff.and) Some(TimestampFilterSplit(ts, filter)) } } def buildOr(childSplits: Seq[TimestampFilterSplit]): Option[TimestampFilterSplit] = { val ts = childSplits.headOption.flatMap(_.ts) if (!childSplits.forall(_.ts == ts)) { // inconsistent timestamps None } else { val filters = childSplits.flatMap(_.filter) val filter = combine(filters, ff.or) Some(TimestampFilterSplit(ts, filter)) } } def combine(filters: Seq[Filter], combiner: java.util.List[Filter] => Filter): Option[Filter] = { if (filters.isEmpty) { None } else if (filters.size == 1) { filters.headOption } else { Some(combiner(filters.asJava)) } } }
nagavallia/geomesa
geomesa-kafka/geomesa-kafka-datastore/geomesa-kafka-datastore-common/src/main/scala/org/locationtech/geomesa/kafka/ReplayConfig.scala
Scala
apache-2.0
9,360
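ReplayConfig.encode and decode above serialise the replay window as three hex-encoded longs (start, end and read-behind milliseconds) joined with '-'. A standalone sketch of that wire format, for illustration only and not the geomesa API:

object ReplayConfigCodecSketch extends App {
  // render the three longs as hex, joined with '-'
  def encode(start: Long, end: Long, readBehind: Long): String =
    s"${start.toHexString}-${end.toHexString}-${readBehind.toHexString}"

  // split on '-' and parse each token back as base-16
  def decode(s: String): Option[(Long, Long, Long)] = {
    val values = s.split('-').map(java.lang.Long.parseLong(_, 16))
    if (values.length != 3) None else Some((values(0), values(1), values(2)))
  }

  val encoded = encode(1000L, 5000L, 250L)
  println(encoded)           // 3e8-1388-fa
  println(decode(encoded))   // Some((1000,5000,250))
}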
package com.socrata.querycoordinator import com.socrata.querycoordinator.QueryRewriter.{Anal, RollupName} import com.socrata.querycoordinator.util.Join import com.socrata.soql.SoQLAnalysis import com.socrata.soql.environment.ColumnName import com.socrata.soql.types.SoQLType class TestQueryRewriter extends TestQueryRewriterBase { import Join._ /** Each rollup here is defined by: * - a name * - a soql statement. Note this must be the mapped statement, * i.e. non-system columns prefixed by an _, and backtick escaped * - a Seq of the soql types for each column in the rollup selection */ val rollups = Seq( ("r1", "SELECT `_dxyz-num1`, count(`_dxyz-num1`) GROUP BY `_dxyz-num1`"), ("r2", "SELECT count(`:wido-ward`), `:wido-ward` GROUP BY `:wido-ward`"), ("r3", "SELECT `:wido-ward`, count(*), count(`_crim-typ3`) GROUP BY `:wido-ward`"), ("r4", "SELECT `:wido-ward`, `_crim-typ3`, count(*), `_dxyz-num1`, `_crim-date` GROUP BY `:wido-ward`, `_crim-typ3`, `_dxyz-num1`, `_crim-date`"), ("r5", "SELECT `_crim-typ3`, count(1) group by `_crim-typ3`"), ("r6", "SELECT `:wido-ward`, `_crim-typ3`"), ("r7", "SELECT `:wido-ward`, min(`_dxyz-num1`), max(`_dxyz-num1`), sum(`_dxyz-num1`), count(*) GROUP BY `:wido-ward`"), ("r8", "SELECT date_trunc_ym(`_crim-date`), `:wido-ward`, count(*) GROUP BY date_trunc_ym(`_crim-date`), `:wido-ward`"), ("r9", "SELECT `_crim-typ3`, count(case(`_crim-date` IS NOT NULL, `_crim-date`, true, `_some-date`)) group by `_crim-typ3`"), ("rw1", "SELECT `_dxyz-num1`, count(`_dxyz-num1`) WHERE `_crim-typ3`='traffic' GROUP BY `_dxyz-num1`"), ("rw4", "SELECT `:wido-ward`, `_crim-typ3`, count(*), `_dxyz-num1`, `_crim-date` WHERE `_crim-typ3`='traffic' GROUP BY `:wido-ward`, `_crim-typ3`, `_dxyz-num1`, `_crim-date`") ) val rollupInfos = rollups.map { x => new RollupInfo(x._1, x._2) } /** Pull in the rollupAnalysis for easier debugging */ val rollupAnalysis = rewriter.analyzeRollups(schema, rollupInfos, Map.empty) val rollupRawSchemas = rollupAnalysis.mapValues { case analysis: Anal => analysis.selection.values.toSeq.zipWithIndex.map { case (expr, idx) => rewriter.rollupColumnId(idx) -> expr.typ }.toMap } /** Analyze a "fake" query that has the rollup table column names in, so we * can use it to compare with the rewritten one in assertions. 
*/ def analyzeRewrittenQuery(rollupName: String, q: String): SoQLAnalysis[String, SoQLType] = { val rewrittenRawSchema = rollupRawSchemas(rollupName) val rollupNoopColumnNameMap = rewrittenRawSchema.map { case (k, v) => ColumnName(k) -> k } val rollupDsContext = QueryParser.dsContext(rollupNoopColumnNameMap, rewrittenRawSchema) val rewrittenQueryAnalysis = analyzer.analyzeUnchainedQuery(q)(toAnalysisContext(rollupDsContext)).mapColumnIds(mapIgnoringQualifier(rollupNoopColumnNameMap)) rewrittenQueryAnalysis } test("map query ward, count(*)") { val q = "SELECT ward, count(*) AS ward_count GROUP BY ward" val queryAnalysis = analyzeQuery(q) val rewrittenQuery = "SELECT c1 AS ward, coalesce(sum(c3), 0) AS ward_count GROUP by c1" val rewrittenQueryAnalysis = analyzeRewrittenQuery("r4", rewrittenQuery) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should contain key "r4" rewrites.get("r4").get should equal(rewrittenQueryAnalysis) rewrites should contain key "r3" rewrites should contain key "r7" rewrites should contain key "r8" rewrites should have size 4 } test("map query crime_type, ward, count(*)") { val q = "SELECT crime_type, ward, count(*) AS ward_count GROUP BY crime_type, ward" val queryAnalysis = analyzeQuery(q) val rewrittenQuery = "SELECT c2 AS crime_type, c1 as ward, coalesce(sum(c3), 0) AS ward_count GROUP by c2, c1" val rewrittenQueryAnalysis = analyzeRewrittenQuery("r4", rewrittenQuery) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should contain key "r4" rewrites.get("r4").get should equal(rewrittenQueryAnalysis) rewrites should have size 1 } // count(null) is very different than count(*)! test("don't map count(null) - query crime_type, ward, count(null)") { val q = "SELECT crime_type, ward, count(null) AS ward_count GROUP BY crime_type, ward" val queryAnalysis = analyzeQuery(q) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should have size 0 } test("shouldn't rewrite column not in rollup") { val q = "SELECT ward, dont_create_rollups, count(*) AS ward_count GROUP BY ward, dont_create_rollups" val queryAnalysis = analyzeQuery(q) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should be(empty) } test("hidden column aliasing") { val q = "SELECT crime_type as crimey, ward as crime_type" val queryAnalysis = analyzeQuery(q) val rewrittenQuery = "SELECT c1 as crimey, c1 as crime_type" val rewrittenQueryAnalysis = analyzeRewrittenQuery("r6", rewrittenQuery) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should contain key "r6" rewrites.get("r6").get should equal(rewrittenQueryAnalysis) } test("map query crime_type, ward, 1, count(*) with LIMIT / OFFSET") { val q = "SELECT crime_type, ward, 1, count(*) AS ward_count GROUP BY crime_type, ward LIMIT 100 OFFSET 200" val queryAnalysis = analyzeQuery(q) val rewrittenQuery = "SELECT c2 AS crime_type, c1 as ward, 1, coalesce(sum(c3), 0) AS ward_count GROUP by c2, c1 LIMIT 100 OFFSET 200" val rewrittenQueryAnalysis = analyzeRewrittenQuery("r4", rewrittenQuery) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should contain key "r4" rewrites.get("r4").get should equal(rewrittenQueryAnalysis) rewrites should have size 1 } test("count on literal - map query crime_type, count(0), count('potato')") { val q = "SELECT crime_type, count(0) as crimes, count('potato') as crimes_potato GROUP BY crime_type" val queryAnalysis = analyzeQuery(q) val rewrites = 
rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) val rewrittenQueryR4 = "SELECT c2 AS crime_type, coalesce(sum(c3), 0) as crimes, coalesce(sum(c3), 0) as crimes_potato GROUP by c2" val rewrittenQueryAnalysisR4 = analyzeRewrittenQuery("r4", rewrittenQueryR4) rewrites should contain key "r4" rewrites.get("r4").get should equal(rewrittenQueryAnalysisR4) val rewrittenQueryR5 = "SELECT c1 AS crime_type, coalesce(c2, 0) as crimes, coalesce(c2, 0) as crimes_potato" val rewrittenQueryAnalysisR5 = analyzeRewrittenQuery("r5", rewrittenQueryR5) rewrites should contain key "r5" rewrites.get("r5").get should equal(rewrittenQueryAnalysisR5) // TODO should be 3 eventually... should also rewrite from table w/o group by rewrites should have size 2 } test("map query ward, count(*) where") { val q = "SELECT ward, count(*) AS ward_count WHERE crime_type = 'Clownicide' AND number1 > 5 GROUP BY ward" val queryAnalysis = analyzeQuery(q) val rewrittenQuery = "SELECT c1 AS ward, coalesce(sum(c3), 0) AS ward_count WHERE c2 = 'Clownicide' AND c4 > 5 GROUP by c1" val rewrittenQueryAnalysis = analyzeRewrittenQuery("r4", rewrittenQuery) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should contain key "r4" rewrites.get("r4").get should equal(rewrittenQueryAnalysis) rewrites should have size 1 } // The simple case for rewriting a count(*) test("map query ward, count(crime_type) where") { val q = "SELECT ward, count(crime_type) AS crime_type_count WHERE ward != 5 GROUP BY ward" val queryAnalysis = analyzeQuery(q) val rewrittenQuery = "SELECT c1 AS ward, coalesce(c3, 0) AS crime_type_count WHERE c1 != 5" val rewrittenQueryAnalysis = analyzeRewrittenQuery("r3", rewrittenQuery) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should contain key "r3" rewrites.get("r3").get should equal(rewrittenQueryAnalysis) rewrites should have size 1 } // Should also be able to turn an arbitrary count(...) into a sum(...) test("map query crime_type, count(case(... matches ...))") { val q = "SELECT crime_type, count(case(crime_date IS NOT NULL, crime_date, true, some_date)) AS c GROUP BY crime_type" val queryAnalysis = analyzeQuery(q) val rewrittenQuery = "SELECT c1 AS crime_type, coalesce(c2, 0) AS c" val rewrittenQueryAnalysis = analyzeRewrittenQuery("r9", rewrittenQuery) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should contain key "r9" rewrites.get("r9").get should equal(rewrittenQueryAnalysis) rewrites should have size 1 } // Make sure we are validating the ... in count(...) matches the rollup test("map query crime_type, count(case(... 
doesn't match ...))") { val q = "SELECT crime_type, count(case(crime_date IS NOT NULL AND ward > 3, crime_date, true, some_date)) AS c GROUP BY crime_type" val queryAnalysis = analyzeQuery(q) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should have size 0 } test("order by - query crime_type, ward, count(*)") { val q = "SELECT crime_type, ward, count(*) AS ward_count GROUP BY crime_type, ward ORDER BY count(*) desc, crime_type" val queryAnalysis = analyzeQuery(q) val rewrittenQuery = "SELECT c2 AS crime_type, c1 as ward, coalesce(sum(c3), 0) AS ward_count GROUP by c2, c1 ORDER BY coalesce(sum(c3), 0) desc, c2" val rewrittenQueryAnalysis = analyzeRewrittenQuery("r4", rewrittenQuery) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should contain key "r4" rewrites.get("r4").get should equal(rewrittenQueryAnalysis) rewrites should have size 1 } test("order by with grouping removal - query crime_type, count(*)") { val q = "SELECT crime_type, count(*) AS c GROUP BY crime_type ORDER BY c DESC" val queryAnalysis = analyzeQuery(q) val rewrittenQuery = "SELECT c1 AS crime_type, coalesce(c2, 0) AS c ORDER BY coalesce(c2, 0) DESC" val rewrittenQueryAnalysis = analyzeRewrittenQuery("r9", rewrittenQuery) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should contain key "r5" rewrites.get("r5").get should equal(rewrittenQueryAnalysis) rewrites should contain key "r4" rewrites should have size 2 } test("grouping removal with different column ordering") { val q = "SELECT ward, date_trunc_ym(crime_date) AS d, count(*) GROUP BY ward, date_trunc_ym(crime_date)" val queryAnalysis = analyzeQuery(q) val rewrittenQuery = "SELECT c2 AS ward, c1 AS d, coalesce(c3, 0) AS count" val rewrittenQueryAnalysis = analyzeRewrittenQuery("r8", rewrittenQuery) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should contain key "r8" rewrites.get("r8").get should equal(rewrittenQueryAnalysis) rewrites should have size 2 } test("map query ward, date_trunc_ym(crime_date), count(*)") { val q = "SELECT ward, date_trunc_ym(crime_date) as d, count(*) AS ward_count GROUP BY ward, date_trunc_ym(crime_date)" val queryAnalysis = analyzeQuery(q) val rewrittenQueryR4 = "SELECT c1 AS ward, date_trunc_ym(c5) as d, coalesce(sum(c3), 0) AS ward_count GROUP by c1, date_trunc_ym(c5)" val rewrittenQueryAnalysisR4 = analyzeRewrittenQuery("r4", rewrittenQueryR4) // in this case, we map the function call directly to the column ref val rewrittenQueryR8 = "SELECT c2 as ward, c1 as d, coalesce(c3, 0) as ward_count" val rewrittenQueryAnalysisR8 = analyzeRewrittenQuery("r8", rewrittenQueryR8) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should contain key "r4" rewrites.get("r4").get should equal(rewrittenQueryAnalysisR4) rewrites should contain key "r8" rewrites.get("r8").get should equal(rewrittenQueryAnalysisR8) rewrites should have size 2 } test("map query ward, max(n), min(n), count(*)") { val q = "SELECT ward, max(number1) as max_num, min(number1) as min_num, count(*) AS ward_count GROUP BY ward" val queryAnalysis = analyzeQuery(q) val rewrittenQuery = "SELECT c1 AS ward, c3 as max_num, c2 as min_num, coalesce(c5, 0) AS ward_count" val rewrittenQueryAnalysis = analyzeRewrittenQuery("r7", rewrittenQuery) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should contain key "r7" rewrites.get("r7").get should equal(rewrittenQueryAnalysis) // TODO should be 2 
eventually... should also rewrite from table w/o group by // rewrites should contain key("r4") rewrites should have size 1 } test("Query count(0) without group by") { val q = "SELECT count(0) as countess WHERE crime_type = 'NARCOTICS'" val queryAnalysis = analyzeQuery(q) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) val rewrittenQueryR4 = "SELECT coalesce(sum(c3), 0) as countess WHERE c2 = 'NARCOTICS'" val rewrittenQueryAnalysisR4 = analyzeRewrittenQuery("r4", rewrittenQueryR4) rewrites should contain key "r4" rewrites.get("r4").get should equal(rewrittenQueryAnalysisR4) val rewrittenQueryR5 = "SELECT coalesce(sum(c2), 0) as countess WHERE c1 = 'NARCOTICS'" val rewrittenQueryAnalysisR5 = analyzeRewrittenQuery("r5", rewrittenQueryR5) rewrites should contain key "r5" rewrites.get("r5").get should equal(rewrittenQueryAnalysisR5) rewrites should have size 2 } test("Query min/max without group by") { val q = "SELECT min(number1) as minn, max(number1) as maxn WHERE ward = 7" val queryAnalysis = analyzeQuery(q) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) val rewrittenQuery = "SELECT min(c2) as minn, max(c3) as maxn WHERE c1 = 7" val rewrittenQueryAnalysis = analyzeRewrittenQuery("r7", rewrittenQuery) rewrites should contain key "r7" rewrites.get("r7").get should equal(rewrittenQueryAnalysis) rewrites should have size 1 } test("don't map query 'select ward' to grouped rollups") { val q = "SELECT ward" val queryAnalysis = analyzeQuery(q) val rewrittenQuery = "SELECT c1 AS ward" val rewrittenQueryAnalysis = analyzeRewrittenQuery("r6", rewrittenQuery) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should contain key "r6" rewrites.get("r6").get should equal(rewrittenQueryAnalysis) rewrites should have size 1 } test("rewrite where and having") { val q = "SELECT ward, count(*) AS c WHERE number1 > 100 GROUP BY ward HAVING count(*) > 5" val queryAnalysis = analyzeQuery(q) val rewrittenQuery = "SELECT c1 AS ward, coalesce(sum(c3), 0) AS c WHERE c4 > 100 GROUP BY c1 HAVING c > 5" val rewrittenQueryAnalysis = analyzeRewrittenQuery("r4", rewrittenQuery) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should contain key "r4" rewrites.get("r4").get should equal(rewrittenQueryAnalysis) rewrites should have size 1 } test("don't rewrite having if having expression is not in rollup") { val q = "SELECT ward, count(*) AS c WHERE number1 > 100 GROUP BY ward HAVING count(crime_date) > 5" val queryAnalysis = analyzeQuery(q) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should have size 0 } test("rewrite where and having to where with grouping removal") { val q = "SELECT ward, count(ward) AS c WHERE ward > 100 GROUP BY ward HAVING count(ward) > 5" val queryAnalysis = analyzeQuery(q) val rewrittenQuery = "SELECT c2 AS ward, coalesce(c1, 0) AS c WHERE c2 > 100 AND coalesce(c1, 0) > 5" val rewrittenQueryAnalysis = analyzeRewrittenQuery("r2", rewrittenQuery) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should contain key "r2" rewrites.get("r2").get should equal(rewrittenQueryAnalysis) rewrites should have size 1 } test("rewrite where with where removed") { val q = "SELECT number1, count(number1) AS cn1 WHERE crime_type = 'traffic' GROUP BY number1" val queryAnalysis = analyzeQuery(q) val rewrittenQuery = "SELECT c1 AS number1, coalesce(c2, 0) AS cn1" val rewrittenQueryAnalysis = analyzeRewrittenQuery("rw1", rewrittenQuery) val rewrites 
= rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should contain key "rw1" rewrites.get("rw1").get should equal(rewrittenQueryAnalysis) rewrites should have size 1 } test("rewrite where with subset where removed") { val q = "SELECT number1, count(*) AS ct WHERE ward=1 AND crime_type = 'traffic' GROUP BY number1" val queryAnalysis = analyzeQuery(q) val rewrittenQuery = "SELECT c4 AS number1, coalesce(sum(c3), 0) AS ct WHERE c1=1 GROUP BY number1" val rewrittenQueryAnalysis = analyzeRewrittenQuery("rw4", rewrittenQuery) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should contain key "rw4" rewrites.get("rw4").get should equal(rewrittenQueryAnalysis) } test("don't rewrite if rollup where is not a subset of query top level AND part") { assertNoRollupMatch("SELECT number1, count(number1) AS cn1 WHERE crime_type = 'non-traffic' GROUP BY number1") } test("don't rewrite if rollup where is a subset of query top level OR part") { assertNoRollupMatch("SELECT number1, count(number1) AS cn1 WHERE crime_type = 'traffic' OR number1 = 2 GROUP BY number1") } private def assertNoRollupMatch(q: String): Unit = { val queryAnalysis = analyzeQuery(q) val rewrites = rewriter.possibleRewrites(queryAnalysis, rollupAnalysis) rewrites should have size 0 } }
socrata-platform/query-coordinator
query-coordinator/src/test/scala/com/socrata/querycoordinator/TestQueryRewriter.scala
Scala
apache-2.0
18,106
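A self-contained sketch of the rollup-rewrite idea the tests above exercise: a grouped count(*) query against the base table can be answered from a pre-aggregated rollup by re-grouping and summing its count column. The table shape and names here are illustrative only, not the project's real SoQL analyzer or rollup definitions.

object RollupRewriteSketch {
  // Assume rollup r4 is: SELECT crime_type AS c2, ward AS c1, count(*) AS c3 GROUP BY crime_type, ward
  case class RollupRow(crimeType: String, ward: Int, c3: Long)

  // Answer "SELECT ward, count(*) GROUP BY ward" from the rollup by summing its count column.
  def wardCounts(rollup: Seq[RollupRow]): Map[Int, Long] =
    rollup.groupBy(_.ward).map { case (ward, rows) => ward -> rows.map(_.c3).sum }

  def main(args: Array[String]): Unit = {
    val r4 = Seq(RollupRow("THEFT", 1, 10), RollupRow("BATTERY", 1, 5), RollupRow("THEFT", 2, 7))
    println(wardCounts(r4)) // Map(1 -> 15, 2 -> 7)
  }
}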
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.mxnet import org.apache.mxnet.Base.{NDArrayHandle, NDArrayHandleRef, checkCall, _LIB} import org.apache.mxnet.DType.DType import org.apache.mxnet.SparseFormat.SparseFormat object SparseNDArray { /** * Create a Compressed Sparse Row Storage (CSR) Format Matrix * @param data the data to feed * @param indices The indices array stores the column index for each non-zero element in data * @param indptr The indptr array is what will help identify the rows where the data appears * @param shape the shape of CSR NDArray to be created * @param ctx the context of this NDArray * @return SparseNDArray */ def csrMatrix(data: Array[Float], indices: Array[Float], indptr: Array[Float], shape: Shape, ctx: Context): SparseNDArray = { val fmt = SparseFormat.CSR val dataND = NDArray.array(data, Shape(data.length), ctx) val indicesND = NDArray.array(indices, Shape(indices.length), ctx).asType(DType.Int64) val indptrND = NDArray.array(indptr, Shape(indptr.length), ctx).asType(DType.Int64) val dTypes = Array(indptrND.dtype, indicesND.dtype) val shapes = Array(indptrND.shape, indicesND.shape) val handle = newAllocHandle(fmt, shape, ctx, false, DType.Float32, dTypes, shapes) checkCall(_LIB.mxNDArraySyncCopyFromNDArray(handle, dataND.handle, -1)) checkCall(_LIB.mxNDArraySyncCopyFromNDArray(handle, indptrND.handle, 0)) checkCall(_LIB.mxNDArraySyncCopyFromNDArray(handle, indicesND.handle, 1)) new SparseNDArray(handle) } /** * RowSparseNDArray stores the matrix in row sparse format, * which is designed for arrays of which most row slices are all zeros * @param data Any Array(Array(... Array(Float))) * @param indices the indices to store the data * @param shape shape of the NDArray * @param ctx Context * @return SparseNDArray */ def rowSparseArray(data: Array[_], indices: Array[Float], shape: Shape, ctx: Context): SparseNDArray = { val dataND = NDArray.toNDArray(data) val indicesND = NDArray.array(indices, Shape(indices.length), ctx).asType(DType.Int64) rowSparseArray(dataND, indicesND, shape, ctx) } /** * RowSparseNDArray stores the matrix in row sparse format, * which is designed for arrays of which most row slices are all zeros * @param data NDArray input * @param indices in NDArray. 
Only DType.Int64 supported * @param shape shape of the NDArray * @param ctx Context * @return */ def rowSparseArray(data: NDArray, indices: NDArray, shape: Shape, ctx: Context): SparseNDArray = { val fmt = SparseFormat.ROW_SPARSE val handle = newAllocHandle(fmt, shape, ctx, false, DType.Float32, Array(indices.dtype), Array(indices.shape)) checkCall(_LIB.mxNDArraySyncCopyFromNDArray(handle, data.handle, -1)) checkCall(_LIB.mxNDArraySyncCopyFromNDArray(handle, indices.handle, 0)) new SparseNDArray(handle) } def retain(sparseNDArray: SparseNDArray, indices: Array[Float]): SparseNDArray = { if (sparseNDArray.sparseFormat == SparseFormat.CSR) { throw new IllegalArgumentException("CSR not supported") } NDArray.genericNDArrayFunctionInvoke("_sparse_retain", Seq(sparseNDArray, NDArray.toNDArray(indices))).head.toSparse() } private def newAllocHandle(stype : SparseFormat, shape: Shape, ctx: Context, delayAlloc: Boolean, dtype: DType = DType.Float32, auxDTypes: Array[DType], auxShapes: Array[Shape]) : NDArrayHandle = { val hdl = new NDArrayHandleRef checkCall(_LIB.mxNDArrayCreateSparseEx( stype.id, shape.toArray, shape.length, ctx.deviceTypeid, ctx.deviceId, if (delayAlloc) 1 else 0, dtype.id, auxDTypes.length, auxDTypes.map(_.id), auxShapes.map(_.length), auxShapes.map(_.get(0)), hdl) ) hdl.value } } /** * Sparse NDArray is the child class of NDArray designed to hold the Sparse format * * <p> Currently, Rowsparse and CSR typed NDArray is supported. Most of the Operators * will convert Sparse NDArray to dense. Basic operators like <code>add</code> will * have optimization for sparse operattions</p> * @param handle The pointer that SparseNDArray holds * @param writable whether the NDArray is writable */ class SparseNDArray private[mxnet] (override private[mxnet] val handle: NDArrayHandle, override val writable: Boolean = true) extends NDArray(handle, writable) { private lazy val dense: NDArray = toDense override def toString: String = { dense.toString } /** * Convert a SparseNDArray to dense NDArray * @return NDArray */ def toDense: NDArray = { NDArray.api.cast_storage(this, SparseFormat.DEFAULT.toString).head } override def toArray: Array[Float] = { dense.toArray } override def at(idx: Int): NDArray = { dense.at(idx) } override def slice(start: Int, end: Int): NDArray = { NDArray.api.slice(this, Shape(start), Shape(end)) } /** * Get the Data portion from a Row Sparse NDArray * @return NDArray */ def getData: NDArray = { require(this.sparseFormat == SparseFormat.ROW_SPARSE, "Not Supported for CSR") val handle = new NDArrayHandleRef _LIB.mxNDArrayGetDataNDArray(this.handle, handle) new NDArray(handle.value, false) } /** * Get the indptr Array from a CSR NDArray * @return NDArray */ def getIndptr: NDArray = { require(this.sparseFormat == SparseFormat.CSR, "Not Supported for row sparse") getAuxNDArray(0) } /** * Get the indice Array * @return NDArray */ def getIndices: NDArray = { if (this.sparseFormat == SparseFormat.ROW_SPARSE) { getAuxNDArray(0) } else { getAuxNDArray(1) } } private def getAuxNDArray(idx: Int): NDArray = { val handle = new NDArrayHandleRef checkCall(_LIB.mxNDArrayGetAuxNDArray(this.handle, idx, handle)) new NDArray(handle.value, false) } }
reminisce/mxnet
scala-package/core/src/main/scala/org/apache/mxnet/SparseNDArray.scala
Scala
apache-2.0
7,021
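A minimal usage sketch for the csrMatrix factory defined above; the 3x4 matrix and its CSR arrays are illustrative values, and a working MXNet native library is assumed to be on the path.

import org.apache.mxnet.{Context, Shape, SparseNDArray}

object CsrExample {
  def main(args: Array[String]): Unit = {
    // 3x4 matrix with non-zeros (0,1)=7, (1,0)=8, (2,3)=9 in CSR form
    val data    = Array(7f, 8f, 9f)
    val indices = Array(1f, 0f, 3f)      // column index of each non-zero
    val indptr  = Array(0f, 1f, 2f, 3f)  // row boundaries into data/indices (length = rows + 1)
    val csr = SparseNDArray.csrMatrix(data, indices, indptr, Shape(3, 4), Context.cpu())
    println(csr.toDense)                 // materialize back into a dense NDArray
  }
}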
package models.join

import models.db.{CreateShip, MasterShipBase}
import scalikejdbc._

/**
 *
 * @author ponkotuy
 * Date: 14/04/20.
 */
case class CreateShipWithName(
    fuel: Int,
    ammo: Int,
    steel: Int,
    bauxite: Int,
    develop: Int,
    largeFlag: Boolean,
    created: Long,
    name: String,
    firstShip: Option[String])

object CreateShipWithName {
  def apply(
      cs: SyntaxProvider[CreateShip],
      ms: SyntaxProvider[MasterShipBase],
      msb: SyntaxProvider[MasterShipBase])(
      rs: WrappedResultSet): CreateShipWithName = new CreateShipWithName(
    rs.int(cs.fuel),
    rs.int(cs.ammo),
    rs.int(cs.steel),
    rs.int(cs.bauxite),
    rs.int(cs.develop),
    rs.boolean(cs.largeFlag),
    rs.long(cs.created),
    rs.string(ms.name),
    rs.stringOpt(msb.name)
  )
}

case class CreateShipWithName2(memberId: Long, resultShip: Int, largeFlag: Boolean, created: Long, name: String)

object CreateShipWithName2 {
  def apply(cs: SyntaxProvider[CreateShip], ms: SyntaxProvider[MasterShipBase])(
      rs: WrappedResultSet): CreateShipWithName2 = new CreateShipWithName2(
    rs.long(cs.memberId),
    rs.int(cs.resultShip),
    rs.boolean(cs.largeFlag),
    rs.long(cs.created),
    rs.string(ms.name)
  )
}
ttdoda/MyFleetGirls
server/app/models/join/CreateShipWithName.scala
Scala
mit
1,259
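A hedged sketch of how these result-set mappers are typically applied with ScalikeJDBC's query DSL; it assumes CreateShip and MasterShipBase provide the usual SQLSyntaxSupport companions and that resultShip/firstShip join against an id column, none of which is shown in this entry.

import scalikejdbc._
import models.db.{CreateShip, MasterShipBase}
import models.join.CreateShipWithName

object CreateShipQueries {
  val cs  = CreateShip.syntax("cs")
  val ms  = MasterShipBase.syntax("ms")
  val msb = MasterShipBase.syntax("msb")

  // Latest construction results with ship names resolved (join columns are assumptions).
  def recent(limit: Int)(implicit session: DBSession): List[CreateShipWithName] = withSQL {
    select.from(CreateShip as cs)
      .innerJoin(MasterShipBase as ms).on(cs.resultShip, ms.id)
      .leftJoin(MasterShipBase as msb).on(cs.firstShip, msb.id)
      .orderBy(cs.created).desc
      .limit(limit)
  }.map(CreateShipWithName(cs, ms, msb)).list.apply()
}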
package foo

import org.openjdk.jmh.annotations.{Benchmark, Scope, State}

import java.nio.file.{Files, Paths}

class TestBenchmark {
  @State(Scope.Benchmark)
  class BenchmarkState {
    val myScalaType = ScalaType(100)
    val myJavaType = new JavaType
  }

  @Benchmark
  def sumIntegersBenchmark: Int = AddNumbers.addUntil1000

  @Benchmark
  def fileAccessBenchmark: Unit = {
    val path = Paths.get("test/jmh/data.txt")
    Files.readAllLines(path)
  }
}
smparkes/rules_scala
test/jmh/TestBenchmark.scala
Scala
apache-2.0
465
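The benchmark above can also be launched programmatically with the standard JMH runner; a minimal sketch, assuming the JMH annotation-processing step has already run at build time (e.g. via the repo's jmh rules or sbt-jmh) and with arbitrary fork/iteration counts.

import org.openjdk.jmh.runner.Runner
import org.openjdk.jmh.runner.options.OptionsBuilder

object RunBenchmarks {
  def main(args: Array[String]): Unit = {
    val opts = new OptionsBuilder()
      .include(classOf[foo.TestBenchmark].getSimpleName) // regex over generated benchmark names
      .forks(1)
      .warmupIterations(2)
      .measurementIterations(3)
      .build()
    new Runner(opts).run()
  }
}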
package uk.gov.gds.router.integration import uk.gov.gds.router.util.JsonSerializer._ import uk.gov.gds.router.model._ class ApplicationsLifecycleTest extends RouterIntegrationTestSetup { test("can create and delete applications") { given("A new application ID that does not exist in the router database") val applicationId = uniqueIdForTest when("We create an application with the new ID pointing at our test harness application") var response = post("/applications/" + applicationId, Map("backend_url" -> mainHostBackendUrl)) then("We should get a 201 (created) response that contains a JSON representation of our application") response.status should be(201) var application = fromJson[Application](response.body) application.application_id should be(applicationId) application.backend_url should be(mainHostBackendUrl) when("We attempt to re-create the same application") response = post("/applications/" + applicationId, Map("backend_url" -> mainHostBackendUrl)) then("we should get a 409 (conflict) response") response.status should be(409) when("We attempt to load the application by issuing a GET to its API url") response = get("/applications/" + applicationId) then("We should get a 200 response that contains a JSON representation of our application ") response.status should be(200) application = fromJson[Application](response.body) application.application_id should be(applicationId) application.backend_url should be(mainHostBackendUrl) when("We issue a PUT request to our applications URL that updates its backend URL to a new URL") response = put("/applications/" + applicationId, Map("backend_url" -> "new_backend_url")) then("We should get a 200 response that contains a JSON representation of our application ") response.status should be(200) application = fromJson[Application](response.body) application.application_id should be(applicationId) application.backend_url should be("new_backend_url") when("We attempt to delete the application") response = delete("/applications/" + applicationId) then("We should get a 204 response and the application should be gone") response.status should be(204) response = get("/applications/" + applicationId) response.status should be(404) } test("Default application is created to handle 410 gone routes"){ val response = get("/applications/router-gone") logger.info("app " + response.body) val application = fromJson[Application](response.body) application.application_id should be(ApplicationForGoneRoutes.application_id) } test("When an application is deleted full routes continue to exist but are 'Gone' and prefix routes do not exist") { given("The test application has been created before the test") get("/applications/" + applicationId).status should be(200) when("The application is deleted") var response = delete("/applications/" + applicationId) logger.info(response.body) response.status should be(204) then("The application no longer exists") get("/applications/" + applicationId).status should be(404) then("The full route returns a 410 Gone") response = get("/route/fulltest/test.html") response.status should be(410) then("The prefix routes return 404") response = get("/route/prefixtest") response.status should be(404) response = get("/route/test") response.status should be(404) } test("Can create application using put") { given("A unique application ID") val applicationId = uniqueIdForTest when("We attempt to create an application using PUT") val response = put("/applications/" + applicationId, Map("backend_url" -> mainHostBackendUrl)) then("We should get a 201 response 
signifying succesful creation") response.status should be(201) } }
gds-attic/scala-router
router/router-integration-tests/src/test/scala/uk/gov/gds/router/integration/ApplicationsLifecycleTest.scala
Scala
mit
3,905
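Condensed, the application lifecycle exercised above is four REST calls; this sketch reuses the post/put/get/delete helpers and fixtures assumed to come from RouterIntegrationTestSetup, so it is illustrative rather than standalone.

class ApplicationLifecycleSketch extends RouterIntegrationTestSetup {
  test("create, update, read and delete an application") {
    val id = uniqueIdForTest
    post(s"/applications/$id", Map("backend_url" -> mainHostBackendUrl)).status should be(201)
    put(s"/applications/$id", Map("backend_url" -> "new_backend_url")).status should be(200)
    get(s"/applications/$id").status should be(200)
    delete(s"/applications/$id").status should be(204)
  }
}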
/* * Copyright (C) 2016 Department for Business, Energy and Industrial Strategy * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package rifs.business import org.joda.time.{DateTime, DateTimeZone} import org.scalatest._ import org.scalatest.concurrent.ScalaFutures import org.scalatest.time.{Seconds, Span} import play.api.libs.json.{JsObject, JsString} import play.api.libs.mailer.{Email, MailerClient} import rifs.business.data.ApplicationDetails import rifs.business.models._ import rifs.business.notifications.EmailNotifications import scala.concurrent.{ExecutionContext, Future} class NotificationsTest extends WordSpecLike with Matchers with OptionValues with ScalaFutures { implicit val waitConf = PatienceConfig(Span(3, Seconds)) import NotificationsTestData._ "notification" should { "return no notification ID for a missing application ID" in { val notification = new EmailNotifications(new DummyMailer(""), new DummyGatherDetailsAndSect(Future.successful(None), Future.successful(None)), oppNotFoundOps) val res1 = notification.notifyPortfolioManager(dummyAppId, "from", "to") res1.futureValue shouldBe None val res2 = notification.notifyApplicant(dummyAppId, DateTime.now(DateTimeZone.UTC), "from", "to", "mgr@") res2.futureValue shouldBe None } "return no notification ID for a missing opportunity ID" in { val notification = new EmailNotifications(new DummyMailer(""), new DummyGatherDetailsAndSect(Future.successful(None), Future.successful(None)), oppNotFoundOps) val res1 = notification.notifyManager(OpportunityId(123), "from", "to") res1.futureValue shouldBe None } "return no notification ID for missing details section" in { val notification = new EmailNotifications(new DummyMailer(""), new DummyGatherDetailsAndSect(appOps.gatherDetails(dummyAppId), Future.successful(None)), oppNotFoundOps) val res = notification.notifyApplicant(dummyAppId, DateTime.now(DateTimeZone.UTC), "from", "to", "mgr@") res.futureValue shouldBe None } "create a notification ID upon success" in { val MAIL_ID = "yey" val sender = new DummyMailer(MAIL_ID) val notificationMgr = new EmailNotifications(sender, appOps, oppNotFoundOps) val res1 = notificationMgr.notifyPortfolioManager(dummyAppId, "from", "to") res1.futureValue.value.id shouldBe MAIL_ID val notificationAppl = new EmailNotifications(sender, appOpsAndSection, oppNotFoundOps) val res2 = notificationAppl.notifyApplicant(dummyAppId, DateTime.now(DateTimeZone.UTC), "from@", "to@", "mgr@") res2.futureValue.value.id shouldBe MAIL_ID val oppNotify = new EmailNotifications(sender, appOps, oppOps) val res3 = oppNotify.notifyManager(dummyOppId, "from@", "to@") res3.futureValue.value.id shouldBe MAIL_ID } "return error if e-mailer throws" in { val sender = new DummyMailer(throw new RuntimeException()) val notificationMgr = new EmailNotifications(sender, appOps, oppNotFoundOps) val res1 = notificationMgr.notifyPortfolioManager(dummyAppId, "from", "to") whenReady(res1.failed) { ex => ex shouldBe 
a[RuntimeException] } val notificationAppl = new EmailNotifications(sender, appOpsAndSection, oppNotFoundOps) val res2 = notificationAppl.notifyApplicant(dummyAppId, DateTime.now(DateTimeZone.UTC), "from@", "to@", "mgr@") whenReady(res2.failed) { ex => ex shouldBe a[RuntimeException] } val oppNotify = new EmailNotifications(sender, appOps, oppOps) val res3 = oppNotify.notifyManager(dummyOppId, "from@", "to@") whenReady(res3.failed) { ex => ex shouldBe a[RuntimeException] } } } } object NotificationsTestData { implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.Implicits.global val dummyAppId = ApplicationId(1) val oppNotFoundOps = new DummySaveDescription(_ => Future.successful(None)) val dummyOppId = OpportunityId(1399) val dummyOpp = OpportunityRow(dummyOppId, "Op1", "today", None, 2000, "per event", None, None) val oppOps = new DummySaveDescription(oid => Future.successful(if (oid == dummyOppId) Some(dummyOpp) else None)) class DummyMailer(result: => String) extends MailerClient { override def send(email: Email): String = result } val (appOpsAndSection, appOps) = { val appFormId = ApplicationFormId(1) val oppId = OpportunityId(1) val opp = OpportunityRow(oppId, "oz1", "", None, 0, "", None, None) val appDetails = ApplicationDetails( ApplicationRow(dummyAppId, appFormId, None), ApplicationFormRow(appFormId, oppId), opp) val details = Future.successful(Some(appDetails)) val appSectRow = ApplicationSectionRow(ApplicationSectionId(0), dummyAppId, rifs.business.models.APP_TITLE_SECTION_NO, JsObject(Seq("title" -> JsString("app title"))), None) (new DummyGatherDetailsAndSect(details, Future.successful(Some(appSectRow))), new DummyGatherDetails(details)) } class DummyGatherDetails(result: => Future[Option[ApplicationDetails]]) extends StubApplicationOps { override def gatherDetails(id: SubmittedApplicationRef): Future[Option[ApplicationDetails]] = result } class DummyGatherDetailsAndSect(details: => Future[Option[ApplicationDetails]], section: => Future[Option[ApplicationSectionRow]]) extends DummyGatherDetails(details) { override def fetchSection(id: ApplicationId, sectionNumber: Int): Future[Option[ApplicationSectionRow]] = section } class DummySaveDescription(oppRow: OpportunityId => Future[Option[OpportunityRow]]) extends StubOpportunityOps { override def byId(id: OpportunityId): Future[Option[OpportunityRow]] = oppRow(id) } }
UKGovernmentBEIS/rifs-business
src/test/scala/rifs/business/NotificationsTest.scala
Scala
gpl-3.0
6,367
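The DummyMailer pattern above generalizes to any test that needs to observe outgoing mail rather than just stub a result; a minimal recording variant (the class name is illustrative).

import play.api.libs.mailer.{Email, MailerClient}

// A MailerClient stub that records every e-mail instead of sending it,
// returning a canned message id as the real client would.
class RecordingMailer extends MailerClient {
  @volatile var sent: List[Email] = Nil

  override def send(email: Email): String = {
    sent = email :: sent
    s"mail-${sent.size}"
  }
}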
package com.datastax.spark.connector.streaming import java.net.InetAddress import java.util.concurrent.CompletableFuture import com.datastax.spark.connector._ import com.datastax.spark.connector.cluster.DefaultCluster import com.datastax.spark.connector.cql.CassandraConnector import com.datastax.spark.connector.testkit._ import org.apache.spark.rdd.RDD import org.apache.spark.streaming.{Milliseconds, StreamingContext} import org.scalatest.concurrent.Eventually import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach} import scala.collection.mutable import scala.concurrent.Future import scala.language.postfixOps import scala.util.Random case class WordCount(word: String, count: Int) case class Key(word: String) class RDDStreamingSpec extends SparkCassandraITFlatSpecBase with DefaultCluster with Eventually with BeforeAndAfterEach with BeforeAndAfterAll { import org.scalatest.time.SpanSugar._ implicit val pc = PatienceConfig(60.seconds, 1.second) override def beforeClass { CassandraConnector(defaultConf).withSessionDo { session => val executor = getExecutor(session) createKeyspace(session) awaitAll( Future { session.execute(s"CREATE TABLE $ks.streaming_wordcount (word TEXT PRIMARY KEY, count COUNTER)") }, Future { session.execute(s"CREATE TABLE $ks.streaming_join (word TEXT PRIMARY KEY, count COUNTER)") val ps = session.prepare(s"UPDATE $ks.streaming_join set count = count + 10 where word = ?") awaitAll(for (d <- dataSeq; word <- d) yield executor.executeAsync(ps.bind(word.trim)) ) }, Future { session.execute(s"CREATE TABLE $ks.streaming_join_output (word TEXT PRIMARY KEY, count COUNTER)") }, Future { session.execute(s"CREATE TABLE $ks.dstream_join_output (word TEXT PRIMARY KEY, count COUNTER)") }, Future { session.execute(s"CREATE TABLE $ks.streaming_deletes (word TEXT PRIMARY KEY, count INT)") session.execute(s"INSERT INTO $ks.streaming_deletes (word, count) VALUES ('1words', 1)") session.execute(s"INSERT INTO $ks.streaming_deletes (word, count) VALUES ('1round', 2)") session.execute(s"INSERT INTO $ks.streaming_deletes (word, count) VALUES ('survival', 3)") } ) executor.waitForCurrentlyExecutingTasks() } } val r = new Random() // Build 4 400 Element RDDs to use as a DStream val dataRDDs = new mutable.Queue[RDD[String]]() override def beforeEach() { while (dataRDDs.nonEmpty) dataRDDs.dequeue for (rddNum <- 1 to 4) { dataRDDs.enqueue(sc.parallelize((1 to 400).map(item => data(r.nextInt(data.size))))) } } def withStreamingContext(test: StreamingContext => Any) = { val ssc = new StreamingContext(sc, Milliseconds(200)) try { test(ssc) } finally { ssc.stop(stopSparkContext = false, stopGracefully = true) // this will rethrow any exceptions thrown during execution (from foreachRDD etc) ssc.awaitTerminationOrTimeout(60 * 1000) } } "RDDStream" should s"write from the stream to cassandra table: $ks.streaming_wordcount" in withStreamingContext { ssc => val stream = ssc.queueStream[String](dataRDDs) val wc = stream .map(x => (x, 1)) .reduceByKey(_ + _) .saveToCassandra(ks, "streaming_wordcount") // start the streaming context so the data can be processed and actor started ssc.start() eventually { dataRDDs shouldBe empty } eventually { val rdd = ssc.cassandraTable[WordCount](ks, "streaming_wordcount") val result = rdd.collect result.nonEmpty should be(true) result.length should be(data.size) } } it should "be able to utilize joinWithCassandra during transforms " in withStreamingContext { ssc => val stream = ssc.queueStream[String](dataRDDs) val wc = stream .map(x => (x, 1)) .reduceByKey(_ + _) 
.saveToCassandra(ks, "streaming_wordcount") stream .map(Tuple1(_)) .transform(rdd => rdd.joinWithCassandraTable(ks, "streaming_join")) .map(_._2) .saveToCassandra(ks, "streaming_join_output") ssc.start() eventually { dataRDDs shouldBe empty } eventually { val rdd = ssc.cassandraTable[WordCount](ks, "streaming_join_output") val result = rdd.collect result.nonEmpty should be(true) result.length should be(data.size) rdd.collect.nonEmpty && rdd.collect.length == data.size ssc.sparkContext.cassandraTable(ks, "streaming_join_output").collect.length should be(data.size) } } it should "be able to utilize joinWithCassandra and repartitionByCassandraTable on a Dstream " in withStreamingContext { ssc => val stream = ssc.queueStream[String](dataRDDs) val wc = stream .map(x => (x, 1)) .reduceByKey(_ + _) .saveToCassandra(ks, "streaming_wordcount") val jcRepart = stream .map(Tuple1(_)) .repartitionByCassandraReplica(ks, "streaming_join") val conn = CassandraConnector(defaultConf) jcRepart.foreachRDD(rdd => rdd .partitions .map(rdd.preferredLocations) .foreach { preferredLocations => withClue("Failed to verify preferred locations of repartitionByCassandraReplica RDD") { conn.hosts.map(_.getAddress) should contain(InetAddress.getByName(preferredLocations.head)) } } ) jcRepart.joinWithCassandraTable(ks, "streaming_join") .map(_._2) .saveToCassandra(ks, "dstream_join_output") ssc.start() eventually { dataRDDs shouldBe empty } eventually { val rdd = ssc.cassandraTable[WordCount](ks, "dstream_join_output") val result = rdd.collect result should have size data.size ssc.sparkContext.cassandraTable(ks, "dstream_join_output").collect.length should be(data.size) } } it should "delete rows from cassandra table base on streaming keys" in withStreamingContext { ssc => val stream = ssc.queueStream[String](dataRDDs) val wc = stream .map(Key(_)) .deleteFromCassandra(ks, "streaming_deletes") // start the streaming context so the data can be processed and actor started ssc.start() eventually { dataRDDs shouldBe empty } eventually { val rdd = ssc.cassandraTable[WordCount](ks, "streaming_deletes") val result = rdd.collect result.length should be(1) result(0) should be(WordCount("survival", 3)) } } }
datastax/spark-cassandra-connector
connector/src/it/scala/com/datastax/spark/connector/streaming/RDDStreamingSpec.scala
Scala
apache-2.0
6,576
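Outside the test harness, the streaming write shown above boils down to a few lines; this sketch assumes a keyspace and counter table already exist, and the keyspace/table names are placeholders. The caller is expected to start and stop the StreamingContext.

import com.datastax.spark.connector.streaming._
import org.apache.spark.streaming.dstream.DStream

object WordCountStream {
  // Count words in each micro-batch and upsert the counts into Cassandra.
  def wire(words: DStream[String], keyspace: String, table: String): Unit =
    words
      .map(word => (word, 1))
      .reduceByKey(_ + _)
      .saveToCassandra(keyspace, table)
}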
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.hive import java.io.File import java.nio.file.Files import scala.sys.process._ import org.apache.spark.TestUtils import org.apache.spark.sql.{QueryTest, Row, SparkSession} import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.catalog.CatalogTableType import org.apache.spark.sql.test.SQLTestUtils import org.apache.spark.util.Utils /** * Test HiveExternalCatalog backward compatibility. * * Note that, this test suite will automatically download spark binary packages of different * versions to a local directory `/tmp/spark-test`. If there is already a spark folder with * expected version under this local directory, e.g. `/tmp/spark-test/spark-2.0.3`, we will skip the * downloading for this spark version. */ class HiveExternalCatalogVersionsSuite extends SparkSubmitTestUtils { private val wareHousePath = Utils.createTempDir(namePrefix = "warehouse") private val tmpDataDir = Utils.createTempDir(namePrefix = "test-data") // For local test, you can set `sparkTestingDir` to a static value like `/tmp/test-spark`, to // avoid downloading Spark of different versions in each run. private val sparkTestingDir = new File("/tmp/test-spark") private val unusedJar = TestUtils.createJarWithClasses(Seq.empty) override def afterAll(): Unit = { Utils.deleteRecursively(wareHousePath) Utils.deleteRecursively(tmpDataDir) Utils.deleteRecursively(sparkTestingDir) super.afterAll() } private def tryDownloadSpark(version: String, path: String): Unit = { // Try mirrors a few times until one succeeds for (i <- 0 until 3) { val preferredMirror = Seq("wget", "https://www.apache.org/dyn/closer.lua?preferred=true", "-q", "-O", "-").!!.trim val url = s"$preferredMirror/spark/spark-$version/spark-$version-bin-hadoop2.7.tgz" logInfo(s"Downloading Spark $version from $url") if (Seq("wget", url, "-q", "-P", path).! == 0) { return } logWarning(s"Failed to download Spark $version from $url") } fail(s"Unable to download Spark $version") } private def downloadSpark(version: String): Unit = { tryDownloadSpark(version, sparkTestingDir.getCanonicalPath) val downloaded = new File(sparkTestingDir, s"spark-$version-bin-hadoop2.7.tgz").getCanonicalPath val targetDir = new File(sparkTestingDir, s"spark-$version").getCanonicalPath Seq("mkdir", targetDir).! Seq("tar", "-xzf", downloaded, "-C", targetDir, "--strip-components=1").! Seq("rm", downloaded).! 
} private def genDataDir(name: String): String = { new File(tmpDataDir, name).getCanonicalPath } override def beforeAll(): Unit = { super.beforeAll() val tempPyFile = File.createTempFile("test", ".py") // scalastyle:off line.size.limit Files.write(tempPyFile.toPath, s""" |from pyspark.sql import SparkSession |import os | |spark = SparkSession.builder.enableHiveSupport().getOrCreate() |version_index = spark.conf.get("spark.sql.test.version.index", None) | |spark.sql("create table data_source_tbl_{} using json as select 1 i".format(version_index)) | |spark.sql("create table hive_compatible_data_source_tbl_{} using parquet as select 1 i".format(version_index)) | |json_file = "${genDataDir("json_")}" + str(version_index) |spark.range(1, 2).selectExpr("cast(id as int) as i").write.json(json_file) |spark.sql("create table external_data_source_tbl_{}(i int) using json options (path '{}')".format(version_index, json_file)) | |parquet_file = "${genDataDir("parquet_")}" + str(version_index) |spark.range(1, 2).selectExpr("cast(id as int) as i").write.parquet(parquet_file) |spark.sql("create table hive_compatible_external_data_source_tbl_{}(i int) using parquet options (path '{}')".format(version_index, parquet_file)) | |json_file2 = "${genDataDir("json2_")}" + str(version_index) |spark.range(1, 2).selectExpr("cast(id as int) as i").write.json(json_file2) |spark.sql("create table external_table_without_schema_{} using json options (path '{}')".format(version_index, json_file2)) | |parquet_file2 = "${genDataDir("parquet2_")}" + str(version_index) |spark.range(1, 3).selectExpr("1 as i", "cast(id as int) as p", "1 as j").write.parquet(os.path.join(parquet_file2, "p=1")) |spark.sql("create table tbl_with_col_overlap_{} using parquet options(path '{}')".format(version_index, parquet_file2)) | |spark.sql("create view v_{} as select 1 i".format(version_index)) """.stripMargin.getBytes("utf8")) // scalastyle:on line.size.limit PROCESS_TABLES.testingVersions.zipWithIndex.foreach { case (version, index) => val sparkHome = new File(sparkTestingDir, s"spark-$version") if (!sparkHome.exists()) { downloadSpark(version) } val args = Seq( "--name", "prepare testing tables", "--master", "local[2]", "--conf", "spark.ui.enabled=false", "--conf", "spark.master.rest.enabled=false", "--conf", s"spark.sql.warehouse.dir=${wareHousePath.getCanonicalPath}", "--conf", s"spark.sql.test.version.index=$index", "--driver-java-options", s"-Dderby.system.home=${wareHousePath.getCanonicalPath}", tempPyFile.getCanonicalPath) runSparkSubmit(args, Some(sparkHome.getCanonicalPath)) } tempPyFile.delete() } test("backward compatibility") { val args = Seq( "--class", PROCESS_TABLES.getClass.getName.stripSuffix("$"), "--name", "HiveExternalCatalog backward compatibility test", "--master", "local[2]", "--conf", "spark.ui.enabled=false", "--conf", "spark.master.rest.enabled=false", "--conf", s"spark.sql.warehouse.dir=${wareHousePath.getCanonicalPath}", "--driver-java-options", s"-Dderby.system.home=${wareHousePath.getCanonicalPath}", unusedJar.toString) runSparkSubmit(args) } } object PROCESS_TABLES extends QueryTest with SQLTestUtils { // Tests the latest version of every release line. 
val testingVersions = Seq("2.0.2", "2.1.2", "2.2.0") protected var spark: SparkSession = _ def main(args: Array[String]): Unit = { val session = SparkSession.builder() .enableHiveSupport() .getOrCreate() spark = session import session.implicits._ testingVersions.indices.foreach { index => Seq( s"data_source_tbl_$index", s"hive_compatible_data_source_tbl_$index", s"external_data_source_tbl_$index", s"hive_compatible_external_data_source_tbl_$index", s"external_table_without_schema_$index").foreach { tbl => val tableMeta = spark.sharedState.externalCatalog.getTable("default", tbl) // make sure we can insert and query these tables. session.sql(s"insert into $tbl select 2") checkAnswer(session.sql(s"select * from $tbl"), Row(1) :: Row(2) :: Nil) checkAnswer(session.sql(s"select i from $tbl where i > 1"), Row(2)) // make sure we can rename table. val newName = tbl + "_renamed" sql(s"ALTER TABLE $tbl RENAME TO $newName") val readBack = spark.sharedState.externalCatalog.getTable("default", newName) val actualTableLocation = readBack.storage.locationUri.get.getPath val expectedLocation = if (tableMeta.tableType == CatalogTableType.EXTERNAL) { tableMeta.storage.locationUri.get.getPath } else { spark.sessionState.catalog.defaultTablePath(TableIdentifier(newName, None)).getPath } assert(actualTableLocation == expectedLocation) // make sure we can alter table location. withTempDir { dir => val path = dir.toURI.toString.stripSuffix("/") sql(s"ALTER TABLE ${tbl}_renamed SET LOCATION '$path'") val readBack = spark.sharedState.externalCatalog.getTable("default", tbl + "_renamed") val actualTableLocation = readBack.storage.locationUri.get.getPath val expected = dir.toURI.getPath.stripSuffix("/") assert(actualTableLocation == expected) } } // test permanent view checkAnswer(sql(s"select i from v_$index"), Row(1)) // SPARK-22356: overlapped columns between data and partition schema in data source tables val tbl_with_col_overlap = s"tbl_with_col_overlap_$index" // For Spark 2.2.0 and 2.1.x, the behavior is different from Spark 2.0. if (testingVersions(index).startsWith("2.1") || testingVersions(index) == "2.2.0") { spark.sql("msck repair table " + tbl_with_col_overlap) assert(spark.table(tbl_with_col_overlap).columns === Array("i", "j", "p")) checkAnswer(spark.table(tbl_with_col_overlap), Row(1, 1, 1) :: Row(1, 1, 1) :: Nil) assert(sql("desc " + tbl_with_col_overlap).select("col_name") .as[String].collect().mkString(",").contains("i,j,p")) } else { assert(spark.table(tbl_with_col_overlap).columns === Array("i", "p", "j")) checkAnswer(spark.table(tbl_with_col_overlap), Row(1, 1, 1) :: Row(1, 1, 1) :: Nil) assert(sql("desc " + tbl_with_col_overlap).select("col_name") .as[String].collect().mkString(",").contains("i,p,j")) } } } }
ron8hu/spark
sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala
Scala
apache-2.0
10,190
package com.debasish.nlp.parsers

import opennlp.tools.cmdline.parser.ParserTool
import opennlp.tools.parser.{ParserFactory, ParserModel}

/**
 * Created by debasish on 5/21/16.
 */
private[parsers] class OpenNlpParser extends Parser {
  private[this] val path = getClass.getResource("/models/en-parser-chunking.bin")
  private[this] val model = new ParserModel(path)

  def process(sentence: String): Unit = {
    val parser = ParserFactory.create(model)
    val parsedTree = ParserTool.parseLine(sentence, parser, 1)
    for (p <- parsedTree) p.show()
  }
}
DEK11/MoreNLP
src/main/scala/com/debasish/nlp/parsers/OpenNlpParser.scala
Scala
apache-2.0
565
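Because the class above is private[parsers], a caller inside the same package can drive it directly; the sentence is arbitrary, and the chunking model must be present on the classpath at /models/en-parser-chunking.bin.

package com.debasish.nlp.parsers

object OpenNlpParserExample {
  def main(args: Array[String]): Unit = {
    val parser = new OpenNlpParser
    // Prints the constituency parse tree(s) for the sentence to stdout.
    parser.process("The quick brown fox jumps over the lazy dog.")
  }
}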
package de.metacoder.edwardthreadlocal.analysis import de.metacoder.edwardthreadlocal.analysis.datamodel.CallData.{CallToRemove, CallToSet, CurrentValueInfo} import de.metacoder.edwardthreadlocal.analysis.datamodel.{CallData, ValueInstanceID} import scala.annotation.tailrec import scala.language.postfixOps object FindFaultySubSeries { def apply(series:Seq[CallData])(implicit setup:AnalysisSetup):Map[ThreadLocal[_], Seq[CallData]] = postProcessedSeriesPerThreadLocal(series).filter(e ⇒ looksFaulty(e._2)) private def postProcessedSeriesPerThreadLocal(series:Seq[CallData]):Map[ThreadLocal[_], Seq[CallData]] = series groupBy { _.threadLocal } mapValues postProcessedSeriesForOneThreadLocal filterNot { _._2 isEmpty } private def postProcessedSeriesForOneThreadLocal(series:Seq[CallData]):Seq[CallData] = { def isInterestingConsecutivePair(before:CallData, after:CallData, isLastPair:Boolean) = if (before.threadLocal != after.threadLocal) true else (before, after) match { case (_:CallToRemove[_], CurrentValueInfo(_, nullInstanceID)) if nullInstanceID.refersToNull && !isLastPair ⇒ false case (CurrentValueInfo(_, same1), CurrentValueInfo(_, same2)) if same1 == same2 ⇒ false case (CallToSet(_, same1, _), CurrentValueInfo(_, same2)) if same1 == same2 && !isLastPair ⇒ false case (CallToSet(_, same1, _), CallToSet(_, same2, _)) if same1 == same2 ⇒ false case _ ⇒ true } def withoutBoringSubSequences(series:Seq[CallData]):Seq[CallData] = series match { case Seq() | Seq(_) ⇒ series case Seq(fst, snd, rst@_*) ⇒ if (!isInterestingConsecutivePair(fst, snd, rst isEmpty)) withoutBoringSubSequences(fst +: rst) else fst +: withoutBoringSubSequences(snd +: rst) } withoutBoringSubSequences(series) } private def looksFaulty(series:Seq[CallData]):Boolean = { @tailrec def recurse(s:Seq[CallData], initialValue:Option[ValueInstanceID], lastSeenValue:Option[ValueInstanceID]):Boolean = s match { case Seq() ⇒ initialValue != lastSeenValue case Seq(CurrentValueInfo(_, cv), rst@_*) if initialValue isEmpty ⇒ recurse(s, Some(cv), lastSeenValue) case Seq(CurrentValueInfo(_, cv), rst@_*) ⇒ recurse(rst, initialValue, Some(cv)) case Seq(CallToSet(_, cv, _), rst@_*) if initialValue isEmpty ⇒ recurse(s, Some(cv), lastSeenValue) case Seq(CallToSet(_, cv, _), rst@_*) ⇒ recurse(rst, initialValue, Some(cv)) case Seq(_:CallToRemove[_], rst@_*) ⇒ recurse(rst, initialValue, None) case Seq(_, rst@_*) ⇒ recurse(rst, initialValue, lastSeenValue) } recurse(series, None, None) } }
metacoder/edward-tl
agent-impl/src/main/scala/de/metacoder/edwardthreadlocal/analysis/FindFaultySubSeries.scala
Scala
apache-2.0
2,698
package org.tribbloid.ispark.msg import org.tribbloid.ispark.UUID import org.tribbloid.ispark.display.{Data, MIME} object ExecutionStatus extends Enumeration { type ExecutionStatus = Value val ok = Value val error = Value val abort = Value } object HistAccessType extends Enumeration { type HistAccessType = Value val range = Value val tail = Value val search = Value } object ExecutionState extends Enumeration { type ExecutionState = Value val busy = Value val idle = Value val starting = Value } object MsgType extends Enumeration { type MsgType = Value val execute_request, execute_reply, object_info_request, object_info_reply, complete_request, complete_reply, history_request, history_reply, connect_request, connect_reply, kernel_info_request, kernel_info_reply, shutdown_request, shutdown_reply, stream, display_data, pyin, pyout, pyerr, status, input_request, input_reply, comm_open, comm_msg, comm_close = Value } sealed trait Content sealed trait FromIPython extends Content sealed trait ToIPython extends Content case class Header( msg_id: UUID, username: String, session: UUID, msg_type: MsgType) case class Msg[+T <: Content]( idents: List[String], // XXX: Should be List[UUID]? header: Header, parent_header: Option[Header], metadata: Metadata, content: T) { private def replyHeader(msg_type: MsgType): Header = header.copy(msg_id=UUID.uuid4(), msg_type=msg_type) private def replyMsg[T <: ToIPython](idents: List[String], msg_type: MsgType, content: T, metadata: Metadata): Msg[T] = Msg(idents, replyHeader(msg_type), Some(header), metadata, content) def pub[T <: ToIPython](msg_type: MsgType, content: T, metadata: Metadata=Metadata()): Msg[T] = { val tpe = content match { case content: stream => content.name case _ => msg_type.toString } replyMsg(tpe :: Nil, msg_type, content, metadata) } def reply[T <: ToIPython](msg_type: MsgType, content: T, metadata: Metadata=Metadata()): Msg[T] = replyMsg(idents, msg_type, content, metadata) } case class execute_request( // Source code to be executed by the kernel, one or more lines. code: String, // A boolean flag which, if True, signals the kernel to execute // this code as quietly as possible. This means that the kernel // will compile the code with 'exec' instead of 'single' (so // sys.displayhook will not fire), forces store_history to be False, // and will *not*: // - broadcast exceptions on the PUB socket // - do any logging // // The default is False. silent: Boolean, // A boolean flag which, if True, signals the kernel to populate history // The default is True if silent is False. If silent is True, store_history // is forced to be False. store_history: Option[Boolean]=None, // A dict mapping names to expressions to be evaluated in the user's dict. The // rich display-data representation of each will be evaluated after execution. // See the display_data content for the structure of the representation data. user_expressions: Map[String, String], // Some frontends (e.g. the Notebook) do not support stdin requests. If // raw_input is called from code executed from such a frontend, a // StdinNotImplementedError will be raised. allow_stdin: Boolean) extends FromIPython sealed trait execute_reply extends ToIPython { // One of: 'ok' OR 'error' OR 'abort' val status: ExecutionStatus // The global kernel counter that increases by one with each request that // stores history. This will typically be used by clients to display // prompt numbers to the user. If the request did not store history, this will // be the current value of the counter in the kernel. 
val execution_count: Int } case class execute_ok_reply( execution_count: Int, // 'payload' will be a list of payload dicts. // Each execution payload is a dict with string keys that may have been // produced by the code being executed. It is retrieved by the kernel at // the end of the execution and sent back to the front end, which can take // action on it as needed. See main text for further details. payload: List[Map[String, String]], // Results for the user_expressions. user_expressions: Map[String, String]) extends execute_reply { val status = ExecutionStatus.ok } case class execute_error_reply( execution_count: Int, // Exception name, as a string ename: String, // Exception value, as a string evalue: String, // The traceback will contain a list of frames, represented each as a // string. For now we'll stick to the existing design of ultraTB, which // controls exception level of detail statefully. But eventually we'll // want to grow into a model where more information is collected and // packed into the traceback object, with clients deciding how little or // how much of it to unpack. But for now, let's start with a simple list // of strings, since that requires only minimal changes to ultratb as // written. traceback: List[String]) extends execute_reply { val status = ExecutionStatus.error } case class execute_abort_reply( execution_count: Int) extends execute_reply { val status = ExecutionStatus.abort } case class object_info_request( // The (possibly dotted) name of the object to be searched in all // relevant namespaces oname: String, // The level of detail desired. The default (0) is equivalent to typing // 'x?' at the prompt, 1 is equivalent to 'x??'. detail_level: Int) extends FromIPython case class ArgSpec( // The names of all the arguments args: List[String], // The name of the varargs (*args), if any varargs: String, // The name of the varkw (**kw), if any varkw: String, // The values (as strings) of all default arguments. Note // that these must be matched *in reverse* with the 'args' // list above, since the first positional args have no default // value at all. defaults: List[String]) sealed trait object_info_reply extends ToIPython { // The name the object was requested under val name: String // Boolean flag indicating whether the named object was found or not. If // it's false, all other fields will be empty. val found: Boolean } case class object_info_notfound_reply( name: String) extends object_info_reply { val found = false } case class object_info_found_reply( name: String, // Flags for magics and system aliases ismagic: Boolean, isalias: Boolean, // The name of the namespace where the object was found ('builtin', // 'magics', 'alias', 'interactive', etc.) namespace: String, // The type name will be type.__name__ for normal Python objects, but it // can also be a string like 'Magic function' or 'System alias' type_name: String, // The string form of the object, possibly truncated for length if // detail_level is 0 string_form: String, // For objects with a __class__ attribute this will be set base_class: String, // For objects with a __len__ attribute this will be set length: String, // If the object is a function, class or method whose file we can find, // we give its full path file: String, // For pure Python callable objects, we can reconstruct the object // definition line which provides its call signature. For convenience this // is returned as a single 'definition' field, but below the raw parts that // compose it are also returned as the argspec field. 
definition: String, // The individual parts that together form the definition string. Clients // with rich display capabilities may use this to provide a richer and more // precise representation of the definition line (e.g. by highlighting // arguments based on the user's cursor position). For non-callable // objects, this field is empty. argspec: ArgSpec, // For instances, provide the constructor signature (the definition of // the __init__ method): init_definition: String, // Docstrings: for any object (function, method, module, package) with a // docstring, we show it. But in addition, we may provide additional // docstrings. For example, for instances we will show the constructor // and class docstrings as well, if available. docstring: String, // For instances, provide the constructor and class docstrings init_docstring: String, class_docstring: String, // If it's a callable object whose call method has a separate docstring and // definition line: call_def: String, call_docstring: String, // If detail_level was 1, we also try to find the source code that // defines the object, if possible. The string 'None' will indicate // that no source was found. source: String) extends object_info_reply { val found = true } case class complete_request( // The text to be completed, such as 'a.is' // this may be an empty string if the frontend does not do any lexing, // in which case the kernel must figure out the completion // based on 'line' and 'cursor_pos'. text: String, // The full line, such as 'print a.is'. This allows completers to // make decisions that may require information about more than just the // current word. line: String, // The entire block of text where the line is. This may be useful in the // case of multiline completions where more context may be needed. Note: if // in practice this field proves unnecessary, remove it to lighten the // messages. block: Option[String], // The position of the cursor where the user hit 'TAB' on the line. cursor_pos: Int) extends FromIPython case class complete_reply( // The list of all matches to the completion request, such as // ['a.isalnum', 'a.isalpha'] for the above example. matches: List[String], // the substring of the matched text // this is typically the common prefix of the matches, // and the text that is already in the block that would be replaced by the full completion. // This would be 'a.is' in the above example. matched_text: String, // status should be 'ok' unless an exception was raised during the request, // in which case it should be 'error', along with the usual error message content // in other messages. status: ExecutionStatus) extends ToIPython case class history_request( // If True, also return output history in the resulting dict. output: Boolean, // If True, return the raw input history, else the transformed input. raw: Boolean, // So far, this can be 'range', 'tail' or 'search'. hist_access_type: HistAccessType, // If hist_access_type is 'range', get a range of input cells. session can // be a positive session number, or a negative number to count back from // the current session. session: Option[Int], // start and stop are line numbers within that session. start: Option[Int], stop: Option[Int], // If hist_access_type is 'tail' or 'search', get the last n cells. n: Option[Int], // If hist_access_type is 'search', get cells matching the specified glob // pattern (with * and ? as wildcards). pattern: Option[String], // If hist_access_type is 'search' and unique is true, do not // include duplicated history. 
Default is false. unique: Option[Boolean]) extends FromIPython case class history_reply( // A list of 3 tuples, either: // (session, line_number, input) or // (session, line_number, (input, output)), // depending on whether output was False or True, respectively. history: List[(Int, Int, Either[String, (String, Option[String])])]) extends ToIPython case class connect_request() extends FromIPython case class connect_reply( // The port the shell ROUTER socket is listening on. shell_port: Int, // The port the PUB socket is listening on. iopub_port: Int, // The port the stdin ROUTER socket is listening on. stdin_port: Int, // The port the heartbeat socket is listening on. hb_port: Int) extends ToIPython case class kernel_info_request() extends FromIPython case class kernel_info_reply( // Version of messaging protocol (mandatory). // The first integer indicates major version. It is incremented when // there is any backward incompatible change. // The second integer indicates minor version. It is incremented when // there is any backward compatible change. protocol_version: (Int, Int), // IPython version number (optional). // Non-python kernel backend may not have this version number. // The last component is an extra field, which may be 'dev' or // 'rc1' in development version. It is an empty string for // released version. ipython_version: Option[(Int, Int, Int, String)]=None, // Language version number (mandatory). // It is Python version number (e.g., [2, 7, 3]) for the kernel // included in IPython. language_version: List[Int], // Programming language in which kernel is implemented (mandatory). // Kernel included in IPython returns 'python'. language: String) extends ToIPython case class shutdown_request( // whether the shutdown is final, or precedes a restart restart: Boolean) extends FromIPython case class shutdown_reply( // whether the shutdown is final, or precedes a restart restart: Boolean) extends ToIPython case class stream( // The name of the stream is one of 'stdout', 'stderr' name: String, // The data is an arbitrary string to be written to that stream data: String) extends ToIPython case class display_data( // Who create the data source: String, // The data dict contains key/value pairs, where the kids are MIME // types and the values are the raw data of the representation in that // format. data: Data, // Any metadata that describes the data metadata: Metadata) extends ToIPython case class pyin( // Source code to be executed, one or more lines code: String, // The counter for this execution is also provided so that clients can // display it, since IPython automatically creates variables called _iN // (for input prompt In[N]). execution_count: Int) extends ToIPython case class pyout( // The counter for this execution is also provided so that clients can // display it, since IPython automatically creates variables called _N // (for prompt N). execution_count: Int, // data and metadata are identical to a display_data message. // the object being displayed is that passed to the display hook, // i.e. the *result* of the execution. data: Data, metadata: Metadata = Metadata()) extends ToIPython case class pyerr( execution_count: Int, // Exception name, as a string ename: String, // Exception value, as a string evalue: String, // The traceback will contain a list of frames, represented each as a // string. For now we'll stick to the existing design of ultraTB, which // controls exception level of detail statefully. 
But eventually we'll // want to grow into a model where more information is collected and // packed into the traceback object, with clients deciding how little or // how much of it to unpack. But for now, let's start with a simple list // of strings, since that requires only minimal changes to ultratb as // written. traceback: List[String]) extends ToIPython object pyerr { // XXX: can't use apply(), because of https://github.com/playframework/playframework/issues/2031 def fromThrowable(execution_count: Int, exception: Throwable): pyerr = { val name = exception.getClass.getName val value = Option(exception.getMessage) getOrElse "" val stacktrace = exception .getStackTrace .takeWhile(_.getFileName != "<console>") .toList val traceback = s"$name: $value" :: stacktrace.map(" " + _) pyerr(execution_count=execution_count, ename=name, evalue=value, traceback=traceback) } } case class status( // When the kernel starts to execute code, it will enter the 'busy' // state and when it finishes, it will enter the 'idle' state. // The kernel will publish state 'starting' exactly once at process startup. execution_state: ExecutionState) extends ToIPython case class clear_output( // Wait to clear the output until new output is available. Clears the // existing output immediately before the new output is displayed. // Useful for creating simple animations with minimal flickering. _wait: Boolean) extends ToIPython case class input_request( prompt: String) extends ToIPython case class input_reply( value: String) extends FromIPython import play.api.libs.json.JsObject case class comm_open( comm_id: UUID, target_name: String, data: JsObject) extends ToIPython with FromIPython case class comm_msg( comm_id: UUID, data: JsObject) extends ToIPython with FromIPython case class comm_close( comm_id: UUID, data: JsObject) extends ToIPython with FromIPython // XXX: This was originally in src/main/scala/Formats.scala, but due to // a bug in the compiler related to `knownDirectSubclasses` and possibly // also other bugs (e.g. `isCaseClass`), formats had to be moved here // and explicit type annotations had to be added for formats of sealed // traits. Otherwise no known subclasses will be reported. 
import org.tribbloid.ispark.json.{EnumJson, Json} import play.api.libs.json.{JsObject, Writes} package object formats { implicit val MIMEFormat = new Writes[MIME] { def writes(mime: MIME) = implicitly[Writes[String]].writes(mime.name) } implicit val DataFormat = new Writes[Data] { def writes(data: Data) = { JsObject(data.items.map { case (mime, value) => mime.name -> implicitly[Writes[String]].writes(value) }) } } import org.tribbloid.ispark.json.JsonImplicits._ implicit val MsgTypeFormat = EnumJson.format(MsgType) implicit val HeaderFormat = Json.format[Header] implicit val ExecutionStatusFormat = EnumJson.format(ExecutionStatus) implicit val ExecutionStateFormat = EnumJson.format(ExecutionState) implicit val HistAccessTypeFormat = EnumJson.format(HistAccessType) implicit val ArgSpecFormat = Json.format[ArgSpec] implicit val ExecuteRequestJSON = Json.format[execute_request] implicit val ExecuteReplyJSON: Writes[execute_reply] = Json.writes[execute_reply] implicit val ObjectInfoRequestJSON = Json.format[object_info_request] implicit val ObjectInfoReplyJSON: Writes[object_info_reply] = Json.writes[object_info_reply] implicit val CompleteRequestJSON = Json.format[complete_request] implicit val CompleteReplyJSON = Json.format[complete_reply] implicit val HistoryRequestJSON = Json.format[history_request] implicit val HistoryReplyJSON = Json.format[history_reply] implicit val ConnectRequestJSON = Json.noFields[connect_request] implicit val ConnectReplyJSON = Json.format[connect_reply] implicit val KernelInfoRequestJSON = Json.noFields[kernel_info_request] implicit val KernelInfoReplyJSON = Json.format[kernel_info_reply] implicit val ShutdownRequestJSON = Json.format[shutdown_request] implicit val ShutdownReplyJSON = Json.format[shutdown_reply] implicit val StreamJSON = Json.writes[stream] implicit val DisplayDataJSON = Json.writes[display_data] implicit val PyinJSON = Json.writes[pyin] implicit val PyoutJSON = Json.writes[pyout] implicit val PyerrJSON = Json.writes[pyerr] implicit val StatusJSON = Json.writes[status] implicit val ClearOutputJSON = new Writes[clear_output] { def writes(obj: clear_output) = { // NOTE: `wait` is a final member on Object, so we have to go through hoops JsObject(Seq("wait" -> implicitly[Writes[Boolean]].writes(obj._wait))) } } implicit val InputRequestJSON = Json.format[input_request] implicit val InputReplyJSON = Json.format[input_reply] implicit val CommOpenJSON = Json.format[comm_open] implicit val CommMsgJSON = Json.format[comm_msg] implicit val CommCloseJSON = Json.format[comm_close] }
deeplearning4j/ISpark
core/src/main/scala/org/tribbloid/ispark/msg/Msg.scala
Scala
apache-2.0
26,866
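The formats object above wires each kernel-message case class to a play-json Writes instance. Below is a minimal, self-contained sketch of that mechanism, assuming only play-json 2.x on the classpath; the `stream` class here is a local stand-in, not the ISpark code itself.

import play.api.libs.json.{JsValue, Json, Writes}

object StreamWritesSketch extends App {
  // Local stand-in for one of the ToIPython messages defined above.
  case class stream(name: String, data: String)

  // An explicit Writes, in the same style as MIMEFormat/ClearOutputJSON above:
  // each field becomes a JSON key of the same name.
  implicit val streamWrites: Writes[stream] = new Writes[stream] {
    def writes(s: stream): JsValue = Json.obj("name" -> s.name, "data" -> s.data)
  }

  // Prints {"name":"stdout","data":"hello"}
  println(Json.toJson(stream("stdout", "hello")))
}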
package com.microsoft.awt.data import org.scalajs.nodejs.mongodb.{Collection, Db} import scala.concurrent.ExecutionContext import scala.scalajs.js /** * Post DAO * @author lawrence.daniels@gmail.com */ @js.native trait PostDAO extends Collection /** * Post DAO Companion * @author lawrence.daniels@gmail.com */ object PostDAO { /** * Post DAO Extensions * @param db the given [[Db database]] */ implicit class PostDAOExtensions(val db: Db) extends AnyVal { @inline def getPostDAO(implicit ec: ExecutionContext) = { db.collectionFuture("posts").mapTo[PostDAO] } } }
ldaniels528/awt
app-nodejs/src/main/scala/com/microsoft/awt/data/PostDAO.scala
Scala
apache-2.0
621
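PostDAOExtensions above uses the common Scala pattern of adding a typed accessor to a database handle through an implicit value class. A minimal sketch of the same pattern follows; `Db` and its `collection` method are simplified stand-ins for the Node.js MongoDB facade used above, not the real bindings.

object DaoExtensionSketch extends App {
  // Simplified stand-in for the database handle.
  final class Db {
    def collection(name: String): String = s"handle-for-$name"
  }

  // Implicit value class: adds getPostDAO to Db without allocating a wrapper at runtime.
  implicit class DbOps(val db: Db) extends AnyVal {
    @inline def getPostDAO: String = db.collection("posts")
  }

  println(new Db().getPostDAO) // handle-for-posts
}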
package us.feliscat.text.normalizer.ja import us.feliscat.m17n.JapaneseLocale import us.feliscat.text.StringOption import us.feliscat.text.normalizer.MultiLingualDictionaryBasedNormalizer /** * @author K.Sakamoto * Created on 2016/08/06 */ object JapaneseSentenceBeginningNormalizer extends MultiLingualDictionaryBasedNormalizer( StringOption("sentence_beginning_normalization.yml")) with JapaneseLocale { override protected def replaceAll(input: String, term: String, replacement: String): String = { input.replaceAll(raw"""^$term""", replacement) } }
ktr-skmt/FelisCatusZero-multilingual
libraries/src/main/scala/us/feliscat/text/normalizer/ja/JapaneseSentenceBeginningNormalizer.scala
Scala
apache-2.0
584
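The normalizer above overrides replaceAll with a regex anchored at the start of the input, so only a sentence-initial term is rewritten. A small sketch of that behaviour with plain strings (the real class loads its term/replacement pairs from sentence_beginning_normalization.yml):

object SentenceBeginningSketch extends App {
  // Same shape as the overridden replaceAll above: ^ anchors the term to the start.
  def replaceAll(input: String, term: String, replacement: String): String =
    input.replaceAll(raw"""^$term""", replacement)

  println(replaceAll("but we continue", "but", "however")) // "however we continue"
  println(replaceAll("all but one", "but", "however"))     // unchanged: "but" is not at the start
}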
/** * Copyright 2017 Alessandro Simi * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.exemplary.aws import com.amazonaws.services.dynamodbv2.model._ import scala.collection.JavaConverters._ trait DynamoDBOperations { test: AbstractTest => def createTable(name: String, hashKeyName: String = "key", readThroughput: Int = 10, writeThroughput: Int = 10, streamEnabled: Boolean = false): CreateTableResult = { val attributeDefinition = new AttributeDefinition() .withAttributeName(hashKeyName) .withAttributeType(ScalarAttributeType.S) val keySchema = new KeySchemaElement() .withAttributeName(hashKeyName) .withKeyType(KeyType.HASH) val provisionedThroughput = new ProvisionedThroughput() .withReadCapacityUnits(readThroughput.toLong) .withWriteCapacityUnits(writeThroughput.toLong) createTable(name, attributeDefinition, keySchema, provisionedThroughput, streamEnabled) } private def createTable(name: String, attributeDefinition: AttributeDefinition, keySchema: KeySchemaElement, provisionedThroughput: ProvisionedThroughput, streamEnabled: Boolean): CreateTableResult = if (streamEnabled) { val streamSpecification = new StreamSpecification() .withStreamEnabled(true) .withStreamViewType(StreamViewType.NEW_AND_OLD_IMAGES) val createTableRequest = new CreateTableRequest() .withTableName(name) .withKeySchema(keySchema) .withAttributeDefinitions(attributeDefinition) .withProvisionedThroughput(provisionedThroughput) .withStreamSpecification(streamSpecification) resultOf(client.createTable(createTableRequest)) } else { resultOf(client.createTable( attributeDefinitions = List(attributeDefinition), tableName = name, keySchema = List(keySchema), provisionedThroughput = provisionedThroughput )) } def describeTable(name: String) = resultOf(client.describeTable("tableName")).getTable def putItem(tableName: String, values: Map[String, String]): PutItemResult = { resultOf(client.putItem(tableName, values.mapValues(new AttributeValue(_)))) } def putWriteRequest(values: Map[String, String]): WriteRequest = { val writeRequest = new WriteRequest() val putRequest = new PutRequest(values.mapValues(new AttributeValue(_)).asJava) writeRequest.setPutRequest(putRequest) writeRequest } def keyAndAttributes(key: String, value: String): KeysAndAttributes = { val keysAndAttributes = new KeysAndAttributes() val keys = Map("key" -> new AttributeValue(value)).asJava keysAndAttributes.setKeys(List(keys).asJava) keysAndAttributes } }
alessandrosimi/aws-dynamodb-nio
dynamodb/src/test/scala/io/exemplary/aws/DynamoDBOperations.scala
Scala
apache-2.0
3,352
import scala.reflect.ClassTag type Matcher[A] = A match { case String => String } def patternMatch[A](a: Any)(using tt: ClassTag[Matcher[A]]): Option[Matcher[A]] = { // type T = RDF.Triple[Rdf] a match { case res: Matcher[A] => Some(res) case _ => None } } def patternMatchWithAlias[A](a: Any)(using tt: ClassTag[Matcher[A]]): Option[Matcher[A]] = { type T = Matcher[A] a match { case res: T => Some(res) case _ => None } } @main def main = { println(patternMatch[String]("abc")) println(patternMatchWithAlias[String]("abc")) println(patternMatch[String](1)) println(patternMatchWithAlias[String](1)) }
lampepfl/dotty
tests/pos-special/fatal-warnings/i13433b.scala
Scala
apache-2.0
645
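The test above exercises Scala 3 match types together with ClassTag-based pattern matching. As a minimal, standalone illustration of the reduction involved (not part of the dotty test suite): Matcher[String] reduces to String, so a value of the match type can be bound directly.

type MatcherSketch[A] = A match {
  case String => String
}

@main def matcherSketch() = {
  // The declared type compiles only because MatcherSketch[String] reduces to String.
  val reduced: MatcherSketch[String] = "reduced to String"
  println(reduced)
}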
package at.forsyte.apalache.tla.bmcmt import at.forsyte.apalache.tla.bmcmt.caches.{EqCache, EqCacheSnapshot} import at.forsyte.apalache.tla.bmcmt.implicitConversions._ import at.forsyte.apalache.tla.bmcmt.rewriter.{ConstSimplifierForSmt, Recoverable} import at.forsyte.apalache.tla.bmcmt.types._ import at.forsyte.apalache.tla.lir.convenience.tla import at.forsyte.apalache.tla.lir.{NameEx, NullEx, TlaEx} /** * Generate equality constraints between cells and cache them to avoid redundant constraints. * * @author Igor Konnov */ class LazyEquality(rewriter: SymbStateRewriter) extends StackableContext with Serializable with Recoverable[EqCacheSnapshot] { @transient private lazy val simplifier = new ConstSimplifierForSmt private val eqCache = new EqCache() /** * This method ensure that a pair of its arguments can be safely compared by the SMT equality, * that is, all the necessary constraints have been generated with cacheEqualities. * * @param left a left cell * @param right a right cell * @return tla.eql(left, right), provided that left and right can be compared */ def safeEq(left: ArenaCell, right: ArenaCell): TlaEx = { if (!left.cellType.comparableWith(right.cellType)) { // Trivially not equal due to incomparable types. // As this comparison usually indicates a coding problem, throw an exception here. // If you still think that this is okay to compare variables of different types, insert a check before safeEq. throw new RewriterException("Trivial inequality, as the types are different (check your code): type(%s) = %s, while type(%s) = %s" .format(left.name, left.cellType, right.name, right.cellType), NullEx) } else if (left == right) { tla.bool(true) // this is just true } else { val entry = eqCache.get(left, right) if (entry.isDefined) { eqCache.toTla(left, right, entry.get) } else { // let's add a bit of German here to indicate that it is really dangerous val msg = "VORSICHT! SMT equality should be used only after calling cacheEqualities, unless you know what you are doing." throw new RewriterException(msg, NullEx) } } } /** * Check that the equality constraints were cached for left and right. * Then, if left and right are of comparable types, use SMT equality, * otherwise just return false. The difference between safeEq and cachedEq is that * safeEq is stricter: it does not allow to compare cells of different types at all. * Use cachedEq when you comparisons might involve cells of different types, * and it is clear that these elements cannot be equal. * * @param left a left cell * @param right a right cell * @return depending on the types of the both cells, return either (= left right), or false */ def cachedEq(left: ArenaCell, right: ArenaCell): TlaEx = { if (left == right) { tla.bool(true) // this is just true } else { val entry = eqCache.get(left, right) if (entry.isDefined) { eqCache.toTla(left, right, entry.get) } else if (!left.cellType.comparableWith(right.cellType)) { tla.bool(false) // just false as the types are different } else { // let's add a bit of German here to indicate that it is really dangerous val msg = "VORSICHT! SMT equality should be used only after calling cacheEqualities, unless you know what you are doing." throw new RewriterException(msg, NullEx) } } } /** * Produce equality constraints for each pair in the sequence, so that we can later compare all the pairs as cells * using SMT equality (=). Since equality semantics may require us to rewrite the arena and introduce * new SMT constraints, this method may invoke rewriting rules and modify the symbolic state. 
* * That the equality constraints were introduced for each pair is recorded in the local cache. Thus, the constraints * are generated only once for each pair of cells. * * @param state a symbolic state to start with * @param pairs pairs of cells, for which the equality constraints should be generated * @return a new symbolic state that contains the constraints for every pair in the sequence */ def cacheEqConstraints(state: SymbState, pairs: Traversable[(ArenaCell, ArenaCell)]): SymbState = { rewriter.solverContext.log("; [START] Caching equality constraints for a sequence: " + pairs) def makeOne(state: SymbState, pair: (ArenaCell, ArenaCell)): SymbState = { cacheOneEqConstraint(state, pair._1, pair._2) } val result = pairs.foldLeft(state)(makeOne) rewriter.solverContext.log("; [DONE] Caching equality constraints") result } /** * Given a pair of cells, generate equality constraints and return a new symbolic state * (leaving the original expression in the state unmodified). * * @param state a symbolic state * @param left left cell to compare * @param right right cell to compare * @return a new symbolic state */ def cacheOneEqConstraint(state: SymbState, left: ArenaCell, right: ArenaCell): SymbState = { val cacheEntry = eqCache.get(left, right) if (left == right) { state } else if (cacheEntry.isDefined) { state // do nothing } else if (!left.cellType.comparableWith(right.cellType)) { // cells of incomparable types cannot be equal eqCache.put(left, right, EqCache.FalseEntry()) state } else { // generate constraints val newState = (left.cellType, right.cellType) match { case (UnknownT(), UnknownT()) | (BoolT(), _) | (_, BoolT()) | (IntT(), IntT()) | (ConstT(), ConstT()) => eqCache.put(left, right, EqCache.EqEntry()) state // nothing to do, just use the built-in equality case (FinSetT(_), FinSetT(_)) => mkSetEq(state, left, right) case (FunT(_, _), FunT(_, _)) => mkFunEq(state, left, right) case (RecordT(_), RecordT(_)) => mkRecordEq(state, left, right) case (TupleT(_), TupleT(_)) => mkTupleEq(state, left, right) case (SeqT(_), SeqT(_)) => mkSeqEq(state, left, right) case _ => throw new CheckerException("Unexpected equality test", state.ex) } // return the new state newState } } /** * Cache the equality as the SMT equality. When we know that we can use SMT equality by construction, e.g., * see PICK FROM {S_1, ..., S_n}, we can tell the cache just to use the SMT equality. Use this method with care, * as it can easily produce unsound results! * * @param left a left cell * @param right a right cell */ def cacheAsSmtEqualityByMagic(left: ArenaCell, right: ArenaCell): Unit = { eqCache.put(left, right, EqCache.EqEntry()) } /** * Count the number of valid equalities. Use this method only for debugging purposes, as it is quite slow. 
* @return a pair: the number of valid equalities, and the total number of non-constant equalities */ def countConstantEqualities(): (Int, Int) = { val solver = rewriter.solverContext def isConstant(pred: TlaEx): Boolean = { solver.push() solver.assertGroundExpr(pred) val exEq = solver.sat() solver.pop() solver.push() solver.assertGroundExpr(tla.not(pred)) val exNeq = solver.sat() solver.pop() exEq && !exNeq || exNeq && !exEq } def onEntry(pair: (ArenaCell, ArenaCell), entryAndLevel: (EqCache.CacheEntry, Int)): Int = { entryAndLevel._1 match { case EqCache.EqEntry() => if (isConstant(tla.eql(pair._1, pair._2))) 1 else 0 case EqCache.ExprEntry(pred) => if (isConstant(pred)) 1 else 0 case _ => 0 } } def isNonStatic(pair: (ArenaCell, ArenaCell), entryAndLevel: (EqCache.CacheEntry, Int)): Int = { entryAndLevel._1 match { case EqCache.FalseEntry() => 0 case EqCache.TrueEntry() => 0 case _ => 1 } } val eqMap = eqCache.getMap val nConst = (eqMap map (onEntry _).tupled).sum val nNonStatic = (eqMap map (isNonStatic _).tupled).sum (nConst, nNonStatic) } private def mkSetEq(state: SymbState, left: ArenaCell, right: ArenaCell): SymbState = { if (left.cellType == FinSetT(UnknownT()) && state.arena.getHas(left).isEmpty) { // The statically empty set is a very special case, as its element type is unknown. // Hence, we cannot use SMT equality, as it does not work with different sorts. mkEmptySetEq(state, left, right) } else if (right.cellType == FinSetT(UnknownT()) && state.arena.getHas(right).isEmpty) { mkEmptySetEq(state, right, left) // same here } else { // in general, we need 2 * |X| * |Y| comparisons val leftToRight: SymbState = subsetEq(state, left, right) val rightToLeft: SymbState = subsetEq(leftToRight, right, left) // the type checker makes sure that this holds true assert(left.cellType.signature == right.cellType.signature) // These two sets have the same signature and thus belong to the same sort. // Hence, we can use SMT equality. This equality is needed by uninterpreted functions. val eq = tla.equiv(tla.eql(left, right), tla.and(leftToRight.ex, rightToLeft.ex)) rewriter.solverContext.assertGroundExpr(eq) eqCache.put(left, right, EqCache.EqEntry()) // recover the original expression and theory rightToLeft.setRex(state.ex) } } // statically empty sets should be handled with care private def mkEmptySetEq(state: SymbState, emptySet: ArenaCell, otherSet: ArenaCell): SymbState = { val otherElems = state.arena.getHas(otherSet) if (otherElems.isEmpty) { // That's simple. Two statically empty sets are equal. eqCache.put(emptySet, otherSet, EqCache.TrueEntry()) state } else { // The other set might be empty in some models. Add a predicate. val newState = state.updateArena(_.appendCell(BoolT())) val pred = newState.arena.topCell val emptyEx = tla.and(otherElems.map(e => tla.not(tla.in(e, otherSet))) :_*) rewriter.solverContext.assertGroundExpr(tla.eql(pred, emptyEx)) // this predicate will be later used as an equality test eqCache.put(emptySet, otherSet, EqCache.ExprEntry(pred)) newState } } /** * Check, whether one set is a subset of another set (not a proper one). * This method changed the underlying theory to BoolTheory. * * Since this operation is tightly related to set equality, we moved it here. * * @param state a symbolic state * @param left a left cell that holds a set * @param right a right cell that holds a set * @return a new symbolic state with a (Boolean) predicate equivalent to `left \\subseteq right`. 
*/ def subsetEq(state: SymbState, left: ArenaCell, right: ArenaCell): SymbState = { val leftElems = state.arena.getHas(left) val rightElems = state.arena.getHas(right) if (leftElems.isEmpty) { // SE-SUBSETEQ1 state.setRex(state.arena.cellTrue()) } else if (rightElems.isEmpty) { // SE-SUBSETEQ2 def notIn(le: ArenaCell) = { tla.not(tla.in(le, left)) } val newState = state.updateArena(_.appendCell(BoolT())) val pred = newState.arena.topCell rewriter.solverContext.assertGroundExpr(tla.eql(pred, tla.and(leftElems.map(notIn): _*))) newState.setRex(pred) } else { // SE-SUBSETEQ3 var newState = cacheEqConstraints(state, leftElems cross rightElems) // cache all the equalities def exists(lelem: ArenaCell) = { def inAndEq(relem: ArenaCell) = { simplifier.simplifyShallow(tla.and(tla.in(relem, right), cachedEq(lelem, relem))) } // There are plenty of valid subformulas. Simplify! simplifier.simplifyShallow(tla.or(rightElems.map(inAndEq): _*)) } def notInOrExists(lelem: ArenaCell) = { val notInOrExists = simplifier.simplifyShallow(tla.or(tla.not(tla.in(lelem, left)), exists(lelem))) if (simplifier.isBoolConst(notInOrExists)) { notInOrExists // just return the constant } else { // BUG: this produced OOM on the inductive invariant of Paxos // BUGFIX: push this query to the solver, in order to avoid constructing enormous assertions newState = newState.updateArena(_.appendCell(BoolT())) val pred = newState.arena.topCell rewriter.solverContext.assertGroundExpr(simplifier.simplifyShallow(tla.equiv(pred, notInOrExists))) pred.toNameEx } } val forEachNotInOrExists = simplifier.simplifyShallow(tla.and(leftElems.map(notInOrExists): _*)) newState = newState.updateArena(_.appendCell(BoolT())) val pred = newState.arena.topCell rewriter.solverContext.assertGroundExpr(tla.eql(pred, forEachNotInOrExists)) newState.setRex(pred) } } /** * Take a snapshot and return it * * @return the snapshot */ override def snapshot(): EqCacheSnapshot = { eqCache.snapshot() } /** * Recover a previously saved snapshot (not necessarily saved by this object). * * @param shot a snapshot */ override def recover(shot: EqCacheSnapshot): Unit = { eqCache.recover(shot) } /** * Get the current context level, that is the difference between the number of pushes and pops made so far. * * @return the current level, always non-negative. */ override def contextLevel: Int = { eqCache.contextLevel } /** * Save the current context and push it on the stack for a later recovery with pop. */ override def push(): Unit = { eqCache.push() } /** * Pop the previously saved context. Importantly, pop may be called multiple times and thus it is not sufficient * to save only the latest context. */ override def pop(): Unit = { eqCache.pop() } /** * Pop the context as many times as needed to reach a given level. * * @param n pop n times, if n > 0, otherwise, do nothing */ override def pop(n: Int): Unit = { eqCache.pop(n) } /** * Clean the context */ override def dispose(): Unit = { eqCache.dispose() } /** * Compare two functions. In the new implementation, we just compare the associated relations as sets. 
* @param state * @param leftFun * @param rightFun * @return the new symbolic state */ private def mkFunEq(state: SymbState, leftFun: ArenaCell, rightFun: ArenaCell): SymbState = { val leftRel = state.arena.getCdm(leftFun) val rightRel = state.arena.getCdm(rightFun) val relEq = mkSetEq(state, leftRel, rightRel) rewriter.solverContext.assertGroundExpr(tla.equiv(tla.eql(leftFun, rightFun), tla.eql(leftRel, rightRel))) eqCache.put(leftFun, rightFun, EqCache.EqEntry()) // restore the original expression and theory relEq.setRex(state.ex) } private def mkRecordEq(state: SymbState, leftRec: ArenaCell, rightRec: ArenaCell): SymbState = { val leftType = leftRec.cellType.asInstanceOf[RecordT] val rightType = rightRec.cellType.asInstanceOf[RecordT] val leftDom = state.arena.getDom(leftRec) val rightDom = state.arena.getDom(rightRec) val leftElems = state.arena.getHas(leftRec) val rightElems = state.arena.getHas(rightRec) // the intersection of the keys, as we can assume that the static domains are equal val commonKeys = leftType.fields.keySet.intersect(rightType.fields.keySet) var newState = state def keyEq(key: String): TlaEx = { val leftIndex = leftType.fields.keySet.toList.indexOf(key) val rightIndex = rightType.fields.keySet.toList.indexOf(key) val leftElem = leftElems(leftIndex) val rightElem = rightElems(rightIndex) newState = cacheOneEqConstraint(newState, leftElem, rightElem) val (newArena, keyCell) = rewriter.strValueCache.getOrCreate(newState.arena, key) newState = newState.setArena(newArena) // it is safe to use in directly since: // (1) the record types coincide, // (2) record constructors use RecordDomainCache, // (3) and CherryPick uses pickRecordDomain val membershipTest = tla.in(keyCell, leftDom) // newState = rewriter.rewriteUntilDone(newState.setRex(tla.in(keyCell, leftDom))) // the old way tla.or(tla.not(membershipTest), safeEq(leftElem, rightElem)) } newState = cacheOneEqConstraint(newState, leftDom, rightDom) val eqs = commonKeys.toList map keyEq val cons = if (eqs.isEmpty) safeEq(leftDom, rightDom) else tla.and(safeEq(leftDom, rightDom) +: eqs :_*) rewriter.solverContext.assertGroundExpr(tla.equiv(tla.eql(leftRec, rightRec), cons)) eqCache.put(leftRec, rightRec, EqCache.EqEntry()) // restore the original expression and theory newState.setRex(state.ex) } private def mkTupleEq(state: SymbState, left: ArenaCell, right: ArenaCell): SymbState = { val leftType = left.cellType.asInstanceOf[TupleT] val rightType = right.cellType.asInstanceOf[TupleT] if (!leftType.comparableWith(rightType)) { state } else { var newState = state def elemEq(lelem: ArenaCell, relem: ArenaCell): TlaEx = { newState = cacheOneEqConstraint(newState, lelem, relem) safeEq(lelem, relem) } val leftElems = state.arena.getHas(left) val rightElems = state.arena.getHas(right) val tupleEq = tla.and(leftElems.zip(rightElems).map(p => elemEq(p._1, p._2)) :_*) rewriter.solverContext.assertGroundExpr(tla.equiv(tla.eql(left, right), tupleEq)) eqCache.put(left, right, EqCache.EqEntry()) // restore the original expression and theory newState.setRex(state.ex) } } private def mkSeqEq(state: SymbState, left: ArenaCell, right: ArenaCell): SymbState = { // XXXXabcXX = XabcXX val leftCells = state.arena.getHas(left) val rightCells = state.arena.getHas(right) val (leftStart, leftEnd) = (leftCells.head, leftCells.tail.head) val (rightStart, rightEnd) = (rightCells.head, rightCells.tail.head) val (leftElems, rightElems) = (leftCells.tail.tail, rightCells.tail.tail) var nextState = state def eqPairwise(no: Int): TlaEx = { // Use function 
application here. This may look expensive, but is there any better way? nextState = rewriter.rewriteUntilDone(nextState.setRex(tla.appFun(left, tla.int(no)))) val le = nextState.asCell nextState = rewriter.rewriteUntilDone(nextState.setRex(tla.appFun(right, tla.int(no)))) val re = nextState.asCell nextState = cacheEqConstraints(nextState, (le, re) :: Nil) safeEq(le, re) } val minLen = Math.min(leftElems.size, rightElems.size) val elemsEq = tla.and(1 to minLen map eqPairwise :_*) val sizesEq = tla.eql(tla.minus(leftEnd, leftStart), tla.minus(rightEnd, rightStart)) rewriter.solverContext.assertGroundExpr(tla.equiv(tla.eql(left, right), tla.and(sizesEq, elemsEq))) eqCache.put(left, right, EqCache.EqEntry()) // restore the original expression and theory nextState.setRex(state.ex) } }
konnov/apalache
tla-bmcmt/src/main/scala/at/forsyte/apalache/tla/bmcmt/LazyEquality.scala
Scala
apache-2.0
19,384
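The contract LazyEquality enforces is that equality constraints are generated at most once per pair of cells (cacheOneEqConstraint / cacheEqConstraints) and that safeEq may only be consulted afterwards. The following stripped-down sketch shows that discipline only; cells and SMT constraints are plain strings here, and none of the apalache types are used.

object EqCacheSketch extends App {
  import scala.collection.mutable

  private val cache = mutable.Map.empty[(String, String), String]

  // Analogue of cacheOneEqConstraint: generate the constraint at most once per pair.
  def cacheOneEqConstraint(left: String, right: String): Unit =
    if (!cache.contains((left, right))) {
      println(s"generating SMT constraints for ($left, $right)")
      cache((left, right)) = s"(= $left $right)"
    }

  // Analogue of safeEq: only legal after the pair has been cached.
  def safeEq(left: String, right: String): String =
    cache.getOrElse((left, right),
      throw new IllegalStateException("call cacheOneEqConstraint first"))

  cacheOneEqConstraint("c1", "c2")
  cacheOneEqConstraint("c1", "c2") // second call is a no-op
  println(safeEq("c1", "c2"))      // (= c1 c2)
}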
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.manager.utils.logkafka81 import java.util.Properties import scala.util.matching.Regex import kafka.manager.utils.LogkafkaNewConfigs object Defaults { val Valid = true val FollowLast = true val ReadFromHead = true val BatchSize = 200 val LineDelimiter = 10 // 10 means ascii '\\n' val RemoveDelimiter = true val Topic = "" val Key = "" val Partition = -1 val CompressionCodec= "none" val RequiredAcks = 1 val MessageTimeoutMs = 0 val RegexFilterPattern = "" val LaggingMaxBytes = 0 val RotateLaggingMaxSec = 0 } /** * Configuration settings for a log * @param valid Enable now or not * @param followLast If set to "false", when restarting logkafka process, the log_path formatted with current time will be collect; If set to "true", when restarting logkafka process, the last collecting file will be collected continually * @param readFromHead If set to "false", the first file will be collected from tail; If set to "true", the first file will be collected from head * @param batchSize The batch size of messages to be sent * @param lineDelimiter Delimiter of log file lines * @param removeDelimiter Remove delimiter or not when collecting log file lines * @param topic The topic of messages to be sent * @param key The key of messages to be sent * @param partition The partition of messages to be sent. -1 : random n(>=0): partition n * @param compressionCodec Optional compression method of messages: none, gzip, snappy * @param requiredAcks Number of required acks * @param messageTimeoutMs Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. * @param regexFilterPattern The messages matching this pattern will be dropped. 
* */ case class LogConfig(val valid: Boolean = Defaults.Valid, val followLast: Boolean = Defaults.FollowLast, val readFromHead: Boolean = Defaults.ReadFromHead, val batchSize: Long = Defaults.BatchSize, val lineDelimiter: Int = Defaults.LineDelimiter, val removeDelimiter: Boolean = Defaults.RemoveDelimiter, val topic: String = Defaults.Topic, val key: String = Defaults.Key, val partition: Int = Defaults.Partition, val compressionCodec: String = Defaults.CompressionCodec, val requiredAcks: Int = Defaults.RequiredAcks, val messageTimeoutMs: Long = Defaults.MessageTimeoutMs, val regexFilterPattern: String = Defaults.RegexFilterPattern, val laggingMaxBytes: Long = Defaults.LaggingMaxBytes, val rotateLaggingMaxSec: Long = Defaults.RotateLaggingMaxSec) { def toProps: Properties = { val props = new Properties() import LogConfig._ props.put(ValidProp, valid.toString) props.put(FollowLastProp, followLast.toString) props.put(ReadFromHeadProp, readFromHead.toString) props.put(BatchSizeProp, batchSize.toString) props.put(LineDelimiterProp, lineDelimiter.toString) props.put(RemoveDelimiterProp, removeDelimiter.toString) props.put(TopicProp, topic.toString) props.put(KeyProp, key.toString) props.put(PartitionProp, partition.toString) props.put(CompressionCodecProp, compressionCodec.toString) props.put(RequiredAcksProp, requiredAcks.toString) props.put(MessageTimeoutMsProp, messageTimeoutMs.toString) props.put(RegexFilterPatternProp, regexFilterPattern.toString) props } /** * Get the absolute value of the given number. If the number is Int.MinValue return 0. * This is different from java.lang.Math.abs or scala.math.abs in that they return Int.MinValue (!). */ def abs(n: Int) = if(n == Integer.MIN_VALUE) 0 else math.abs(n) } object LogConfig extends LogkafkaNewConfigs { import kafka.manager.utils.logkafka81.LogkafkaConfigErrors._ import kafka.manager.utils._ val minLineDelimiter = 0 val maxLineDelimiter = 255 val maxRegexFilterPatternLength = 255 val ValidProp = "valid" val FollowLastProp = "follow_last" val ReadFromHeadProp = "read_from_head" val BatchSizeProp = "batchsize" val LineDelimiterProp = "line_delimiter" val RemoveDelimiterProp = "remove_delimiter" val TopicProp = "topic" val KeyProp = "key" val PartitionProp = "partition" val CompressionCodecProp = "compression_codec" val RequiredAcksProp = "required_acks" val MessageTimeoutMsProp = "message_timeout_ms" val RegexFilterPatternProp = "regex_filter_pattern" val LaggingMaxBytesProp = "lagging_max_bytes" val RotateLaggingMaxSecProp = "rotate_lagging_max_sec" val ConfigMaps = Map(ValidProp -> Defaults.Valid.toString, FollowLastProp -> Defaults.FollowLast.toString, ReadFromHeadProp -> Defaults.ReadFromHead.toString, BatchSizeProp -> Defaults.BatchSize.toString, LineDelimiterProp -> Defaults.LineDelimiter.toString, RemoveDelimiterProp -> Defaults.RemoveDelimiter.toString, TopicProp -> Defaults.Topic.toString, KeyProp -> Defaults.Key.toString, PartitionProp -> Defaults.Partition.toString, CompressionCodecProp -> Defaults.CompressionCodec.toString, RequiredAcksProp -> Defaults.RequiredAcks.toString, MessageTimeoutMsProp -> Defaults.MessageTimeoutMs.toString, RegexFilterPatternProp -> Defaults.RegexFilterPattern.toString, LaggingMaxBytesProp -> Defaults.LaggingMaxBytes.toString, RotateLaggingMaxSecProp -> Defaults.RotateLaggingMaxSec.toString) def configMaps = ConfigMaps val ConfigNames = ConfigMaps.keySet def configNames = ConfigNames /** * Parse the given properties instance into a LogConfig object */ def fromProps(props: Properties): LogConfig = { new 
LogConfig(valid = props.getProperty(ValidProp, Defaults.Valid.toString).toBoolean, followLast = props.getProperty(FollowLastProp, Defaults.FollowLast.toString).toBoolean, readFromHead = props.getProperty(ReadFromHeadProp, Defaults.ReadFromHead.toString).toBoolean, batchSize = props.getProperty(BatchSizeProp, Defaults.BatchSize.toString).toLong, lineDelimiter = props.getProperty(LineDelimiterProp, Defaults.LineDelimiter.toString).toInt, removeDelimiter = props.getProperty(RemoveDelimiterProp, Defaults.RemoveDelimiter.toString).toBoolean, topic = props.getProperty(TopicProp, Defaults.Topic.toString).toString, key = props.getProperty(KeyProp, Defaults.Key.toString).toString, partition = props.getProperty(PartitionProp, Defaults.Partition.toString).toInt, compressionCodec = props.getProperty(CompressionCodecProp, Defaults.CompressionCodec.toString).toString, requiredAcks= props.getProperty(RequiredAcksProp, Defaults.RequiredAcks.toString).toInt, messageTimeoutMs = props.getProperty(MessageTimeoutMsProp, Defaults.MessageTimeoutMs.toString).toLong, regexFilterPattern = props.getProperty(RegexFilterPatternProp, Defaults.RegexFilterPattern.toString).toString, laggingMaxBytes = props.getProperty(LaggingMaxBytesProp, Defaults.LaggingMaxBytes.toString).toLong, rotateLaggingMaxSec = props.getProperty(RotateLaggingMaxSecProp, Defaults.RotateLaggingMaxSec.toString).toLong) } /** * Create a log config instance using the given properties and defaults */ def fromProps(defaults: Properties, overrides: Properties): LogConfig = { val props = new Properties(defaults) props.putAll(overrides) fromProps(props) } /** * Check that property names are valid */ def validateNames(props: Properties) { import scala.collection.JavaConverters._ for(name <- props.keys().asScala) require(LogConfig.ConfigNames.asJava.contains(name), "Unknown configuration \\"%s\\".".format(name)) } /** * Check that the given properties contain only valid log config names, and that all values can be parsed. */ def validate(props: Properties) { validateNames(props) validateLineDelimiter(props) validateTopic(props) validateRegexFilterPattern(props) LogConfig.fromProps(LogConfig().toProps, props) // check that we can parse the values } /** * Check that LineDelimiter is reasonable */ private def validateLineDelimiter(props: Properties) { val lineDelimiter = props.getProperty(LineDelimiterProp) if (lineDelimiter == null) return checkCondition(lineDelimiter.toInt >= minLineDelimiter && lineDelimiter.toInt <= maxLineDelimiter, LogkafkaConfigErrors.InvalidLineDelimiter) } /** * Check that Topic is reasonable */ private def validateTopic(props: Properties) { val topic = props.getProperty(TopicProp) require(topic != null , "Topic is null") } /** * Check that is RegexFilterPattern reasonable */ private def validateRegexFilterPattern(props: Properties) { val regexFilterPattern = props.getProperty(RegexFilterPatternProp) if (regexFilterPattern == null) return checkCondition(regexFilterPattern.length <= maxRegexFilterPatternLength, LogkafkaConfigErrors.InvalidRegexFilterPatternLength) val valid = try { s"""$regexFilterPattern""".r true } catch { case e: Exception => false } checkCondition(valid, LogkafkaConfigErrors. 
InvalidRegexFilterPattern) } } object LogkafkaConfigErrors { import kafka.manager.utils.UtilError class InvalidLineDelimiter private[LogkafkaConfigErrors] extends UtilError( "line delimiter is illegal, should be a decimal number between 0 and 255") class InvalidRegexFilterPattern private[LogkafkaConfigErrors] extends UtilError( "regex filter pattern is illegal, does not conform to pcre2") class InvalidRegexFilterPatternLength private[LogkafkaConfigErrors] extends UtilError( "regex filter pattern is illegal, can't be longer than " + LogConfig.maxRegexFilterPatternLength + " characters") val InvalidLineDelimiter = new InvalidLineDelimiter val InvalidRegexFilterPattern = new InvalidRegexFilterPattern val InvalidRegexFilterPatternLength = new InvalidRegexFilterPatternLength }
zheolong/kafka-manager
app/kafka/manager/utils/logkafka81/LogConfig.scala
Scala
apache-2.0
11,523
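fromProps(defaults, overrides) above builds a java.util.Properties whose lookups fall through to the defaults. A small sketch of that layering, with hypothetical key names taken from the config above:

import java.util.Properties

object PropsLayeringSketch extends App {
  val defaults = new Properties()
  defaults.setProperty("topic", "")
  defaults.setProperty("batchsize", "200")

  val props = new Properties(defaults)   // getProperty falls through to defaults
  props.setProperty("topic", "app_log")  // the override

  println(props.getProperty("topic"))     // app_log
  println(props.getProperty("batchsize")) // 200, inherited from defaults
}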
class A extends B {}
class B extends C {}
class C extends D {}
class D extends A {}

object Main {
  def main(args: Array[String]) {
  }
}
tobast/compil-petitscala
tests/typing/bad/testfile-cyclic3-1.scala
Scala
gpl-3.0
134
/* * Skylark * http://skylark.io * * Copyright 2012-2017 Quantarray, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.quantarray.skylark.measure import com.quantarray.skylark.measure.measures._ /** * Volume to Length^n^ converter. * * @author Araik Grigoryan */ trait VolumeToExponentialLengthConverter extends Converter[VolumeMeasure, ExponentialLengthMeasure] { override def apply(from: VolumeMeasure, to: ExponentialLengthMeasure): Option[Double] = ⤇(from, to) match { case`bbl` ⤇ `gal` => Some(31.5) case _ => None } } object VolumeToExponentialLengthConverter { def apply(): VolumeToExponentialLengthConverter = new VolumeToExponentialLengthConverter {} }
quantarray/skylark
skylark-measure/src/main/scala/com/quantarray/skylark/measure/VolumeToExponentialLengthConverter.scala
Scala
apache-2.0
1,225
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.consumer import java.util.Properties import java.util.regex.Pattern import kafka.common.StreamEndException import org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceListener /** * A base consumer used to abstract both old and new consumer * this class should be removed (along with BaseProducer) be removed * once we deprecate old consumer */ trait BaseConsumer { def receive(): BaseConsumerRecord def stop() def cleanup() def commit() } case class BaseConsumerRecord(topic: String, partition: Int, offset: Long, key: Array[Byte], value: Array[Byte]) class NewShinyConsumer(topic: Option[String], whitelist: Option[String], consumerProps: Properties, val timeoutMs: Long = Long.MaxValue) extends BaseConsumer { import org.apache.kafka.clients.consumer.KafkaConsumer import scala.collection.JavaConversions._ val consumer = new KafkaConsumer[Array[Byte], Array[Byte]](consumerProps) if (topic.isDefined) consumer.subscribe(List(topic.get)) else if (whitelist.isDefined) consumer.subscribe(Pattern.compile(whitelist.get), new NoOpConsumerRebalanceListener()) else throw new IllegalArgumentException("Exactly one of topic or whitelist has to be provided.") var recordIter = consumer.poll(0).iterator override def receive(): BaseConsumerRecord = { if (!recordIter.hasNext) { recordIter = consumer.poll(timeoutMs).iterator if (!recordIter.hasNext) throw new ConsumerTimeoutException } val record = recordIter.next BaseConsumerRecord(record.topic, record.partition, record.offset, record.key, record.value) } override def stop() { this.consumer.wakeup() } override def cleanup() { this.consumer.close() } override def commit() { this.consumer.commitSync() } } class OldConsumer(topicFilter: TopicFilter, consumerProps: Properties) extends BaseConsumer { import kafka.serializer.DefaultDecoder val consumerConnector = Consumer.create(new ConsumerConfig(consumerProps)) val stream: KafkaStream[Array[Byte], Array[Byte]] = consumerConnector.createMessageStreamsByFilter(topicFilter, 1, new DefaultDecoder(), new DefaultDecoder()).head val iter = stream.iterator override def receive(): BaseConsumerRecord = { if (!iter.hasNext()) throw new StreamEndException val messageAndMetadata = iter.next BaseConsumerRecord(messageAndMetadata.topic, messageAndMetadata.partition, messageAndMetadata.offset, messageAndMetadata.key, messageAndMetadata.message) } override def stop() { this.consumerConnector.shutdown() } override def cleanup() { this.consumerConnector.shutdown() } override def commit() { this.consumerConnector.commitOffsets } }
samaitra/kafka
core/src/main/scala/kafka/consumer/BaseConsumer.scala
Scala
apache-2.0
3,534
package scruffy.examples import com.sksamuel.scruffy.undertow.ScruffyUndertowHttpHandler import io.undertow.{UndertowOptions, Undertow} /** @author Stephen Samuel */ object Main extends App { val port = 8080 val scruffy = new ScruffyUndertowHttpHandler scruffy.mount(new Test1Endpoint) scruffy.mount(new Test2Endpoint) scruffy.mount(new Test6Endpoint) val server = Undertow .builder() .addHttpListener(port, "localhost") .setHandler(scruffy) .setServerOption(UndertowOptions.ALWAYS_SET_KEEP_ALIVE, java.lang.Boolean.FALSE) .setServerOption(UndertowOptions.ALWAYS_SET_DATE, java.lang.Boolean.TRUE) .setServerOption(UndertowOptions.ENABLE_CONNECTOR_STATISTICS, java.lang.Boolean.FALSE) .setServerOption(UndertowOptions.MAX_CONCURRENT_REQUESTS_PER_CONNECTION, Integer.valueOf(8)) .build() println("Starting Undertow...") server.start() println(s"Started on port [$port]. Interrupt to exit.") }
hamiltont/FrameworkBenchmarks
frameworks/Scala/scruffy/src/main/scala/scruffy/examples/Main.scala
Scala
bsd-3-clause
948
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.common import java.util.concurrent.atomic.AtomicBoolean import kafka.utils.{Logging, ZkUtils} import org.apache.zookeeper.Watcher.Event.KeeperState import org.I0Itec.zkclient.exception.ZkInterruptedException import org.I0Itec.zkclient.{IZkChildListener, IZkStateListener} import org.apache.kafka.common.utils.Time import scala.collection.JavaConverters._ /** * Handle the notificationMessage. */ trait NotificationHandler { def processNotification(notificationMessage: String) } /** * A listener that subscribes to seqNodeRoot for any child changes where all children are assumed to be sequence node * with seqNodePrefix. When a child is added under seqNodeRoot this class gets notified, it looks at lastExecutedChange * number to avoid duplicate processing and if it finds an unprocessed child, it reads its data and calls supplied * notificationHandler's processNotification() method with the child's data as argument. As part of processing these changes it also * purges any children with currentTime - createTime > changeExpirationMs. * * The caller/user of this class should ensure that they use zkUtils.subscribeStateChanges and call processAllNotifications * method of this class from ZkStateChangeListener's handleNewSession() method. This is necessary to ensure that if zk session * is terminated and reestablished any missed notification will be processed immediately. * @param zkUtils * @param seqNodeRoot * @param seqNodePrefix * @param notificationHandler * @param changeExpirationMs * @param time */ class ZkNodeChangeNotificationListener(private val zkUtils: ZkUtils, private val seqNodeRoot: String, private val seqNodePrefix: String, private val notificationHandler: NotificationHandler, private val changeExpirationMs: Long = 15 * 60 * 1000, private val time: Time = Time.SYSTEM) extends Logging { private var lastExecutedChange = -1L private val isClosed = new AtomicBoolean(false) /** * create seqNodeRoot and begin watching for any new children nodes. 
*/ def init() { zkUtils.makeSurePersistentPathExists(seqNodeRoot) zkUtils.subscribeChildChanges(seqNodeRoot, NodeChangeListener) zkUtils.subscribeStateChanges(ZkStateChangeListener) processAllNotifications() } def close() = { isClosed.set(true) } /** * Process all changes */ def processAllNotifications() { val changes = zkUtils.getChildren(seqNodeRoot) processNotifications(changes.sorted) } /** * Process the given list of notifications */ private def processNotifications(notifications: Seq[String]) { if (notifications.nonEmpty) { info(s"Processing notification(s) to $seqNodeRoot") try { val now = time.milliseconds for (notification <- notifications) { val changeId = changeNumber(notification) if (changeId > lastExecutedChange) { val changeZnode = seqNodeRoot + "/" + notification val data = zkUtils.readDataMaybeNull(changeZnode)._1.orNull if (data != null) { notificationHandler.processNotification(data) } else { logger.warn(s"read null data from $changeZnode when processing notification $notification") } lastExecutedChange = changeId } } purgeObsoleteNotifications(now, notifications) } catch { case e: ZkInterruptedException => if (!isClosed.get) throw e } } } /** * Purges expired notifications. * * @param now * @param notifications */ private def purgeObsoleteNotifications(now: Long, notifications: Seq[String]) { for (notification <- notifications.sorted) { val notificationNode = seqNodeRoot + "/" + notification val (data, stat) = zkUtils.readDataMaybeNull(notificationNode) if (data.isDefined) { if (now - stat.getCtime > changeExpirationMs) { debug(s"Purging change notification $notificationNode") zkUtils.deletePath(notificationNode) } } } } /* get the change number from a change notification znode */ private def changeNumber(name: String): Long = name.substring(seqNodePrefix.length).toLong /** * A listener that gets invoked when a node is created to notify changes. */ object NodeChangeListener extends IZkChildListener { override def handleChildChange(path: String, notifications: java.util.List[String]) { try { import scala.collection.JavaConverters._ if (notifications != null) processNotifications(notifications.asScala.sorted) } catch { case e: Exception => error(s"Error processing notification change for path = $path and notification= $notifications :", e) } } } object ZkStateChangeListener extends IZkStateListener { override def handleNewSession() { processAllNotifications } override def handleSessionEstablishmentError(error: Throwable) { fatal("Could not establish session with zookeeper", error) } override def handleStateChanged(state: KeeperState) { debug(s"New zookeeper state: ${state}") } } }
ErikKringen/kafka
core/src/main/scala/kafka/common/ZkNodeChangeNotificationListener.scala
Scala
apache-2.0
6,172
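changeNumber above recovers the sequence number that ZooKeeper appends to sequential znodes, and processNotifications uses it to skip already-handled changes. A minimal sketch of that parsing with a hypothetical prefix value:

object ChangeNumberSketch extends App {
  val seqNodePrefix = "config_change_" // hypothetical prefix for the sketch

  // Mirrors changeNumber(...) above: strip the prefix, parse the padded suffix.
  def changeNumber(name: String): Long =
    name.substring(seqNodePrefix.length).toLong

  println(changeNumber("config_change_0000000007")) // 7

  // Notifications are processed in increasing change-number order.
  val notifications = Seq("config_change_0000000009", "config_change_0000000008")
  println(notifications.sortBy(changeNumber).map(changeNumber)) // List(8, 9)
}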
package com.arcusys.valamis.lesson.scorm.model import com.arcusys.valamis.lesson.scorm.model.manifest._ import org.scalatest.FlatSpec import org.scalatest.matchers.ShouldMatchers class ConditionRuleItemSetTest extends FlatSpec with ShouldMatchers { val condition1 = new ConditionRuleItem(ConditionType.ActivityAttempted) val condition2 = new ConditionRuleItem(ConditionType.ObjectiveStatusKnown) val conditions = Seq(condition1, condition2) "Rule condition set" can "be constructed" in { val set = new RuleConditionSet(conditions, ConditionCombination.All) set.conditions should equal(conditions) set.combination should equal(ConditionCombination.All) } it should "not accept empty condition collection" in { intercept[IllegalArgumentException] { new RuleConditionSet(Nil, ConditionCombination.All) } } it can "be constructed with 'all' helper" in { val set = RuleConditionSet.allOf(condition1, condition2) set.conditions should equal(conditions) set.combination should equal(ConditionCombination.All) } it can "be constructed with 'any' helper" in { val set = RuleConditionSet.anyOf(condition1, condition2) set.conditions should equal(conditions) set.combination should equal(ConditionCombination.Any) } it can "be constructed with 'single' helper" in { val set = RuleConditionSet(condition1) set.conditions should equal(Seq(condition1)) set.combination should equal(ConditionCombination.Any) } }
igor-borisov/valamis
valamis-scorm-lesson/src/test/scala/com/arcusys/valamis/lesson/scorm/model/ConditionRuleItemSetTest.scala
Scala
gpl-3.0
1,492
package com.a.eye.gemini.analysis.executer.model import com.google.gson.JsonObject class RecevierPairsData extends Serializable { var messageId: Long = _ var tcpSeq: String = _ var tcpAck: String = _ var tcpTime: Long = _ var reqData: Map[String, String] = _ var resData: Map[String, String] = _ var isPair: Boolean = _ }
skywalking-developer/gemini
gemini-analysis/src/main/scala/com/a/eye/gemini/analysis/executer/model/RecevierPairsData.scala
Scala
apache-2.0
337
/* Copyright (C) 2008-2014 University of Massachusetts Amherst. This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible) http://factorie.cs.umass.edu, http://github.com/factorie Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cc.factorie.variable import cc.factorie import cc.factorie.model._ import scala.Some /** Typically Variable instances hold their value internally. Alternatively, variable values can be stored in an Assignment: a mapping from variables to their values. Note that this trait doesn't inherit directly from scala.collection.Map because we need a special type signature for 'apply' and 'get'. @author Andrew McCallum */ trait Assignment { /** All variables with values in this Assignment */ def variables: Iterable[Var] /** Return the value assigned to variable v, or throw an Error if the variable is not in this Assignment. */ def apply(v:Var): v.Value /** Return the an Option for the value assigned to variable v. If v is not contained in this Assignment return None. */ def get(v:Var): Option[v.Value] /** Return true if this Assignment has a value for variable v. */ def contains(v:Var): Boolean /** Set variables to the values specified in this assignment */ def setVariables(implicit d:DiffList): Unit = { for (v <- variables) v match { case vv:MutableVar => vv.set(this(vv)) case _ => throw new Error } } } /** An Assignment in which variable-value mappings can be changed. @author Andrew McCallum */ trait MutableAssignment extends Assignment { def update[V<:Var](variable:V, value:V#Value): Unit } /** For LabeledVar return the targetValue, otherwise return the current global assignment. @author Andrew McCallum */ object TargetAssignment extends Assignment { def variables = throw new Error("Cannot list all variables of the TargetAssignment.") def apply(v:Var): v.Value = v match { case vv:LabeledVar => vv.target.value.asInstanceOf[v.Value] case vv:Var => v.value } def get(v:Var): Option[v.Value] = Some(apply(v)) def contains(v:Var) = true override def setVariables(implicit d:DiffList): Unit = throw new Error("Cannot set a TargetAssignment. Instead use variables.setToTarget(DiffList).") } /** A MutableAssignment backed by a HashMap. 
@author Andrew McCallum */ class HashMapAssignment(val ignoreNonPresent: Boolean=true) extends MutableAssignment { private val map = new scala.collection.mutable.HashMap[Var, Any] def this(variables:Var*) = { this(ignoreNonPresent=true); variables.foreach(v => update(v, v.value.asInstanceOf[v.Value])) } def this(variables:Iterable[Var]) = { this(ignoreNonPresent=true); variables.foreach(v => update(v, v.value.asInstanceOf[v.Value])) } def variables = map.keys def apply(v:Var): v.Value = { get(v) match { case Some(va) => va; case None => if (!ignoreNonPresent) throw new Error("Variable not in assignment: " + v) else v.value } } def get(v:Var): Option[v.Value] = if (ignoreNonPresent) map.get(v).map(_.asInstanceOf[v.Value]) else Some(map.getOrElse(v, v.value).asInstanceOf[v.Value]) def update[V<:Var](variable:V, value:V#Value): Unit = map(variable) = value def contains(v:Var) = map.contains(v) } /** An efficient abstract Assignment of one variable. Values for variables not in this assignment are taken from those variables themselves (the "global" assignment). @author Andrew McCallum */ trait AbstractAssignment1[A<:Var] extends Assignment { def _1: A def value1: A#Value def variables = Seq(_1) // TODO Consider making this a Set. def apply(v:Var): v.Value = if (v eq _1) value1.asInstanceOf[v.Value] else v.value def get(v:Var): Option[v.Value] = if (v eq _1) Some(value1.asInstanceOf[v.Value]) else None def contains(v:Var): Boolean = if (v eq _1) true else false override def setVariables(implicit d:DiffList): Unit = _1 match { case v:MutableVar => v.set(apply(v)) } } /** An efficient Assignment of one variable. Values for variables not in this assignment are taken from those variables themselves (the "global" assignment). @author Andrew McCallum */ class Assignment1[A<:Var](val _1:A, var value1:A#Value) extends AbstractAssignment1[A] /** An efficient Assignment of one DiscreteVar. Values for variables not in this assignment are taken from those variables themselves (the "global" assignment). @author Andrew McCallum */ class DiscreteAssignment1[A<:DiscreteVar](override val _1:A, initialIntValue1:Int) extends AbstractAssignment1[A] with MutableAssignment { def this(variable:A, initialValue:A#Value) = this(variable, initialValue.intValue) private var _intValue1 = initialIntValue1 def intValue1: Int = _intValue1 def intValue1_=(i:Int): Unit = _intValue1 = i def value1: A#Value = _1.domain(_intValue1).asInstanceOf[A#Value] def value1_=(v:A#Value): Unit = _intValue1 = v.intValue //def update[V<:Var, U<:V#Value](variable:V, value:U): Unit = if (variable eq _1) _intValue1 = value.asInstanceOf[DiscreteValue].intValue else throw new Error("Cannot update DiscreteAssignment1 value for variable not present.") def update[V<:Var](variable:V, value:V#Value): Unit = if (variable eq _1) _intValue1 = value.asInstanceOf[DiscreteValue].intValue else throw new Error("Cannot update DiscreteAssignment1 value for variable not present.") } /** An efficient abstract Assignment of two variables. Values for variables not in this assigment are taken from those variables themselves (the "global" assignment). 
@author Andrew McCallum */ trait AbstractAssignment2[A<:Var,B<:Var] extends Assignment { def _1: A def _2: B def value1: A#Value def value2: B#Value def variables = Seq(_1, _2) def apply(v:Var): v.Value = if (v eq _1) value1.asInstanceOf[v.Value] else if (v eq _2) value2.asInstanceOf[v.Value] else v.value // throw new Error("Variable not present: "+v) def get(v:Var): Option[v.Value] = if (v eq _1) Some(value1.asInstanceOf[v.Value]) else if (v eq _2) Some(value2.asInstanceOf[v.Value]) else None def contains(v:Var): Boolean = if ((v eq _1) || (v eq _2)) true else false override def setVariables(implicit d:DiffList): Unit = { _1 match { case v:MutableVar => v.set(value1.asInstanceOf[v.Value]) } _2 match { case v:MutableVar => v.set(value2.asInstanceOf[v.Value]) } } } /** An efficient Assignment of two variables. Values for variables not in this assignment are taken from those variables themselves (the "global" assignment). @author Andrew McCallum */ class Assignment2[A<:Var,B<:Var](val _1:A, var value1:A#Value, val _2:B, var value2:B#Value) extends AbstractAssignment2[A,B] /** An efficient abstract Assignment of three variables. Values for variables not in this assignment are taken from those variables themselves (the "global" assignment). @author Andrew McCallum */ trait AbstractAssignment3[A<:Var,B<:Var,C<:Var] extends Assignment { def _1: A def _2: B def _3: C def value1: A#Value def value2: B#Value def value3: C#Value def variables = Seq(_1, _2, _3) def apply(v:Var): v.Value = if (v eq _1) value1.asInstanceOf[v.Value] else if (v eq _2) value2.asInstanceOf[v.Value] else if (v eq _3) value3.asInstanceOf[v.Value] else v.value.asInstanceOf[v.Value] // throw new Error("Variable not present: "+v) def get(v:Var): Option[v.Value] = if (v eq _1) Some(value1.asInstanceOf[v.Value]) else if (v eq _2) Some(value2.asInstanceOf[v.Value]) else if (v eq _3) Some(value3.asInstanceOf[v.Value]) else None def contains(v:Var): Boolean = if ((v eq _1) || (v eq _2) || (v eq _3)) true else false override def setVariables(implicit d:DiffList): Unit = { _1 match { case v:MutableVar => v.set(value1.asInstanceOf[v.Value]) } _2 match { case v:MutableVar => v.set(value2.asInstanceOf[v.Value]) } _3 match { case v:MutableVar => v.set(value3.asInstanceOf[v.Value]) } } } /** An efficient Assignment of three variables. Values for variables not in this assignment are taken from those variables themselves (the "global" assignment). @author Andrew McCallum */ class Assignment3[A<:Var,B<:Var,C<:Var](val _1:A, var value1:A#Value, val _2:B, var value2:B#Value, val _3:C, var value3:C#Value) extends AbstractAssignment3[A,B,C] /** An efficient abstract Assignment of three variables. Values for variables not in this assignment are taken from those variables themselves (the "global" assignment). 
@author Andrew McCallum */ trait AbstractAssignment4[A<:Var,B<:Var,C<:Var,D<:Var] extends Assignment { def _1: A def _2: B def _3: C def _4: D def value1: A#Value def value2: B#Value def value3: C#Value def value4: D#Value def variables = Seq(_1, _2, _3, _4) def apply(v:Var): v.Value = if (v eq _1) value1.asInstanceOf[v.Value] else if (v eq _2) value2.asInstanceOf[v.Value] else if (v eq _3) value3.asInstanceOf[v.Value] else if (v eq _4) value4.asInstanceOf[v.Value] else v.value.asInstanceOf[v.Value] // throw new Error("Variable not present: "+v) def get(v:Var): Option[v.Value] = if (v eq _1) Some(value1.asInstanceOf[v.Value]) else if (v eq _2) Some(value2.asInstanceOf[v.Value]) else if (v eq _3) Some(value3.asInstanceOf[v.Value]) else if (v eq _4) Some(value4.asInstanceOf[v.Value]) else None def contains(v:Var): Boolean = if ((v eq _1) || (v eq _2) || (v eq _3) || (v eq _4)) true else false override def setVariables(implicit d:DiffList): Unit = { _1 match { case v:MutableVar => v.set(value1.asInstanceOf[v.Value]) } _2 match { case v:MutableVar => v.set(value2.asInstanceOf[v.Value]) } _3 match { case v:MutableVar => v.set(value3.asInstanceOf[v.Value]) } _4 match { case v:MutableVar => v.set(value4.asInstanceOf[v.Value]) } } } /** An efficient Assignment of four variables. Values for variables not in this assignment are taken from those variables themselves (the "global" assignment). @author Andrew McCallum */ class Assignment4[A<:Var,B<:Var,C<:Var,D<:Var](val _1:A, var value1:A#Value, val _2:B, var value2:B#Value, val _3:C, var value3:C#Value, val _4:D, var value4:D#Value) extends AbstractAssignment4[A,B,C,D] // TODO Consider making this inherit from MutableAssignment -akm /** An Assignment whose values are those stored inside the variables themselves. @author Andrew McCallum */ object GlobalAssignment extends Assignment { def variables = throw new Error("Cannot list all variables of the global Assignment.") def apply(v:Var): v.Value = v.value def get(v:Var): Option[v.Value] = Some(v.value) def contains(v:Var) = true override def setVariables(implicit d:DiffList): Unit = {} } /** An Assignment backed by a sequence of assignments. The returned value will be from the first Assignment in the sequence to contain the variable. @author Andrew McCallum */ class AssignmentStack(val assignment:Assignment, val next:AssignmentStack = null) extends Assignment { def variables = assignment.variables ++ next.variables protected def apply(v:Var, s:AssignmentStack): v.Value = if (s.next eq null) s.assignment(v) else s.assignment.get(v).getOrElse(apply(v, s.next)) def apply(v:Var): v.Value = apply(v, this) /** Return an Option for the value assigned to variable v. If v is not contained in this Assignment return None. */ def get(v: Var, s: AssignmentStack): Option[v.Value] = { val o = assignment.get(v) if (o != None) o else if (s.next ne null) s.next.get(v).asInstanceOf else None } def get(v:Var): Option[v.Value] = get(v, this) def contains(v:Var) = if (assignment.contains(v)) true else next.contains(v) /** Returns a new Assignment stack, the result of prepending Assignment a.
*/ def +:(a:Assignment): AssignmentStack = new AssignmentStack(a, this) } /** Allows an iterator over the assignments to the neighbors of a factor (optionally specifying the variables that should vary) @author Sameer Singh */ @deprecated("May be removed in future due to inefficiency.", "Before 2014-11-17") object AssignmentIterator { def assignments1[N1 <: Var](f1: Factor1[N1], varying: Set[Var]): Iterator[Assignment] = assignments1(f1._1, varying) def assignments1[N1 <: Var](v1:N1, varying: Set[Var]): Iterator[Assignment] = { if (varying(v1)) //v1.domain.iterator.map(value => new Assignment1(v1, value.asInstanceOf[v1.Value])) v1 match { case v1:DiscreteVar => v1.domain.iterator.map(value => new Assignment1(v1, value.asInstanceOf)) } else Iterator.empty } def assignments2[N1 <: Var, N2 <: Var](f2: Factor2[N1, N2], varying: Set[Var]): Iterator[Assignment] = assignments2(f2._1, f2._2, varying) def assignments2[N1 <: Var, N2 <: Var](v1:N1, v2:N2, varying: Set[Var]): Iterator[Assignment] = { val values1 = if (varying.contains(v1)) v1.asInstanceOf[DiscreteVar].domain else Seq(v1.value.asInstanceOf[DiscreteValue]) val values2 = if (varying.contains(v2)) v2.asInstanceOf[DiscreteVar].domain else Seq(v2.value.asInstanceOf[DiscreteValue]) (for (val1 <- values1; val2 <- values2) yield new Assignment2(v1, val1.asInstanceOf[v1.Value], v2, val2.asInstanceOf[v2.Value])).iterator } def assignments3[N1 <: Var, N2 <: Var, N3 <: Var](f3: Factor3[N1, N2, N3], varying: Set[Var]): Iterator[Assignment] = assignments3(f3._1, f3._2, f3._3, varying) def assignments3[N1 <: Var, N2 <: Var, N3 <: Var](v1:N1, v2:N2, v3:N3, varying: Set[Var]): Iterator[Assignment] = { val values1 = if (varying.contains(v1)) v1.asInstanceOf[DiscreteVar].domain else Seq(v1.value.asInstanceOf[DiscreteValue]) val values2 = if (varying.contains(v2)) v2.asInstanceOf[DiscreteVar].domain else Seq(v2.value.asInstanceOf[DiscreteValue]) val values3 = if (varying.contains(v3)) v3.asInstanceOf[DiscreteVar].domain else Seq(v3.value.asInstanceOf[DiscreteValue]) (for (val1 <- values1; val2 <- values2; val3 <- values3) yield new Assignment3(v1, val1.asInstanceOf[v1.Value], v2, val2.asInstanceOf[v2.Value], v3, val3.asInstanceOf[v3.Value])).iterator } def assignments4[N1 <: Var, N2 <: Var, N3 <: Var, N4 <: Var](f4: Factor4[N1, N2, N3, N4], varying: Set[Var]): Iterator[Assignment] = assignments4(f4._1, f4._2, f4._3, f4._4, varying) def assignments4[N1 <: Var, N2 <: Var, N3 <: Var, N4 <: Var](v1:N1, v2:N2, v3:N3, v4:N4, varying: Set[Var]): Iterator[Assignment] = { val values1 = if (varying.contains(v1)) v1.asInstanceOf[DiscreteVar].domain else Seq(v1.value.asInstanceOf[DiscreteValue]) val values2 = if (varying.contains(v2)) v2.asInstanceOf[DiscreteVar].domain else Seq(v2.value.asInstanceOf[DiscreteValue]) val values3 = if (varying.contains(v3)) v3.asInstanceOf[DiscreteVar].domain else Seq(v3.value.asInstanceOf[DiscreteValue]) val values4 = if (varying.contains(v4)) v4.asInstanceOf[DiscreteVar].domain else Seq(v4.value.asInstanceOf[DiscreteValue]) (for (val1 <- values1; val2 <- values2; val3 <- values3; val4 <- values4) yield new Assignment4(v1, val1.asInstanceOf[v1.Value], v2, val2.asInstanceOf[v2.Value], v3, val3.asInstanceOf[v3.Value], v4, val4.asInstanceOf[v4.Value])).iterator } def assignments(vars: Seq[Var]): Iterator[Assignment] = { if(vars.length == 1) assignments1(vars.head, vars.toSet) else if(vars.length == 2) assignments2(vars(0), vars(1), vars.toSet) else if(vars.length == 3) assignments3(vars(0), vars(1), vars(2), vars.toSet) else 
if(vars.length == 4) assignments4(vars(0), vars(1), vars(2), vars(3), vars.toSet) else throw new Error ("Too many variables to iterate over (>4): " + vars.length) } def assignments(f: Factor, varying: Set[Var]): Iterator[Assignment] = { f match { // Factor 1 case f1: Factor1[_] => assignments1(f1, varying) // Factor 2 case f2: Factor2[_, _] => assignments2(f2, varying) // Factor 3 case f3: Factor3[_, _, _] => assignments3(f3, varying) // Factor 4 case f4: Factor4[_, _, _, _] => assignments4(f4, varying) } } }
hlin117/factorie
src/main/scala/cc/factorie/variable/Assignment.scala
Scala
apache-2.0
16,416
/*********************************************************************** * Copyright (c) 2013-2020 Commonwealth Computer Research, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, Version 2.0 * which accompanies this distribution and is available at * http://www.opensource.org/licenses/apache2.0.php. ***********************************************************************/ package org.locationtech.geomesa.kudu.tools.ingest import com.beust.jcommander.Parameters import org.locationtech.geomesa.kudu.data.KuduDataStore import org.locationtech.geomesa.kudu.tools.KuduDataStoreCommand import org.locationtech.geomesa.kudu.tools.KuduDataStoreCommand.KuduParams import org.locationtech.geomesa.kudu.tools.ingest.KuduDeleteFeaturesCommand.KuduDeleteFeaturesParams import org.locationtech.geomesa.tools.data.DeleteFeaturesCommand import org.locationtech.geomesa.tools.data.DeleteFeaturesCommand.DeleteFeaturesParams class KuduDeleteFeaturesCommand extends DeleteFeaturesCommand[KuduDataStore] with KuduDataStoreCommand { override val params = new KuduDeleteFeaturesParams } object KuduDeleteFeaturesCommand { @Parameters(commandDescription = "Delete features from a GeoMesa schema") class KuduDeleteFeaturesParams extends DeleteFeaturesParams with KuduParams }
aheyne/geomesa
geomesa-kudu/geomesa-kudu-tools/src/main/scala/org/locationtech/geomesa/kudu/tools/ingest/KuduDeleteFeaturesCommand.scala
Scala
apache-2.0
1,356
/** * Copyright 2017 Interel * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package core3.http.controllers.local import com.typesafe.config.Config import core3.database.containers.core.LocalUser import core3.database.containers.core.LocalUser.UserType import core3.database.dals.DatabaseAbstractionLayer import core3.http.controllers.ServiceControllerBase import core3.http.handlers import core3.security.LocalAuthUserToken import core3.utils.ActionScope import play.api.cache.SyncCacheApi import play.api.mvc.{Action, AnyContent, Request, Result} import play.api.{Environment, Logger} import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future} import scala.util.control.NonFatal /** * Play controller definition for implementing non-user services. * <br><br> * Notes: * - Uses a local user database as an authentication and authorization provider. * - Provides action generators for user- and client-aware actions. * * @param cache the session cache to be used\\ * @param authConfig authentication configuration * @param db the database to be used for querying local users */ class ServiceController(cache: SyncCacheApi, authConfig: Config, db: DatabaseAbstractionLayer) (implicit environment: Environment, ec: ExecutionContext) extends ServiceControllerBase[LocalAuthUserToken] { private val instanceSalt = authConfig.getString("instanceSalt") private val passwordIterations = authConfig.getInt("passwordIterations") private val keyLength = authConfig.getInt("keyLength") private val placeholderPasswordSize = authConfig.getInt("placeholderPasswordSize") private val placeholderSaltSize = authConfig.getInt("saltSize") private val sessionTokenSize = authConfig.getInt("sessionTokenSize") private val localTokenExpiration = authConfig.getInt("localTokenExpiration").minutes private val random = new java.security.SecureRandom private val auditLogger = Logger("audit") private def checkParameterSize(parameterName: String, paramSize: Int, minSize: Int): Unit = { if (paramSize <= minSize) { auditLogger.warn(s"core3.http.controllers.local.ServiceController::() > The size of configuration parameter [$parameterName] may be too small for adequate security!") } } checkParameterSize("instanceSalt", instanceSalt.length, 64) checkParameterSize("passwordIterations", passwordIterations, 10000) checkParameterSize("keyLength", keyLength, 64) checkParameterSize("placeholderPasswordSize", placeholderPasswordSize, 64) checkParameterSize("saltSize", placeholderSaltSize, 64) checkParameterSize("sessionTokenSize", sessionTokenSize, 128) override def UserAwareAction( requiredScope: ActionScope, okHandler: (Request[AnyContent], LocalAuthUserToken) => Future[Result], unauthorizedHandler: Option[(Request[AnyContent]) => Future[Result]], forbiddenHandler: Option[(Request[AnyContent]) => Future[Result]] ): Action[AnyContent] = { Action.async { request => val callUnauthorizedHandler: () => Future[Result] = () => { unauthorizedHandler match { case Some(handler) => handler(request) case None => handlers.JSON.unauthorized(request) } } 
val clientToken = cache.get[LocalAuthUserToken](s"${request.headers.get(core3.http.HeaderNames.CLIENT_SESSION_TOKEN).getOrElse("")}_data") val delegatedUserID = request.headers.get(core3.http.HeaderNames.USER_DELEGATION_TOKEN).getOrElse("None") clientToken match { case Some(token) => //client authenticated if (token.permissions.contains(requiredScope)) { val cachedUserToken = cache.get[LocalAuthUserToken](s"${delegatedUserID}_data") cachedUserToken match { case Some(userToken) => auditLogger.info(s"core3.http.controllers.local.ServiceController::UserAwareAction > Action [${request.method}] @ [${request.uri}] " + s"requested from client [${token.userID}] @ [${request.remoteAddress}] on behalf of user [${userToken.userID}] was accepted.") okHandler(request, userToken) case None => val userQuery = db.queryDatabase("LocalUser", "getByUserID", Map("userID" -> delegatedUserID)).map { result => result.headOption match { case Some(user) => user.asInstanceOf[LocalUser] case None => throw new RuntimeException(s"Failed to retrieve user with id [$delegatedUserID]") } } userQuery.flatMap { user => //authentication successful val userToken = LocalAuthUserToken(user, "none") cache.set(s"${delegatedUserID}_data", userToken, localTokenExpiration) auditLogger.info(s"core3.http.controllers.local.ServiceController::UserAwareAction > Action [${request.method}] @ [${request.uri}] " + s"requested from client [${token.userID}] @ [${request.remoteAddress}] on behalf of user [${user.userID}] was accepted.") okHandler(request, userToken) }.recoverWith { case NonFatal(e) => //user data is missing auditLogger.error(s"core3.http.controllers.local.ServiceController::UserAwareAction > Action [${request.method}] @ [${request.uri}] " + s"requested with missing user data from client [${token.userID}] @ [${request.remoteAddress}] on behalf of user [$delegatedUserID] failed with message [${e.getMessage}].") callUnauthorizedHandler() } } } else { //client not allowed to request action auditLogger.error(s"core3.http.controllers.local.ServiceController::UserAwareAction > Action [${request.method}] @ [${request.uri}] " + s"requested from client [${token.userID}] @ [${request.remoteAddress}] on behalf of user [unknown] was not allowed.") forbiddenHandler match { case Some(handler) => handler(request) case None => handlers.JSON.forbidden(request) } } case None => core3.security.getBasicAuthCredentials(request) match { case Some((userID, password)) => core3.security.authenticateUser(userID, password, random, db, authConfig).flatMap { client => if (client.userType == UserType.Service) { val sessionToken = core3.security.getRandomString(sessionTokenSize, random) val clientToken = LocalAuthUserToken(client, sessionToken) cache.set(s"${sessionToken}_data", clientToken, localTokenExpiration) if(clientToken.permissions.contains(requiredScope)) { val userQuery = db.queryDatabase("LocalUser", "getByUserID", Map("userID" -> delegatedUserID)).map { result => result.headOption match { case Some(user) => user.asInstanceOf[LocalUser] case None => throw new RuntimeException(s"Failed to retrieve user with id [$delegatedUserID]") } } userQuery.flatMap { user => //authentication successful val userToken = LocalAuthUserToken(user, "none") cache.set(s"${delegatedUserID}_data", userToken, localTokenExpiration) auditLogger.info(s"core3.http.controllers.local.ServiceController::UserAwareAction > Action [${request.method}] @ [${request.uri}] " + s"requested from client [${client.userID}] @ [${request.remoteAddress}] on behalf of user [${user.userID}] was 
accepted.") okHandler(request, userToken).map(_.withHeaders(core3.http.HeaderNames.CLIENT_SESSION_TOKEN -> sessionToken)) }.recoverWith { case NonFatal(e) => //user data is missing auditLogger.error(s"core3.http.controllers.local.ServiceController::UserAwareAction > Action [${request.method}] @ [${request.uri}] " + s"requested with missing user data from client [${client.userID}] @ [${request.remoteAddress}] on behalf of user [$delegatedUserID] failed with message [${e.getMessage}].") callUnauthorizedHandler() } } else { //client not allowed to request action auditLogger.error(s"core3.http.controllers.local.ServiceController::UserAwareAction > Action [${request.method}] @ [${request.uri}] " + s"requested from client [${clientToken.userID}] @ [${request.remoteAddress}] on behalf of user [unknown] was not allowed.") (forbiddenHandler match { case Some(handler) => handler(request) case None => handlers.JSON.forbidden(request) }).map(_.withHeaders(core3.http.HeaderNames.CLIENT_SESSION_TOKEN -> sessionToken)) } } else { throw new RuntimeException(s"User [${client.userID}] with unexpected type [${client.userType}] attempted login") } }.recoverWith { case NonFatal(e) => //client authentication failed auditLogger.error(s"core3.http.controllers.local.ServiceController::UserAwareAction > Action [${request.method}] @ [${request.uri}] " + s"failed authentication for client [unknown] @ [${request.remoteAddress}] with message [${e.getMessage}].") e.printStackTrace() callUnauthorizedHandler() } case None => //client auth data is missing auditLogger.error(s"core3.http.controllers.local.ServiceController::UserAwareAction > Action [${request.method}] @ [${request.uri}] " + s"requested with missing authentication data from client [unknown] @ [${request.remoteAddress}] on behalf of user [unknown].") callUnauthorizedHandler() } } } } override def ClientAwareAction( requiredScope: ActionScope, okHandler: (Request[AnyContent], String) => Future[Result], unauthorizedHandler: Option[(Request[AnyContent]) => Future[Result]], forbiddenHandler: Option[(Request[AnyContent]) => Future[Result]] ): Action[AnyContent] = { Action.async { request => val callUnauthorizedHandler: () => Future[Result] = () => { unauthorizedHandler match { case Some(handler) => handler(request) case None => handlers.JSON.unauthorized(request) } } val clientToken = cache.get[LocalAuthUserToken](s"${request.headers.get(core3.http.HeaderNames.CLIENT_SESSION_TOKEN).getOrElse("")}_data") clientToken match { case Some(token) => //client authenticated if (token.permissions.contains(requiredScope)) { //client allowed to request action auditLogger.info(s"core3.http.controllers.local.ServiceController::ClientAwareAction > Action [${request.method}] @ [${request.uri}] " + s"requested from client [${token.userID}] @ [${request.remoteAddress}] was accepted.") okHandler(request, token.userID) } else { //client not allowed to request action auditLogger.error(s"core3.http.controllers.local.ServiceController::ClientAwareAction > Action [${request.method}] @ [${request.uri}] " + s"requested from client [${token.userID}] @ [${request.remoteAddress}] was not allowed.") forbiddenHandler match { case Some(handler) => handler(request) case None => handlers.JSON.forbidden(request) } } case None => core3.security.getBasicAuthCredentials(request) match { case Some((userID, password)) => core3.security.authenticateUser(userID, password, random, db, authConfig).flatMap { client => if (client.userType == UserType.Service) { val sessionToken = 
core3.security.getRandomString(sessionTokenSize, random) val clientToken = LocalAuthUserToken(client, sessionToken) cache.set(s"${sessionToken}_data", clientToken, localTokenExpiration) if (clientToken.permissions.contains(requiredScope)) { //client allowed to request action auditLogger.info(s"core3.http.controllers.local.ServiceController::ClientAwareAction > Action [${request.method}] @ [${request.uri}] " + s"requested from client [${clientToken.userID}] @ [${request.remoteAddress}] was accepted.") okHandler(request, clientToken.userID).map(_.withHeaders(core3.http.HeaderNames.CLIENT_SESSION_TOKEN -> sessionToken)) } else { //client not allowed to request action auditLogger.error(s"core3.http.controllers.local.ServiceController::ClientAwareAction > Action [${request.method}] @ [${request.uri}] " + s"requested from client [${clientToken.userID}] @ [${request.remoteAddress}] was not allowed.") (forbiddenHandler match { case Some(handler) => handler(request) case None => handlers.JSON.forbidden(request) }).map(_.withHeaders(core3.http.HeaderNames.CLIENT_SESSION_TOKEN -> sessionToken)) } } else { throw new RuntimeException(s"User [${client.userID}] with unexpected type [${client.userType}] attempted login") } }.recoverWith { case NonFatal(e) => //client authentication failed auditLogger.error(s"core3.http.controllers.local.ServiceController::ClientAwareAction > Action [${request.method}] @ [${request.uri}] " + s"failed authentication for client [unknown] @ [${request.remoteAddress}] with message [${e.getMessage}].") e.printStackTrace() callUnauthorizedHandler() } case None => //client auth data is missing auditLogger.error(s"core3.http.controllers.local.ServiceController::ClientAwareAction > Action [${request.method}] @ [${request.uri}] " + s"requested with missing client token from client [unknown] @ [${request.remoteAddress}].") callUnauthorizedHandler() } } } } override def PublicAction(okHandler: (Request[AnyContent], Option[String]) => Future[Result]): Action[AnyContent] = { Action.async { request => val clientID = cache.get[LocalAuthUserToken](s"${request.headers.get(core3.http.HeaderNames.CLIENT_SESSION_TOKEN).getOrElse("")}_data").map(_.userID) clientID match { case Some(id) => auditLogger.info(s"core3.http.controllers.local.ServiceController::PublicAction > Access to [${request.method}] @ [${request.uri}] " + s"allowed for client [$id] @ [${request.remoteAddress}].") case None => auditLogger.info(s"core3.http.controllers.local.ServiceController::PublicAction > Access to [${request.method}] @ [${request.uri}] " + s"allowed for client [unknown] @ [${request.remoteAddress}].") } okHandler(request, clientID) } } }
Interel-Group/core3
src/main/scala/core3/http/controllers/local/ServiceController.scala
Scala
apache-2.0
16,886
package ch.fram.medlineGeo.explore import javax.inject.Inject import ch.fram.medlineGeo.explore.utils.DiskCache import play.api.mvc.{Action, Controller} /** * Created by alex on 28/09/15. */ class CountryCountController @Inject()(cached: DiskCache) extends Controller { def index = Action { Ok("CountryCountController") } def countByYear(year: Int) = //cached(req => "rest-" + req.uri) { Action { req => val ret = cached.getOrElse(req.uri)("[\\n" + CountryCountService.countByYear(year).toJSON.collect.mkString(",\\n")+ "\\n]") Ok(ret ).as("application/json") } // } }
alexmasselot/medlineGeoBackend
app/ch/fram/medlineGeo/explore/CountryCountController.scala
Scala
mit
608
package scala_docs.zipfile import java.util.zip.ZipEntry sealed trait CompressionMethod object CompressionMethod { case object Stored extends CompressionMethod case object Deflated extends CompressionMethod def apply(i: Int): Option[CompressionMethod] = i match { case -1 => None case ZipEntry.STORED => Some(Stored) case ZipEntry.DEFLATED => Some(Deflated) case _ => throw new Exception(s"Unrecognized compression mode: $i") } }
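// Illustrative usage sketch (the object and helper names below are hypothetical).
// It relies only on CompressionMethod.apply above and the ZipEntry import at the
// top of this file: ZipEntry.getMethod returns -1 until the entry has been read,
// which maps to None.
object CompressionMethodUsage {

  /** Human-readable description of how a zip entry is stored. */
  def describe(entry: ZipEntry): String =
    CompressionMethod(entry.getMethod) match {
      case Some(CompressionMethod.Stored)   => s"${entry.getName}: stored without compression"
      case Some(CompressionMethod.Deflated) => s"${entry.getName}: deflate-compressed"
      case None                             => s"${entry.getName}: compression method not known yet"
    }
}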
chris-martin/scala-docs
src/main/scala/scala_docs/zipfile/CompressionMethod.scala
Scala
cc0-1.0
460
package com.danielwestheide.kontextfrei import org.apache.spark.Partitioner object DummyRangePartitioner extends Partitioner { override def numPartitions: Int = 2 override def getPartition(key: Any): Int = { key match { case x: Int => if (x < 0) 0 else 1 case _ => 0 } } }
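// Illustrative usage sketch (the object name is hypothetical). It exercises the
// partitioning rule above: negative Int keys map to partition 0, non-negative
// Ints to partition 1, and any non-Int key falls back to partition 0.
object DummyRangePartitionerUsage {
  def main(args: Array[String]): Unit = {
    assert(DummyRangePartitioner.numPartitions == 2)
    assert(DummyRangePartitioner.getPartition(-5) == 0)
    assert(DummyRangePartitioner.getPartition(7) == 1)
    assert(DummyRangePartitioner.getPartition("not an Int") == 0)
  }
}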
dwestheide/kontextfrei
core/src/test/scala/com/danielwestheide/kontextfrei/DummyRangePartitioner.scala
Scala
apache-2.0
326
package users /** * Created by harsh on 10/8/14. */ //import java.util.Enumeration import address.Address import enumspckg.AddressType.AddressType import enumspckg.AuthType.AuthType import enumspckg.Country.Country import enumspckg.Gender.Gender import enumspckg.Industry.Industry import enumspckg.MaritalStatus._ import enumspckg.Registration.Registration import enumspckg.Salutation.Salutation import enumspckg.SocialNetwork.SocialNetwork import enumspckg.State.State import enumspckg.Status.Status import info.AdditionalInfo import org.joda.time.DateTime import utils.UserMetaData case class Contact(name: String, email: String, mobile: String, telephone: String, fax: Option[String]) case class Authentication(password: String, hasher: String, type_of_auth: AuthType, active: Boolean) trait ID { def id: Long } trait User { def username: String def status: Status } trait BaseUser extends User { def metadata: UserMetaData //def id:ID def active: Boolean } case class Guest(username: String, status: Status, metadata: UserMetaData, id: Long, active: Boolean) extends BaseUser with ID case class Consumer(first_name: String, last_name: String, username: String, city: String, profession: String, address: Address, country: Country, metadata: UserMetaData, state: String, wedding_anniversary: DateTime, active: Boolean, dob: DateTime, salutation: Salutation, mobile: String, gender: Gender, status: Status, telephone: String, email: String, avatar: String, marital_status: MaritalStatus, id: Long) extends BaseUser with AdditionalInfo with ID case class TenantUser(username: String, status: Status, organisation: String, url: String, industry: Industry, contacts: Seq[Contact], TAN: String, company_size: (Int, Int), company_registration: Registration, social_links: SocialNetwork, id: Long) extends User with ID
hardmettle/slick-postgress-samples
app/models/users/GlobalUsers.scala
Scala
apache-2.0
1,990
package hercules.config.masters /** * TODO I'm not sure that this is necessary. Maybe better to just use * the Typesafe Config directly instead? /JD 20141113 * * Configure the master actor * * @param enablePersistence whether persistence is enabled for the master actor. * @param snapshotInterval how often to snapshot the state. */ case class MasterActorConfig(enablePersistence: Boolean, snapshotInterval: Int) { }
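// Illustrative usage sketch (the values below are arbitrary placeholders); in
// practice they would come from the application's Typesafe Config, as the TODO
// above suggests.
object MasterActorConfigUsage {
  val config = MasterActorConfig(enablePersistence = true, snapshotInterval = 100)
}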
johandahlberg/hercules
src/main/scala/hercules/config/masters/MasterActorConfig.scala
Scala
mit
348
package spark.scheduler.cluster import spark.TaskState.TaskState import java.nio.ByteBuffer import spark.util.SerializableBuffer sealed trait StandaloneClusterMessage extends Serializable // Master to slaves case class LaunchTask(task: TaskDescription) extends StandaloneClusterMessage case class RegisteredSlave(sparkProperties: Seq[(String, String)]) extends StandaloneClusterMessage case class RegisterSlaveFailed(message: String) extends StandaloneClusterMessage // Slaves to master case class RegisterSlave(slaveId: String, host: String, cores: Int) extends StandaloneClusterMessage case class StatusUpdate(slaveId: String, taskId: Long, state: TaskState, data: SerializableBuffer) extends StandaloneClusterMessage object StatusUpdate { /** Alternate factory method that takes a ByteBuffer directly for the data field */ def apply(slaveId: String, taskId: Long, state: TaskState, data: ByteBuffer): StatusUpdate = { StatusUpdate(slaveId, taskId, state, new SerializableBuffer(data)) } } // Internal messages in master case object ReviveOffers extends StandaloneClusterMessage case object StopMaster extends StandaloneClusterMessage
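// Illustrative usage sketch (the object name and identifiers are placeholders).
// It shows the companion factory above wrapping a raw ByteBuffer into a
// SerializableBuffer; TaskState and ByteBuffer come from the imports at the top
// of this file.
object StatusUpdateUsage {
  def report(slaveId: String, taskId: Long, state: TaskState, resultBytes: Array[Byte]): StatusUpdate =
    StatusUpdate(slaveId, taskId, state, ByteBuffer.wrap(resultBytes))
}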
ankurdave/arthur
core/src/main/scala/spark/scheduler/cluster/StandaloneClusterMessage.scala
Scala
bsd-3-clause
1,156
/* * Copyright 2014 Treode, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.treode.disk import scala.util.Random import com.treode.async.{Async, Scheduler}, Async.supply import com.treode.disk.stubs.{StubDisk, StubDiskChecks} import com.treode.tags.{Intensive, Periodic} import org.scalatest.FreeSpec class SegmentLedgerSpec extends FreeSpec with StubDiskChecks { private class SegmentLedgerTracker (nobjects: Int, npages: Int) extends Tracker { type Medic = SegmentLedgerMedic type Struct = SegmentLedger class UserObject (id: ObjectId, private var page: Int) { private val tracker = SegmentLedgerTracker.this private var freeable = Set.empty [Int] private var group = 0 def alloc (ledger: SegmentLedger) (implicit random: Random): Async [Unit] = { while (page == 0) { freeable += group group += 1 page = random.nextInt (npages + 1) } page -= 1 tracker.alloc (ledger, id, group, random.nextInt (nbytes)) }} private val disk = 0 private val seg = 0 private val typ: TypeId = 0 private val nbytes = 100 private var objects = Map.empty [ObjectId, UserObject] private var allocating = Map.empty [(ObjectId, Long), Int] .withDefaultValue (0) private var allocated = Map.empty [(ObjectId, Long), Int] .withDefaultValue (0) def recover () (implicit random: Random, scheduler: Scheduler, recovery: DiskRecovery): Medic = new SegmentLedgerMedic (recovery) def launch (medic: Medic) (implicit launch: DiskLaunch): Async [Struct] = medic.close() .map (_._1) def getObject (id: ObjectId) (implicit random: Random): UserObject = objects.get (id) match { case Some (obj) => obj case None => val obj = new UserObject (id, random.nextInt (npages)) objects += id -> obj obj } def randomObject () (implicit random: Random): UserObject = getObject (ObjectId (random.nextInt (nobjects))) def alloc (ledger: SegmentLedger, obj: ObjectId, gen: Long, bytes: Int): Async [Unit] = { val id = (obj, gen) val tally = new PageTally tally.alloc (typ, obj, gen, bytes) allocating += id -> (allocating (id) + bytes) for { _ <- ledger.alloc (disk, seg, 0, tally) } yield { allocated += id -> (allocated (id) + bytes) }} def batches ( nbatches: Int, nallocs: Int, ledger: SegmentLedger ) (implicit random: Random, scheduler: Scheduler ): Async [Unit] = { var i = 0 scheduler.whilst (i < nbatches) { i += 1 Async.count (nallocs) (randomObject().alloc (ledger)) }} def verify (crashed: Boolean, ledger: Struct) (implicit scheduler: Scheduler): Async [Unit] = supply { val docket = ledger.docket for ((page, bytes) <- docket; id = (page.obj, page.gen)) assert (allocated (id) <= bytes && bytes <= allocating (id)) for ((id @ (obj, grp), bytes) <- allocated) assert (bytes <= docket (typ, obj, grp, disk, seg)) } override def toString = s"new SegmentLedgerTracker ($nobjects, $npages)" } private class SegmentLedgerPhase (nbatches: Int, nallocs: Int) extends Effect [SegmentLedgerTracker] { def start ( tracker: SegmentLedgerTracker, ledger: SegmentLedger ) (implicit random: Random, scheduler: Scheduler, disk: StubDisk ): Async [Unit] = tracker.batches (nbatches, nallocs, 
ledger) override def toString = s"new SegmentLedgerPhase ($nbatches, $nallocs)" } "The SegmentLedger should record and recover" - { for { nbatches <- Seq (0, 1, 2, 3) nallocs <- Seq (0, 1, 2, 3) if (nbatches != 0 && nallocs != 0 || nbatches == nallocs) } s"for $nbatches batches of $nallocs allocations" taggedAs (Intensive, Periodic) in { manyScenarios (new SegmentLedgerTracker (1, 3), new SegmentLedgerPhase (nbatches, nallocs)) } for { (nbatches, nallocs, nobjects) <- Seq ((7, 7, 10), (20, 20, 40), (20, 100, 10)) } s"for $nbatches batches of $nallocs allocations" taggedAs (Intensive, Periodic) in { manyScenarios (new SegmentLedgerTracker (nobjects, 7), new SegmentLedgerPhase (nbatches, nallocs)) }}}
Treode/store
disk/test/com/treode/disk/SegmentLedgerSpec.scala
Scala
apache-2.0
4,866
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * This code is a modified version of the original Spark 1.0.2 implementation. * */ package com.massivedatascience.clusterer import com.massivedatascience.clusterer.MultiKMeansClusterer.ClusteringWithDistortion import com.massivedatascience.linalg.{ MutableWeightedVector, WeightedVector } import org.apache.spark.SparkContext._ import org.apache.spark.rdd.RDD import scala.collection.mutable.ArrayBuffer /** * A K-Means clustering implementation that performs multiple K-means clusterings simultaneously, * returning the one with the lowest cost. * */ //scalastyle:off @deprecated("use ColumnTrackingKMeans", "1.2.0") class MultiKMeans extends MultiKMeansClusterer { def cluster( maxIterations: Int, pointOps: BregmanPointOps, data: RDD[BregmanPoint], c: Seq[IndexedSeq[BregmanCenter]]): Seq[ClusteringWithDistortion] = { val centers = c.map(_.toArray).toArray def cluster(): Seq[ClusteringWithDistortion] = { val runs = centers.length val active = Array.fill(runs)(true) val costs = Array.fill(runs)(0.0) var activeRuns = new ArrayBuffer[Int] ++ (0 until runs) var iteration = 0 /* * Execute iterations of Lloyd's algorithm until all runs have converged. 
*/ while (iteration < maxIterations && activeRuns.nonEmpty) { // remove the empty clusters logInfo(s"iteration $iteration") val activeCenters = activeRuns.map(r => centers(r)).toArray if (log.isInfoEnabled) { for (r <- 0 until activeCenters.length) logInfo(s"run ${activeRuns(r)} has ${activeCenters(r).length} centers") } // Find the sum and count of points mapping to each center val (centroids: Array[((Int, Int), WeightedVector)], runDistortion) = getCentroids(data, activeCenters) if (log.isInfoEnabled) { for (run <- activeRuns) logInfo(s"run $run distortion ${runDistortion(run)}") } for (run <- activeRuns) active(run) = false for (((runIndex: Int, clusterIndex: Int), cn: MutableWeightedVector) <- centroids) { val run = activeRuns(runIndex) if (cn.weight == 0.0) { active(run) = true centers(run)(clusterIndex) = null.asInstanceOf[BregmanCenter] } else { val centroid = cn.asImmutable active(run) = active(run) || pointOps.centerMoved(pointOps.toPoint(centroid), centers(run)(clusterIndex)) centers(run)(clusterIndex) = pointOps.toCenter(centroid) } } // filter out null centers for (r <- activeRuns) centers(r) = centers(r).filter(_ != null) // update distortions and print log message if run completed during this iteration for ((run, runIndex) <- activeRuns.zipWithIndex) { costs(run) = runDistortion(runIndex) if (!active(run)) logInfo(s"run $run finished in ${iteration + 1} iterations") } activeRuns = activeRuns.filter(active(_)) iteration += 1 } costs.zip(centers).map { case (x, y) => ClusteringWithDistortion(x, y.toIndexedSeq) } } def getCentroids( data: RDD[BregmanPoint], activeCenters: Array[Array[BregmanCenter]]): (Array[((Int, Int), WeightedVector)], Array[Double]) = { val sc = data.sparkContext val runDistortion = Array.fill(activeCenters.length)(sc.accumulator(0.0)) val bcActiveCenters = sc.broadcast(activeCenters) val result = data.mapPartitions[((Int, Int), WeightedVector)] { points => val bcCenters = bcActiveCenters.value val centers = bcCenters.map(c => Array.fill(c.length)(pointOps.make())) for (point <- points; (clusters, run) <- bcCenters.zipWithIndex) { val (cluster, cost) = pointOps.findClosest(clusters, point) runDistortion(run) += cost centers(run)(cluster).add(point) } val contribution = for ( (clusters, run) <- bcCenters.zipWithIndex; (contrib, cluster) <- clusters.zipWithIndex ) yield { ((run, cluster), centers(run)(cluster).asImmutable) } contribution.iterator }.aggregateByKey(pointOps.make())( (x, y) => x.add(y), (x, y) => x.add(y) ).map(x => (x._1, x._2.asImmutable)).collect() bcActiveCenters.unpersist() (result, runDistortion.map(x => x.localValue)) } cluster() } } //scalastyle:on
derrickburns/generalized-kmeans-clustering
src/main/scala/com/massivedatascience/clusterer/MultiKMeans.scala
Scala
apache-2.0
5,246
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.api.r import java.io.File import java.util.Arrays import org.apache.spark.{SparkEnv, SparkException} private[spark] object RUtils { // Local path where R binary packages built from R source code contained in the spark // packages specified with "--packages" or "--jars" command line option reside. var rPackages: Option[String] = None /** * Get the SparkR package path in the local spark distribution. */ def localSparkRPackagePath: Option[String] = { val sparkHome = sys.env.get("SPARK_HOME").orElse(sys.props.get("spark.test.home")) sparkHome.map( Seq(_, "R", "lib").mkString(File.separator) ) } /** * Get the list of paths for R packages in various deployment modes, of which the first * path is for the SparkR package itself. The second path is for R packages built as * part of Spark Packages, if any exist. Spark Packages can be provided through the * "--packages" or "--jars" command line options. * * This assumes that Spark properties `spark.master` and `spark.submit.deployMode` * and environment variable `SPARK_HOME` are set. */ def sparkRPackagePath(isDriver: Boolean): Seq[String] = { val (master, deployMode) = if (isDriver) { (sys.props("spark.master"), sys.props("spark.submit.deployMode")) } else { val sparkConf = SparkEnv.get.conf (sparkConf.get("spark.master"), sparkConf.get("spark.submit.deployMode", "client")) } val isYarnCluster = master != null && master.contains("yarn") && deployMode == "cluster" val isYarnClient = master != null && master.contains("yarn") && deployMode == "client" // In YARN mode, the SparkR package is distributed as an archive symbolically // linked to the "sparkr" file in the current directory and additional R packages // are distributed as an archive symbolically linked to the "rpkg" file in the // current directory. // // Note that this does not apply to the driver in client mode because it is run // outside of the cluster. if (isYarnCluster || (isYarnClient && !isDriver)) { val sparkRPkgPath = new File("sparkr").getAbsolutePath val rPkgPath = new File("rpkg") if (rPkgPath.exists()) { Seq(sparkRPkgPath, rPkgPath.getAbsolutePath) } else { Seq(sparkRPkgPath) } } else { // Otherwise, assume the package is local // TODO: support this for Mesos val sparkRPkgPath = localSparkRPackagePath.getOrElse { throw new SparkException("SPARK_HOME not set. Can't locate SparkR package.") } if (!rPackages.isEmpty) { Seq(sparkRPkgPath, rPackages.get) } else { Seq(sparkRPkgPath) } } } /** Check if R is installed before running tests that use R commands. */ def isRInstalled: Boolean = { try { val builder = new ProcessBuilder(Arrays.asList("R", "--version")) builder.start().waitFor() == 0 } catch { case e: Exception => false } } }
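// Illustrative usage sketch (the object name is hypothetical). RUtils is
// private[spark], so a caller like this would live inside the org.apache.spark
// package, and SPARK_HOME (or spark.test.home) must be set or sparkRPackagePath
// throws a SparkException, as shown above.
private[spark] object RUtilsUsage {
  def main(args: Array[String]): Unit = {
    sys.props("spark.master") = "local[*]"
    sys.props("spark.submit.deployMode") = "client"
    // Outside of YARN this resolves to <SPARK_HOME>/R/lib, plus any R packages
    // built from "--packages"/"--jars" sources recorded in RUtils.rPackages.
    RUtils.sparkRPackagePath(isDriver = true).foreach(println)
    println(s"R installed: ${RUtils.isRInstalled}")
  }
}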
chenc10/Spark-PAF
core/src/main/scala/org/apache/spark/api/r/RUtils.scala
Scala
apache-2.0
3,829
package com.karasiq.shadowcloud.config import com.karasiq.common.configs.ConfigImplicits import com.karasiq.shadowcloud.ui.UIProvider import com.karasiq.shadowcloud.ui.passwords.PasswordProvider import com.typesafe.config.Config private[shadowcloud] final case class UIConfig(rootConfig: Config, passwordProvider: Class[PasswordProvider], uiProvider: Class[UIProvider]) extends WrappedConfig private[shadowcloud] object UIConfig extends WrappedConfigFactory[UIConfig] with ConfigImplicits { override def apply(config: Config): UIConfig = { val passwordProvider: Class[PasswordProvider] = config.getClass("password-provider") val uiProvider: Class[UIProvider] = config.getClass("ui-provider") UIConfig(config, passwordProvider, uiProvider) } }
Karasiq/shadowcloud
core/src/main/scala/com/karasiq/shadowcloud/config/UIConfig.scala
Scala
apache-2.0
778
package dotty.tools.dotc.util import scala.language.unsafeNulls import java.net.URLClassLoader import java.nio.file.Paths import dotty.tools.repl.AbstractFileClassLoader object ClasspathFromClassloader { /** Attempt to recreate a classpath from a classloader. * * BEWARE: with exotic enough classloaders, this may not work at all or do * the wrong thing. */ def apply(cl: ClassLoader): String = { val classpathBuff = List.newBuilder[String] def collectClassLoaderPaths(cl: ClassLoader): Unit = { if (cl != null) { cl match { case cl: URLClassLoader => // This is wrong if we're in a subclass of URLClassLoader // that filters loading classes from its parent ¯\\_(ツ)_/¯ collectClassLoaderPaths(cl.getParent) // Parent classloaders are searched before their child, so the part of // the classpath coming from the child is added at the _end_ of the // classpath. classpathBuff ++= cl.getURLs.iterator.map(url => Paths.get(url.toURI).toAbsolutePath.toString) case _ => if cl.getClass.getName == classOf[AbstractFileClassLoader].getName then // HACK: We can't just collect the classpath from arbitrary parent // classloaders since the current classloader might intentionally // filter loading classes from its parent (for example // BootFilteredLoader in the sbt launcher does this and we really // don't want to include the scala-library that sbt depends on // here), but we do need to look at the parent of the REPL // classloader, so we special case it. We can't do this using a type // test since the REPL classloader class itself is normally loaded // with a different classloader. collectClassLoaderPaths(cl.getParent) else if cl eq ClassLoader.getSystemClassLoader then // HACK: For Java 9+, if the classloader is an AppClassLoader then use the classpath from the system // property `java.class.path`. classpathBuff += System.getProperty("java.class.path") } } } collectClassLoaderPaths(cl) classpathBuff.result().mkString(java.io.File.pathSeparator) } }
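// Illustrative usage sketch (the object name is hypothetical). As the scaladoc
// above warns, the reconstruction is best-effort and depends on the classloader
// hierarchy at hand.
object ClasspathFromClassloaderUsage {
  def main(args: Array[String]): Unit = {
    val classpath = ClasspathFromClassloader(getClass.getClassLoader)
    // Entries are joined with the platform path separator; parent-classloader
    // entries come before those contributed by their children.
    classpath.split(java.io.File.pathSeparator).foreach(println)
  }
}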
dotty-staging/dotty
compiler/src/dotty/tools/dotc/util/ClasspathFromClassloader.scala
Scala
apache-2.0
2,357
package eu.shiftforward trait Tracer { type Probe = (String, Connector[_]) def setProbes(probes: Probe*) def trace(currentTime: Long) def close() { } } object DummyTracer extends Tracer { def setProbes(probes: Probe*) { } def trace(currentTime: Long) { } } class ConsoleTracer extends Tracer { var lastValues = List[Any]() var probes: List[Probe] = List() private def prettyPrintSignal(h: Boolean, s: Boolean) = (h, s) match { case (false, false) => "│ " case (false, true) => "└─┐" case (true, true) => " │" case (true, false) => "┌─┘" } def setProbes(probes: Probe*) { this.probes = probes.toList println("time\\t" + probes.map(_._1).mkString("\\t")) } def currentValues = probes.map(_._2) def trace(currentTime: Long) { val signals = currentValues.map(_.getSignal) val values = if (!lastValues.isEmpty) lastValues.zip(signals).map { case (h: Boolean, s: Boolean) => prettyPrintSignal(h, s) case (_, b: Iterable[Boolean]) => b.map(s => if (s) 1 else 0).mkString.reverse } else signals.map { case s: Boolean => prettyPrintSignal(s, s) case b: Iterable[Boolean] => b.map(s => if (s) 1 else 0).mkString.reverse } println(currentTime + "\\t" + values.mkString("\\t")) lastValues = signals } } class VCDTracer(file: java.io.File, secondaryTracer: Tracer = new ConsoleTracer) extends Tracer { var probes: List[Probe] = List() val pw = new java.io.PrintWriter(file) val date = new java.util.Date().toString pw.println(s"""$$date | $date |$$end |$$timescale | 1ms |$$end""".stripMargin) val symbolList = (33 to 126).map(_.asInstanceOf[Char].toString).toList def setProbes(ps: Probe*) { this.probes = ps.toList probes.zip(symbolList).map { case ((id, conn: Bus), symbol) => s"$$var reg ${conn.size} $symbol $id [${conn.size - 1}:0] $$end" case ((id, _: Wire), symbol) => s"$$var reg 1 $symbol $id $$end" } foreach pw.println pw.println("$enddefinitions $end") secondaryTracer.setProbes(ps: _*) } def currentValues = probes.map(_._2) def trace(currentTime: Long) { pw.println("#" + currentTime) currentValues.zip(symbolList).map { case (v: Wire, s) => (if (v.getSignal) 1 else 0) + s case (v: Bus, s) => s"b$v $s" } foreach pw.println secondaryTracer.trace(currentTime) } override def close() { pw.flush() pw.close() secondaryTracer.close() } }
hugoferreira/from-zero-to-computer
src/main/scala/eu/shiftforward/Tracer.scala
Scala
mit
2,560
package processframework import akka.actor.{ Actor, ActorContext, ActorRef, ActorSystem, Props } import akka.testkit.{ TestActor, TestKit, TestProbe } object ProcessStepTest { case object Completed extends Process.Event case object Response case class Command(state: Int) def testStep(executeProbe: ActorRef)(implicit _actorContext: ActorContext) = new ProcessStep[Int] { implicit def context: ActorContext = _actorContext def execute()(implicit process: ActorRef) = { state ⇒ executeProbe ! Command(state) } def receiveCommand = { case Response ⇒ Completed } def updateState = { case Completed ⇒ state ⇒ markDone(state + 1) } } } class ProcessStepTest extends BaseSpec with ProcessStepTestSupport[Int, ProcessStep[Int]] { import ProcessStepTest._ def processProbeWithState[S](state: S): TestProbe = { val processProbe = TestProbe() processProbe.setAutoPilot(new TestActor.AutoPilot { def run(sender: ActorRef, msg: Any): TestActor.AutoPilot = msg match { case Process.GetState ⇒ sender ! state; TestActor.KeepRunning case Completed ⇒ testActor ! Completed; TestActor.KeepRunning } }) processProbe } def createTestProbe(): TestProbe = { val serviceMockProbe = TestProbe() serviceMockProbe.setAutoPilot(new TestActor.AutoPilot { def run(sender: ActorRef, msg: Any): TestActor.AutoPilot = { msg match { case cmd: Command ⇒ sender ! Response TestActor.NoAutoPilot } } }) serviceMockProbe } def createProcessStep(executeProbe: TestProbe)(implicit context: ActorContext) = testStep(executeProbe.ref) "A ProcessStep" should { "handle happy flow" in { // GIVEN val processProbe = processProbeWithState(432) val step = processStep() // Execute the logic (do call) val future = step.run()(processProbe.ref, system.dispatcher, scala.reflect.classTag[Int]) // When response is received by process, it will send this to the steps, so they can handle it val event = expectMsg(Completed) step.isCompleted should not be true // The event can be used to retrieve an updateState function val updateStateFunction = step.handleUpdateState(event) step.isCompleted should not be true updateStateFunction(646) should be(647) step.isCompleted shouldBe true } "run the step logic (which does not complete the step)" in { // GIVEN val processProbe = processProbeWithState(432) val step = processStep() // WHEN val future = step.run()(processProbe.ref, system.dispatcher, scala.reflect.classTag[Int]) // THEN testProbe.expectMsg(Command(432)) step.isCompleted shouldBe false } "complete returns an updateState function, which completes the step" in { // GIVEN val processProbe = processProbeWithState(432) val step = processStep() // WHEN val event = step.receiveCommand(Response) // THEN step.isCompleted shouldBe false // WHEN RUN val result = step.handleUpdateState(event)(7) // THEN result should be(8) step.isCompleted shouldBe true } "can only be completed once" in { // GIVEN val processProbe = processProbeWithState(432) val step = processStep() // WHEN step.handleUpdateState(Completed)(3) // AND THEN intercept[MatchError] { step.handleUpdateState(Completed) } } } }
jgordijn/process
src/test/scala/processframework/ProcessStepTest.scala
Scala
apache-2.0
3,597
package outer package nested object indent1: object inner: def x: Int = 1 end inner val y: Int = 2 end indent1
som-snytt/dotty
tests/pos-special/sourcepath/outer/nested/indent1.scala
Scala
apache-2.0
122
package ca.uwaterloo.gsd.rangeFix import org.kiama.rewriting.Rewriter import org.kiama.rewriting.Rewriter._ object MyRewriter { def everywheretdWithGuard(g: => Rewriter.Strategy, s: => Rewriter.Strategy) : Rewriter.Strategy = g <+ (attempt(s) <* (all (everywheretdWithGuard(g, s)))) def everywherebuWithGuard(g: => Rewriter.Strategy, s: => Rewriter.Strategy) : Rewriter.Strategy = g <+ (all (everywherebuWithGuard(g, s)) <* attempt(s)) }
matachi/rangeFix
src/main/scala/util/rewriting.scala
Scala
mit
531
/* * Copyright (c) 2017 Magomed Abdurakhmanov, Hypertino * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * */ package com.hypertino.hyperbus.transport import com.hypertino.hyperbus.util.IdGenerator import org.scalatest.{FreeSpec, Matchers} class IdGeneratorSpec extends FreeSpec with Matchers { "IdGenerator " - { "Should generate sorted sequence" in { val list = 0 until 5000 map { i ⇒ IdGenerator.create() } list.sortBy(l ⇒ l) should equal(list) // sequence is sorted list.foreach { l ⇒ list.count(_ == l) should equal(1) // every one is unique l.length should equal(30) } } } }
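// Illustrative usage sketch (the object name is hypothetical; it assumes the
// generated ids are plain strings, as the length check above suggests).
// IdGenerator.create() yields unique identifiers that sort in generation order,
// which is exactly the property the test verifies.
object IdGeneratorUsage {
  def main(args: Array[String]): Unit = {
    val ids = (1 to 3).map(_ => IdGenerator.create())
    ids.foreach(println)
    assert(ids == ids.sorted)
  }
}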
hypertino/hyperbus
hyperbus/src/test/scala/com/hypertino/hyperbus/transport/IdGeneratorSpec.scala
Scala
mpl-2.0
787
import com.mike_burns.ohlaunch import org.scalatest.matchers.ShouldMatchers import org.scalatest.Spec class Specs extends Spec with ShouldMatchers { describe("a spec") { it("should do something") { } } }
mike-burns/ohlaunch
src/test/scala/Specs.scala
Scala
bsd-3-clause
216
package nodes.learning import java.io.File import breeze.linalg._ import breeze.numerics.{exp, log => bLog} import breeze.stats._ import org.apache.spark.rdd.RDD import pipelines.{Logging, Estimator} import utils.MatrixUtils /** * Fit a Gaussian Mixture model to Data. * * @param k Number of centers to estimate. */ case class BensGMMEstimator(k: Int, maxIterations: Int = 100, minClusterSize: Int = 40, stopTolerance: Double = 1e-4, weightThreshold: Double = 1e-4, smallVarianceThreshold: Double = 1e-2, absoluteVarianceThreshold: Double = 0) extends Estimator[DenseVector[Double], DenseVector[Double]] with Logging { require(minClusterSize > 0, "Minimum cluster size must be positive") require(maxIterations > 0, "maxIterations must be positive") /** * Currently this model works on items that fit in local memory. * @param samples * @return A PipelineNode (Transformer) which can be called on new data. */ def fit(samples: RDD[DenseVector[Double]]): GaussianMixtureModel = { fit(samples.collect) } /** * Fit a Gaussian mixture model with `k` centers to a sample array. * * @param samples Sample Array - all elements must be the same size. * @return A Gaussian Mixture Model. */ def fit(samples: Array[DenseVector[Double]]): GaussianMixtureModel = { require(samples.length > 0, "Must have training points") val X = MatrixUtils.rowsToMatrix(samples) // Use KMeans++ initialization to get the GMM center initializations val kMeansModel = KMeansPlusPlusEstimator(k, 1).fit(X) val centerAssignment = kMeansModel.apply(X) val assignMass = sum(centerAssignment, Axis._0).toDenseVector // gather data statistics val numSamples = X.rows val numFeatures = X.cols val meanGlobal = mean(X(::, *)) val XSq = X :* X val varianceGlobal = mean(XSq(::, *)) - (meanGlobal :* meanGlobal) // set the lower bound for the gmm_variance val gmmVarLB = max(smallVarianceThreshold * varianceGlobal, absoluteVarianceThreshold) var gmmWeights = assignMass.asDenseMatrix / numSamples.toDouble var gmmMeans = diag(assignMass.map(1.0 / _)) * (centerAssignment.t * X) var gmmVars = diag(assignMass.map(1.0 / _)) * (centerAssignment.t * XSq) - (gmmMeans :* gmmMeans) // Threshold small variances gmmVars = max(gmmVars, DenseMatrix.ones[Double](k, 1) * gmmVarLB) // Run EM! val curCost = DenseVector.zeros[Double](maxIterations) var iter = 0 var costImproving = true var largeEnoughClusters = true while ((iter < maxIterations) && costImproving && largeEnoughClusters) { /* E-STEP */ /* compute the squared malhanobis distance for each gaussian. sq_mal_dist(i,j) || x_i - mu_j||_Lambda^2. I am the master of computing Euclidean distances without for loops. */ val sqMahlist = (XSq * gmmVars.map(0.5 / _).t) - (X * (gmmMeans :/ gmmVars).t) + (DenseMatrix.ones[Double](numSamples, 1) * (sum(gmmMeans :* gmmMeans :/ gmmVars, Axis._1).t :* 0.5)) // compute the log likelihood of the approximate posterior val llh = DenseMatrix.ones[Double](numSamples, 1) * (-0.5 * numFeatures * math.log(2 * math.Pi) - 0.5 * sum(bLog(gmmVars), Axis._1).t + bLog(gmmWeights)) - sqMahlist /* compute the log likelihood of the model using the incremental approach suggested by the Xerox folks. 
The key thing here is that for all intents and purposes, log(1+exp(t)) is equal to zero is t<-30 and equal to t if t>30 */ var lseLLH = llh(::, 0) var cluster = 1 while (cluster < k) { val deltaLSE = lseLLH - llh(::, cluster) val deltaLSEThreshold = min(max(deltaLSE, -30.0), 30.0) val lseIncrement = (deltaLSE.map(x => if (x > 30.0) 1.0 else 0.0) :* deltaLSE) + (deltaLSE.map(x => if (x > -30.0) 1.0 else 0.0) :* deltaLSE.map(x => if (x <= 30.0) 1.0 else 0.0) :* bLog(exp(deltaLSEThreshold) + 1.0)) lseLLH = llh(::, cluster) + lseIncrement cluster += 1 } curCost(iter) = mean(lseLLH) logInfo(s"iter=$iter, llh=${curCost(iter)}") // Check if we're making progress if (iter > 0) { costImproving = (curCost(iter) - curCost(iter - 1)) >= stopTolerance * math.abs(curCost(iter - 1)) } logInfo(s"cost improving: $costImproving") if (costImproving) { /* if we make progress, update our pseudo-likelihood for the E-step. by shifting the llh to be peaked at 0, we avoid nasty numerical overflows. */ llh(::, *) -= max(llh(*, ::)) exp.inPlace(llh) llh(::, *) :/= sum(llh, Axis._1) var q = llh /* aggressive posterior thresholding: suggested by Xerox. Thresholds the really small weights to sparsify the assignments. */ q = q.map(x => if (x > weightThreshold) x else 0.0) q(::, *) :/= sum(q, Axis._1) /* M-STEP */ val qSum = sum(q, Axis._0).toDenseVector if (qSum.toArray.exists(_ < minClusterSize)) { logWarning("Unbalanced clustering, try less centers") largeEnoughClusters = false } else { gmmWeights = qSum.asDenseMatrix / numSamples.toDouble gmmMeans = diag(qSum.map(1.0 / _)) * (q.t * X) gmmVars = diag(qSum.map(1.0 / _)) * (q.t * XSq) - (gmmMeans :* gmmMeans) // Threshold small variances gmmVars = max(gmmVars, DenseMatrix.ones[Double](k, 1) * gmmVarLB) } } iter += 1 } GaussianMixtureModel(gmmMeans.t, gmmVars.t, gmmWeights.toDenseVector) } }
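// Illustrative usage sketch (object name and data are made up). It only
// demonstrates the call shape of fit(); two well-separated synthetic blobs should
// yield two clearly distinct mixture components.
object BensGMMEstimatorUsage {
  def main(args: Array[String]): Unit = {
    val rng = new scala.util.Random(0)
    val samples: Array[DenseVector[Double]] =
      Array.fill(500)(DenseVector(rng.nextGaussian(), rng.nextGaussian())) ++
      Array.fill(500)(DenseVector(10.0 + rng.nextGaussian(), 10.0 + rng.nextGaussian()))
    // k = 2 components; the remaining parameters keep their defaults from the case class above.
    val model = BensGMMEstimator(k = 2).fit(samples)
    // model is a GaussianMixtureModel holding the fitted means, variances and weights.
  }
}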
shivaram/keystone
src/main/scala/nodes/learning/BensGMMEstimator.scala
Scala
apache-2.0
5,611
package com.dominikgruber.fpinscala.chapter08 import com.dominikgruber.fpinscala.chapter05.Stream import com.dominikgruber.fpinscala.chapter06.{RNG, Simple} import Prop._ case class Prop(run: (MaxSize, TestCases, RNG) => Result) { /** * Exercise 09 * Now that we have a representation of Prop, implement &&, and || for * manipulating Prop values. While we can implement &&, notice that in the * case of failure we aren't informed which property was responsible, the left * or the right. * (Optional): Can you devise a way of handling this, perhaps by allowing Prop * values to be assigned a tag or label which gets displayed in the event of a * failure? */ def &&(p: Prop): Prop = Prop { (max, testCases, rng) => { (this.run(max, testCases, rng), p.run(max, testCases, rng)) match { case (None, None) => None case (Some((failedCase1, success1)), Some((failedCase2, success2))) => Some((failedCase1 + " / " + failedCase2, success1 + success2)) case (Some((failedCase1, success1)), _) => Some((failedCase1, success1)) case (_, Some((failedCase2, success2))) => Some((failedCase2, success2)) } } } def ||(p: Prop): Prop = Prop { (max, testCases, rng) => { (this.run(max, testCases, rng), p.run(max, testCases, rng)) match { case (None, _) => None case (_, None) => None case (Some((failedCase1, success1)), Some((failedCase2, success2))) => Some((failedCase1 + " / " + failedCase2, success1 + success2)) } } } } object Prop { type FailedCase = String type SuccessCount = Int type TestCases = Int type Result = Option[(FailedCase, SuccessCount)] type MaxSize = Int def forAll[A](as: Gen[A])(f: A => Boolean): Prop = Prop { (max, n, rng) => randomStream(as)(rng).zip(Stream.from(0)).take(n).map { case (a, i) => try { if (f(a)) None else Some((a.toString, i)) } catch { case e: Exception => Some((buildMsg(a, e), i)) } }.find(_.isDefined).getOrElse(None) } def randomStream[A](g: Gen[A])(rng: RNG): Stream[A] = Stream.unfold(rng)(rng => Some(g.sample.run(rng))) def buildMsg[A](s: A, e: Exception): String = s"test case: $s\\n" + s"generated an exception: ${e.getMessage}\\n" + s"stack trace:\\n ${e.getStackTrace.mkString("\\n")}" def forAll[A](g: SGen[A])(f: A => Boolean): Prop = forAll(g(_))(f) def forAll[A](g: Int => Gen[A])(f: A => Boolean): Prop = Prop { (max, n, rng) => val casesPerSize = (n + (max - 1)) / max val props: Stream[Prop] = Stream.from(0).take((n min max) + 1).map(i => forAll(g(i))(f)) val prop: Prop = props.map(p => Prop { (max, _, rng) => p.run(max, casesPerSize, rng) }).toList.reduce(_ && _) prop.run(max,n,rng) } val smallInt = Gen.choose(-10,10) val maxProp = forAll(Gen.listOf1(smallInt)) { l => val max = l.max !l.exists(_ > max) } def run(p: Prop, maxSize: Int = 100, testCases: Int = 100, rng: RNG = Simple(System.currentTimeMillis)): Unit = p.run(maxSize, testCases, rng) match { case Some((msg, n)) => println(s"! Falsified after $n passed tests:\\n $msg") case None => println(s"+ OK, passed $testCases tests.") } /** * Exercise 15 * Write a property to verify the behavior of List.sorted, which you can use * to sort (among other things) a List[Int]. For instance, List(2,1,3).sorted * is equal to List(1,2,3). */ def sortedListProp = forAll(Gen.listOf1(smallInt)) { l => l.sorted == l.sorted.reverse.reverse // Simplistic Check } }
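// Illustrative usage sketch (the object name is hypothetical). It exercises the
// combinators defined above and assumes the Gen/SGen implementations from the
// same chapter are available.
object PropUsage extends App {
  // Generator of small integers, as used for maxProp above.
  val ints = Gen.choose(-10, 10)

  // No element of a non-empty list exceeds its maximum.
  val maxHolds = Prop.forAll(Gen.listOf1(ints)) { ns => !ns.exists(_ > ns.max) }

  // Runs 100 randomly generated test cases by default and prints
  // "+ OK, passed 100 tests." when the property holds.
  Prop.run(maxHolds)
}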
TheDom/functional-programming-in-scala
src/main/scala/com/dominikgruber/fpinscala/chapter08/Prop.scala
Scala
mit
3,625
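A hedged usage sketch (not part of the file above) that wires the && and || combinators and the run helper together; Gen.choose and Gen.listOf1 are assumed to exist exactly as they are referenced inside Prop itself.

package com.dominikgruber.fpinscala.chapter08

object PropUsage extends App {
  val smallInt = Gen.choose(-10, 10)

  // No element of a non-empty list exceeds its maximum.
  val maxProp = Prop.forAll(Gen.listOf1(smallInt)) { l => !l.exists(_ > l.max) }

  // Sorting is idempotent: sorting a sorted list changes nothing.
  val sortedProp = Prop.forAll(Gen.listOf1(smallInt)) { l => l.sorted.sorted == l.sorted }

  // && fails if either side fails; || succeeds if either side succeeds.
  Prop.run(maxProp && sortedProp)
  Prop.run(maxProp || sortedProp)
}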
package controllers import play.api.mvc.{Action, Controller} import play.api.libs.json._ import apidoc.models.json._ import apidoc.models.sample.SampleImpl object Samples extends Controller { def getGuid(guid: String) = Action { val sampleObj = SampleImpl(guid, "Gilt World") Ok(Json.toJson(sampleObj)) } }
gilt/activator-gilt-app
svc/app/controllers/Samples.scala
Scala
apache-2.0
322
package io.catbird.util.effect import java.time.Instant import java.util.concurrent.TimeUnit import cats.effect.Clock import com.twitter.util.Await import io.catbird.util.Rerunnable import org.scalatest.Outcome import org.scalatest.concurrent.Eventually import org.scalatest.funsuite.FixtureAnyFunSuite /** * We'll use `eventually` and a reasonably big tolerance here to prevent CI from failing if it is a bit slow. * * Technically the implementation is just an extremely thin wrapper around `System.currentTimeMillis()` * and `System.nanoTime()` so as long as the result is the same order of magnitude (and therefore the * unit-conversion is correct) we should be fine. */ class RerunnableClockSuite extends FixtureAnyFunSuite with Eventually { protected final class FixtureParam { def now: Instant = Instant.now() val clock: Clock[Rerunnable] = RerunnableClock() } test("Retrieval of real time") { f => eventually { val result = Await.result( f.clock.realTime(TimeUnit.MILLISECONDS).map(Instant.ofEpochMilli).run ) assert(java.time.Duration.between(result, f.now).abs().toMillis < 50) } } test("Retrieval of monotonic time") { f => eventually { val result = Await.result( f.clock.monotonic(TimeUnit.NANOSECONDS).run ) val durationBetween = Math.abs(System.nanoTime() - result) assert(TimeUnit.MILLISECONDS.convert(durationBetween, TimeUnit.NANOSECONDS) < 5) } } override protected def withFixture(test: OneArgTest): Outcome = withFixture(test.toNoArgTest(new FixtureParam)) }
travisbrown/catbird
effect/src/test/scala/io/catbird/util/effect/RerunnableClockSuite.scala
Scala
apache-2.0
1,590
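A small usage sketch (separate from the test above) showing how the Clock[Rerunnable] instance under test can be consumed directly; it relies only on the calls already exercised in the suite (RerunnableClock(), realTime, monotonic, .run and Await.result).

import java.util.concurrent.TimeUnit

import cats.effect.Clock
import com.twitter.util.Await
import io.catbird.util.Rerunnable
import io.catbird.util.effect.RerunnableClock

object RerunnableClockUsage {
  def main(args: Array[String]): Unit = {
    val clock: Clock[Rerunnable] = RerunnableClock()

    // Wall-clock time in epoch milliseconds.
    val millis: Long = Await.result(clock.realTime(TimeUnit.MILLISECONDS).run)

    // Monotonic time in nanoseconds, suitable for measuring durations.
    val nanos: Long = Await.result(clock.monotonic(TimeUnit.NANOSECONDS).run)

    println(s"epoch millis: $millis, monotonic nanos: $nanos")
  }
}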
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.analysis import javax.annotation.Nullable import scala.annotation.tailrec import scala.collection.mutable import org.apache.spark.internal.Logging import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.aggregate._ import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.rules.Rule import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types._ /** * A collection of [[Rule]] that can be used to coerce differing types that participate in * operations into compatible ones. * * Notes about type widening / tightest common types: Broadly, there are two cases when we need * to widen data types (e.g. union, binary comparison). In case 1, we are looking for a common * data type for two or more data types, and in this case no loss of precision is allowed. Examples * include type inference in JSON (e.g. what's the column's data type if one row is an integer * while the other row is a long?). In case 2, we are looking for a widened data type with * some acceptable loss of precision (e.g. there is no common type for double and decimal because * double's range is larger than decimal, and yet decimal is more precise than double, but in * union we would cast the decimal into double). */ object TypeCoercion { def typeCoercionRules(conf: SQLConf): List[Rule[LogicalPlan]] = InConversion(conf) :: WidenSetOperationTypes :: PromoteStrings(conf) :: DecimalPrecision :: BooleanEquality :: FunctionArgumentConversion :: ConcatCoercion(conf) :: MapZipWithCoercion :: EltCoercion(conf) :: CaseWhenCoercion :: IfCoercion :: StackCoercion :: Division :: ImplicitTypeCasts :: DateTimeOperations :: WindowFrameCoercion :: Nil // See https://cwiki.apache.org/confluence/display/Hive/LanguageManual+Types. // The conversion for integral and floating point types have a linear widening hierarchy: val numericPrecedence = IndexedSeq( ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType) /** * Case 1 type widening (see the classdoc comment above for TypeCoercion). * * Find the tightest common type of two types that might be used in a binary expression. * This handles all numeric types except fixed-precision decimals interacting with each other or * with primitive types, because in that case the precision and scale of the result depends on * the operation. Those rules are implemented in [[DecimalPrecision]]. 
*/ val findTightestCommonType: (DataType, DataType) => Option[DataType] = { case (t1, t2) if t1 == t2 => Some(t1) case (NullType, t1) => Some(t1) case (t1, NullType) => Some(t1) case (t1: IntegralType, t2: DecimalType) if t2.isWiderThan(t1) => Some(t2) case (t1: DecimalType, t2: IntegralType) if t1.isWiderThan(t2) => Some(t1) // Promote numeric types to the highest of the two case (t1: NumericType, t2: NumericType) if !t1.isInstanceOf[DecimalType] && !t2.isInstanceOf[DecimalType] => val index = numericPrecedence.lastIndexWhere(t => t == t1 || t == t2) Some(numericPrecedence(index)) case (_: TimestampType, _: DateType) | (_: DateType, _: TimestampType) => Some(TimestampType) case (t1, t2) => findTypeForComplex(t1, t2, findTightestCommonType) } /** Promotes all the way to StringType. */ private def stringPromotion(dt1: DataType, dt2: DataType): Option[DataType] = (dt1, dt2) match { case (StringType, t2: AtomicType) if t2 != BinaryType && t2 != BooleanType => Some(StringType) case (t1: AtomicType, StringType) if t1 != BinaryType && t1 != BooleanType => Some(StringType) case _ => None } /** * This function determines the target type of a comparison operator when one operand * is a String and the other is not. It also handles when one op is a Date and the * other is a Timestamp by making the target type to be String. */ private def findCommonTypeForBinaryComparison( dt1: DataType, dt2: DataType, conf: SQLConf): Option[DataType] = (dt1, dt2) match { // We should cast all relative timestamp/date/string comparison into string comparisons // This behaves as a user would expect because timestamp strings sort lexicographically. // i.e. TimeStamp(2013-01-01 00:00 ...) < "2014" = true case (StringType, DateType) => Some(StringType) case (DateType, StringType) => Some(StringType) case (StringType, TimestampType) => Some(StringType) case (TimestampType, StringType) => Some(StringType) case (StringType, NullType) => Some(StringType) case (NullType, StringType) => Some(StringType) // Cast to TimestampType when we compare DateType with TimestampType // if conf.compareDateTimestampInTimestamp is true // i.e. TimeStamp('2017-03-01 00:00:00') eq Date('2017-03-01') = true case (TimestampType, DateType) => if (conf.compareDateTimestampInTimestamp) Some(TimestampType) else Some(StringType) case (DateType, TimestampType) => if (conf.compareDateTimestampInTimestamp) Some(TimestampType) else Some(StringType) // There is no proper decimal type we can pick, // using double type is the best we can do. // See SPARK-22469 for details. 
case (n: DecimalType, s: StringType) => Some(DoubleType) case (s: StringType, n: DecimalType) => Some(DoubleType) case (l: StringType, r: AtomicType) if r != StringType => Some(r) case (l: AtomicType, r: StringType) if l != StringType => Some(l) case (l, r) => None } private def findTypeForComplex( t1: DataType, t2: DataType, findTypeFunc: (DataType, DataType) => Option[DataType]): Option[DataType] = (t1, t2) match { case (ArrayType(et1, containsNull1), ArrayType(et2, containsNull2)) => findTypeFunc(et1, et2).map { et => ArrayType(et, containsNull1 || containsNull2 || Cast.forceNullable(et1, et) || Cast.forceNullable(et2, et)) } case (MapType(kt1, vt1, valueContainsNull1), MapType(kt2, vt2, valueContainsNull2)) => findTypeFunc(kt1, kt2) .filter { kt => !Cast.forceNullable(kt1, kt) && !Cast.forceNullable(kt2, kt) } .flatMap { kt => findTypeFunc(vt1, vt2).map { vt => MapType(kt, vt, valueContainsNull1 || valueContainsNull2 || Cast.forceNullable(vt1, vt) || Cast.forceNullable(vt2, vt)) } } case (StructType(fields1), StructType(fields2)) if fields1.length == fields2.length => val resolver = SQLConf.get.resolver fields1.zip(fields2).foldLeft(Option(new StructType())) { case (Some(struct), (field1, field2)) if resolver(field1.name, field2.name) => findTypeFunc(field1.dataType, field2.dataType).map { dt => struct.add(field1.name, dt, field1.nullable || field2.nullable || Cast.forceNullable(field1.dataType, dt) || Cast.forceNullable(field2.dataType, dt)) } case _ => None } case _ => None } /** * The method finds a common type for data types that differ only in nullable, containsNull * and valueContainsNull flags. If the input types are too different, None is returned. */ def findCommonTypeDifferentOnlyInNullFlags(t1: DataType, t2: DataType): Option[DataType] = { if (t1 == t2) { Some(t1) } else { findTypeForComplex(t1, t2, findCommonTypeDifferentOnlyInNullFlags) } } def findCommonTypeDifferentOnlyInNullFlags(types: Seq[DataType]): Option[DataType] = { if (types.isEmpty) { None } else { types.tail.foldLeft[Option[DataType]](Some(types.head)) { case (Some(t1), t2) => findCommonTypeDifferentOnlyInNullFlags(t1, t2) case _ => None } } } /** * Case 2 type widening (see the classdoc comment above for TypeCoercion). * * i.e. the main difference with [[findTightestCommonType]] is that here we allow some * loss of precision when widening decimal and double, and promotion to string. */ def findWiderTypeForTwo(t1: DataType, t2: DataType): Option[DataType] = { findTightestCommonType(t1, t2) .orElse(findWiderTypeForDecimal(t1, t2)) .orElse(stringPromotion(t1, t2)) .orElse(findTypeForComplex(t1, t2, findWiderTypeForTwo)) } /** * Whether the data type contains StringType. */ def hasStringType(dt: DataType): Boolean = dt match { case StringType => true case ArrayType(et, _) => hasStringType(et) // Add StructType if we support string promotion for struct fields in the future. case _ => false } private def findWiderCommonType(types: Seq[DataType]): Option[DataType] = { // findWiderTypeForTwo doesn't satisfy the associative law, i.e. (a op b) op c may not equal // to a op (b op c). This is only a problem for StringType or nested StringType in ArrayType. // Excluding these types, findWiderTypeForTwo satisfies the associative law. For instance, // (TimestampType, IntegerType, StringType) should have StringType as the wider common type. 
val (stringTypes, nonStringTypes) = types.partition(hasStringType(_)) (stringTypes.distinct ++ nonStringTypes).foldLeft[Option[DataType]](Some(NullType))((r, c) => r match { case Some(d) => findWiderTypeForTwo(d, c) case _ => None }) } /** * Similar to [[findWiderTypeForTwo]] that can handle decimal types, but can't promote to * string. If the wider decimal type exceeds system limitation, this rule will truncate * the decimal type before return it. */ private[analysis] def findWiderTypeWithoutStringPromotionForTwo( t1: DataType, t2: DataType): Option[DataType] = { findTightestCommonType(t1, t2) .orElse(findWiderTypeForDecimal(t1, t2)) .orElse(findTypeForComplex(t1, t2, findWiderTypeWithoutStringPromotionForTwo)) } def findWiderTypeWithoutStringPromotion(types: Seq[DataType]): Option[DataType] = { types.foldLeft[Option[DataType]](Some(NullType))((r, c) => r match { case Some(d) => findWiderTypeWithoutStringPromotionForTwo(d, c) case None => None }) } /** * Finds a wider type when one or both types are decimals. If the wider decimal type exceeds * system limitation, this rule will truncate the decimal type. If a decimal and other fractional * types are compared, returns a double type. */ private def findWiderTypeForDecimal(dt1: DataType, dt2: DataType): Option[DataType] = { (dt1, dt2) match { case (t1: DecimalType, t2: DecimalType) => Some(DecimalPrecision.widerDecimalType(t1, t2)) case (t: IntegralType, d: DecimalType) => Some(DecimalPrecision.widerDecimalType(DecimalType.forType(t), d)) case (d: DecimalType, t: IntegralType) => Some(DecimalPrecision.widerDecimalType(DecimalType.forType(t), d)) case (_: FractionalType, _: DecimalType) | (_: DecimalType, _: FractionalType) => Some(DoubleType) case _ => None } } /** * Check whether the given types are equal ignoring nullable, containsNull and valueContainsNull. */ def haveSameType(types: Seq[DataType]): Boolean = { if (types.size <= 1) { true } else { val head = types.head types.tail.forall(_.sameType(head)) } } private def castIfNotSameType(expr: Expression, dt: DataType): Expression = { if (!expr.dataType.sameType(dt)) { Cast(expr, dt) } else { expr } } /** * Widens numeric types and converts strings to numbers when appropriate. * * Loosely based on rules from "Hadoop: The Definitive Guide" 2nd edition, by Tom White * * The implicit conversion rules can be summarized as follows: * - Any integral numeric type can be implicitly converted to a wider type. * - All the integral numeric types, FLOAT, and (perhaps surprisingly) STRING can be implicitly * converted to DOUBLE. * - TINYINT, SMALLINT, and INT can all be converted to FLOAT. * - BOOLEAN types cannot be converted to any other type. * - Any integral numeric type can be implicitly converted to decimal type. * - two different decimal types will be converted into a wider decimal type for both of them. * - decimal type will be converted into double if there float or double together with it. * * Additionally, all types when UNION-ed with strings will be promoted to strings. * Other string conversions are handled by PromoteStrings. 
* * Widening types might result in loss of precision in the following cases: * - IntegerType to FloatType * - LongType to FloatType * - LongType to DoubleType * - DecimalType to Double * * This rule is only applied to Union/Except/Intersect */ object WidenSetOperationTypes extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperatorsUp { case s @ Except(left, right, isAll) if s.childrenResolved && left.output.length == right.output.length && !s.resolved => val newChildren: Seq[LogicalPlan] = buildNewChildrenWithWiderTypes(left :: right :: Nil) assert(newChildren.length == 2) Except(newChildren.head, newChildren.last, isAll) case s @ Intersect(left, right, isAll) if s.childrenResolved && left.output.length == right.output.length && !s.resolved => val newChildren: Seq[LogicalPlan] = buildNewChildrenWithWiderTypes(left :: right :: Nil) assert(newChildren.length == 2) Intersect(newChildren.head, newChildren.last, isAll) case s: Union if s.childrenResolved && s.children.forall(_.output.length == s.children.head.output.length) && !s.resolved => val newChildren: Seq[LogicalPlan] = buildNewChildrenWithWiderTypes(s.children) s.makeCopy(Array(newChildren)) } /** Build new children with the widest types for each attribute among all the children */ private def buildNewChildrenWithWiderTypes(children: Seq[LogicalPlan]): Seq[LogicalPlan] = { require(children.forall(_.output.length == children.head.output.length)) // Get a sequence of data types, each of which is the widest type of this specific attribute // in all the children val targetTypes: Seq[DataType] = getWidestTypes(children, attrIndex = 0, mutable.Queue[DataType]()) if (targetTypes.nonEmpty) { // Add an extra Project if the targetTypes are different from the original types. children.map(widenTypes(_, targetTypes)) } else { // Unable to find a target type to widen, then just return the original set. children } } /** Get the widest type for each attribute in all the children */ @tailrec private def getWidestTypes( children: Seq[LogicalPlan], attrIndex: Int, castedTypes: mutable.Queue[DataType]): Seq[DataType] = { // Return the result after the widen data types have been found for all the children if (attrIndex >= children.head.output.length) return castedTypes.toSeq // For the attrIndex-th attribute, find the widest type findWiderCommonType(children.map(_.output(attrIndex).dataType)) match { // If unable to find an appropriate widen type for this column, return an empty Seq case None => Seq.empty[DataType] // Otherwise, record the result in the queue and find the type for the next column case Some(widenType) => castedTypes.enqueue(widenType) getWidestTypes(children, attrIndex + 1, castedTypes) } } /** Given a plan, add an extra project on top to widen some columns' data types. */ private def widenTypes(plan: LogicalPlan, targetTypes: Seq[DataType]): LogicalPlan = { val casted = plan.output.zip(targetTypes).map { case (e, dt) if e.dataType != dt => Alias(Cast(e, dt), e.name)() case (e, _) => e } Project(casted, plan) } } /** * Promotes strings that appear in arithmetic expressions. 
*/ case class PromoteStrings(conf: SQLConf) extends TypeCoercionRule { private def castExpr(expr: Expression, targetType: DataType): Expression = { (expr.dataType, targetType) match { case (NullType, dt) => Literal.create(null, targetType) case (l, dt) if (l != dt) => Cast(expr, targetType) case _ => expr } } override protected def coerceTypes( plan: LogicalPlan): LogicalPlan = plan resolveExpressions { // Skip nodes who's children have not been resolved yet. case e if !e.childrenResolved => e case a @ BinaryArithmetic(left @ StringType(), right) if right.dataType != CalendarIntervalType => a.makeCopy(Array(Cast(left, DoubleType), right)) case a @ BinaryArithmetic(left, right @ StringType()) if left.dataType != CalendarIntervalType => a.makeCopy(Array(left, Cast(right, DoubleType))) // For equality between string and timestamp we cast the string to a timestamp // so that things like rounding of subsecond precision does not affect the comparison. case p @ Equality(left @ StringType(), right @ TimestampType()) => p.makeCopy(Array(Cast(left, TimestampType), right)) case p @ Equality(left @ TimestampType(), right @ StringType()) => p.makeCopy(Array(left, Cast(right, TimestampType))) case p @ BinaryComparison(left, right) if findCommonTypeForBinaryComparison(left.dataType, right.dataType, conf).isDefined => val commonType = findCommonTypeForBinaryComparison(left.dataType, right.dataType, conf).get p.makeCopy(Array(castExpr(left, commonType), castExpr(right, commonType))) case Abs(e @ StringType()) => Abs(Cast(e, DoubleType)) case Sum(e @ StringType()) => Sum(Cast(e, DoubleType)) case Average(e @ StringType()) => Average(Cast(e, DoubleType)) case StddevPop(e @ StringType()) => StddevPop(Cast(e, DoubleType)) case StddevSamp(e @ StringType()) => StddevSamp(Cast(e, DoubleType)) case UnaryMinus(e @ StringType()) => UnaryMinus(Cast(e, DoubleType)) case UnaryPositive(e @ StringType()) => UnaryPositive(Cast(e, DoubleType)) case VariancePop(e @ StringType()) => VariancePop(Cast(e, DoubleType)) case VarianceSamp(e @ StringType()) => VarianceSamp(Cast(e, DoubleType)) case Skewness(e @ StringType()) => Skewness(Cast(e, DoubleType)) case Kurtosis(e @ StringType()) => Kurtosis(Cast(e, DoubleType)) } } /** * Handles type coercion for both IN expression with subquery and IN * expressions without subquery. * 1. In the first case, find the common type by comparing the left hand side (LHS) * expression types against corresponding right hand side (RHS) expression derived * from the subquery expression's plan output. Inject appropriate casts in the * LHS and RHS side of IN expression. * * 2. In the second case, convert the value and in list expressions to the * common operator type by looking at all the argument types and finding * the closest one that all the arguments can be cast to. When no common * operator type is found the original expression will be returned and an * Analysis Exception will be raised at the type checking phase. */ case class InConversion(conf: SQLConf) extends TypeCoercionRule { override protected def coerceTypes( plan: LogicalPlan): LogicalPlan = plan resolveExpressions { // Skip nodes who's children have not been resolved yet. case e if !e.childrenResolved => e // Handle type casting required between value expression and subquery output // in IN subquery. case i @ InSubquery(lhs, ListQuery(sub, children, exprId, _)) if !i.resolved && lhs.length == sub.output.length => // LHS is the value expressions of IN subquery. // RHS is the subquery output. 
val rhs = sub.output val commonTypes = lhs.zip(rhs).flatMap { case (l, r) => findCommonTypeForBinaryComparison(l.dataType, r.dataType, conf) .orElse(findTightestCommonType(l.dataType, r.dataType)) } // The number of columns/expressions must match between LHS and RHS of an // IN subquery expression. if (commonTypes.length == lhs.length) { val castedRhs = rhs.zip(commonTypes).map { case (e, dt) if e.dataType != dt => Alias(Cast(e, dt), e.name)() case (e, _) => e } val newLhs = lhs.zip(commonTypes).map { case (e, dt) if e.dataType != dt => Cast(e, dt) case (e, _) => e } val newSub = Project(castedRhs, sub) InSubquery(newLhs, ListQuery(newSub, children, exprId, newSub.output)) } else { i } case i @ In(a, b) if b.exists(_.dataType != a.dataType) => findWiderCommonType(i.children.map(_.dataType)) match { case Some(finalDataType) => i.withNewChildren(i.children.map(Cast(_, finalDataType))) case None => i } } } /** * Changes numeric values to booleans so that expressions like true = 1 can be evaluated. */ object BooleanEquality extends Rule[LogicalPlan] { private val trueValues = Seq(1.toByte, 1.toShort, 1, 1L, Decimal.ONE) private val falseValues = Seq(0.toByte, 0.toShort, 0, 0L, Decimal.ZERO) def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions { // Skip nodes who's children have not been resolved yet. case e if !e.childrenResolved => e // Hive treats (true = 1) as true and (false = 0) as true, // all other cases are considered as false. // We may simplify the expression if one side is literal numeric values // TODO: Maybe these rules should go into the optimizer. case EqualTo(bool @ BooleanType(), Literal(value, _: NumericType)) if trueValues.contains(value) => bool case EqualTo(bool @ BooleanType(), Literal(value, _: NumericType)) if falseValues.contains(value) => Not(bool) case EqualTo(Literal(value, _: NumericType), bool @ BooleanType()) if trueValues.contains(value) => bool case EqualTo(Literal(value, _: NumericType), bool @ BooleanType()) if falseValues.contains(value) => Not(bool) case EqualNullSafe(bool @ BooleanType(), Literal(value, _: NumericType)) if trueValues.contains(value) => And(IsNotNull(bool), bool) case EqualNullSafe(bool @ BooleanType(), Literal(value, _: NumericType)) if falseValues.contains(value) => And(IsNotNull(bool), Not(bool)) case EqualNullSafe(Literal(value, _: NumericType), bool @ BooleanType()) if trueValues.contains(value) => And(IsNotNull(bool), bool) case EqualNullSafe(Literal(value, _: NumericType), bool @ BooleanType()) if falseValues.contains(value) => And(IsNotNull(bool), Not(bool)) case EqualTo(left @ BooleanType(), right @ NumericType()) => EqualTo(Cast(left, right.dataType), right) case EqualTo(left @ NumericType(), right @ BooleanType()) => EqualTo(left, Cast(right, left.dataType)) case EqualNullSafe(left @ BooleanType(), right @ NumericType()) => EqualNullSafe(Cast(left, right.dataType), right) case EqualNullSafe(left @ NumericType(), right @ BooleanType()) => EqualNullSafe(left, Cast(right, left.dataType)) } } /** * This ensure that the types for various functions are as expected. */ object FunctionArgumentConversion extends TypeCoercionRule { override protected def coerceTypes( plan: LogicalPlan): LogicalPlan = plan resolveExpressions { // Skip nodes who's children have not been resolved yet. 
case e if !e.childrenResolved => e case a @ CreateArray(children) if !haveSameType(children.map(_.dataType)) => val types = children.map(_.dataType) findWiderCommonType(types) match { case Some(finalDataType) => CreateArray(children.map(castIfNotSameType(_, finalDataType))) case None => a } case c @ Concat(children) if children.forall(c => ArrayType.acceptsType(c.dataType)) && !haveSameType(c.inputTypesForMerging) => val types = children.map(_.dataType) findWiderCommonType(types) match { case Some(finalDataType) => Concat(children.map(castIfNotSameType(_, finalDataType))) case None => c } case aj @ ArrayJoin(arr, d, nr) if !ArrayType(StringType).acceptsType(arr.dataType) && ArrayType.acceptsType(arr.dataType) => val containsNull = arr.dataType.asInstanceOf[ArrayType].containsNull ImplicitTypeCasts.implicitCast(arr, ArrayType(StringType, containsNull)) match { case Some(castedArr) => ArrayJoin(castedArr, d, nr) case None => aj } case s @ Sequence(_, _, _, timeZoneId) if !haveSameType(s.coercibleChildren.map(_.dataType)) => val types = s.coercibleChildren.map(_.dataType) findWiderCommonType(types) match { case Some(widerDataType) => s.castChildrenTo(widerDataType) case None => s } case m @ MapConcat(children) if children.forall(c => MapType.acceptsType(c.dataType)) && !haveSameType(m.inputTypesForMerging) => val types = children.map(_.dataType) findWiderCommonType(types) match { case Some(finalDataType) => MapConcat(children.map(castIfNotSameType(_, finalDataType))) case None => m } case m @ CreateMap(children) if m.keys.length == m.values.length && (!haveSameType(m.keys.map(_.dataType)) || !haveSameType(m.values.map(_.dataType))) => val keyTypes = m.keys.map(_.dataType) val newKeys = findWiderCommonType(keyTypes) match { case Some(finalDataType) => m.keys.map(castIfNotSameType(_, finalDataType)) case None => m.keys } val valueTypes = m.values.map(_.dataType) val newValues = findWiderCommonType(valueTypes) match { case Some(finalDataType) => m.values.map(castIfNotSameType(_, finalDataType)) case None => m.values } CreateMap(newKeys.zip(newValues).flatMap { case (k, v) => Seq(k, v) }) // Promote SUM, SUM DISTINCT and AVERAGE to largest types to prevent overflows. case s @ Sum(e @ DecimalType()) => s // Decimal is already the biggest. case Sum(e @ IntegralType()) if e.dataType != LongType => Sum(Cast(e, LongType)) case Sum(e @ FractionalType()) if e.dataType != DoubleType => Sum(Cast(e, DoubleType)) case s @ Average(e @ DecimalType()) => s // Decimal is already the biggest. case Average(e @ IntegralType()) if e.dataType != LongType => Average(Cast(e, LongType)) case Average(e @ FractionalType()) if e.dataType != DoubleType => Average(Cast(e, DoubleType)) // Hive lets you do aggregation of timestamps... for some reason case Sum(e @ TimestampType()) => Sum(Cast(e, DoubleType)) case Average(e @ TimestampType()) => Average(Cast(e, DoubleType)) // Coalesce should return the first non-null value, which could be any column // from the list. So we need to make sure the return type is deterministic and // compatible with every child column. 
case c @ Coalesce(es) if !haveSameType(c.inputTypesForMerging) => val types = es.map(_.dataType) findWiderCommonType(types) match { case Some(finalDataType) => Coalesce(es.map(castIfNotSameType(_, finalDataType))) case None => c } // When finding wider type for `Greatest` and `Least`, we should handle decimal types even if // we need to truncate, but we should not promote one side to string if the other side is // string.g case g @ Greatest(children) if !haveSameType(g.inputTypesForMerging) => val types = children.map(_.dataType) findWiderTypeWithoutStringPromotion(types) match { case Some(finalDataType) => Greatest(children.map(castIfNotSameType(_, finalDataType))) case None => g } case l @ Least(children) if !haveSameType(l.inputTypesForMerging) => val types = children.map(_.dataType) findWiderTypeWithoutStringPromotion(types) match { case Some(finalDataType) => Least(children.map(castIfNotSameType(_, finalDataType))) case None => l } case NaNvl(l, r) if l.dataType == DoubleType && r.dataType == FloatType => NaNvl(l, Cast(r, DoubleType)) case NaNvl(l, r) if l.dataType == FloatType && r.dataType == DoubleType => NaNvl(Cast(l, DoubleType), r) case NaNvl(l, r) if r.dataType == NullType => NaNvl(l, Cast(r, l.dataType)) } } /** * Hive only performs integral division with the DIV operator. The arguments to / are always * converted to fractional types. */ object Division extends TypeCoercionRule { override protected def coerceTypes( plan: LogicalPlan): LogicalPlan = plan resolveExpressions { // Skip nodes who has not been resolved yet, // as this is an extra rule which should be applied at last. case e if !e.childrenResolved => e // Decimal and Double remain the same case d: Divide if d.dataType == DoubleType => d case d: Divide if d.dataType.isInstanceOf[DecimalType] => d case Divide(left, right) if isNumericOrNull(left) && isNumericOrNull(right) => Divide(Cast(left, DoubleType), Cast(right, DoubleType)) } private def isNumericOrNull(ex: Expression): Boolean = { // We need to handle null types in case a query contains null literals. ex.dataType.isInstanceOf[NumericType] || ex.dataType == NullType } } /** * Coerces the type of different branches of a CASE WHEN statement to a common type. */ object CaseWhenCoercion extends TypeCoercionRule { override protected def coerceTypes( plan: LogicalPlan): LogicalPlan = plan resolveExpressions { case c: CaseWhen if c.childrenResolved && !haveSameType(c.inputTypesForMerging) => val maybeCommonType = findWiderCommonType(c.inputTypesForMerging) maybeCommonType.map { commonType => val newBranches = c.branches.map { case (condition, value) => (condition, castIfNotSameType(value, commonType)) } val newElseValue = c.elseValue.map(castIfNotSameType(_, commonType)) CaseWhen(newBranches, newElseValue) }.getOrElse(c) } } /** * Coerces the type of different branches of If statement to a common type. */ object IfCoercion extends TypeCoercionRule { override protected def coerceTypes( plan: LogicalPlan): LogicalPlan = plan resolveExpressions { case e if !e.childrenResolved => e // Find tightest common type for If, if the true value and false value have different types. case i @ If(pred, left, right) if !haveSameType(i.inputTypesForMerging) => findWiderTypeForTwo(left.dataType, right.dataType).map { widestType => val newLeft = castIfNotSameType(left, widestType) val newRight = castIfNotSameType(right, widestType) If(pred, newLeft, newRight) }.getOrElse(i) // If there is no applicable conversion, leave expression unchanged. 
case If(Literal(null, NullType), left, right) => If(Literal.create(null, BooleanType), left, right) case If(pred, left, right) if pred.dataType == NullType => If(Cast(pred, BooleanType), left, right) } } /** * Coerces NullTypes in the Stack expression to the column types of the corresponding positions. */ object StackCoercion extends TypeCoercionRule { override def coerceTypes(plan: LogicalPlan): LogicalPlan = plan resolveExpressions { case s @ Stack(children) if s.childrenResolved && s.hasFoldableNumRows => Stack(children.zipWithIndex.map { // The first child is the number of rows for stack. case (e, 0) => e case (Literal(null, NullType), index: Int) => Literal.create(null, s.findDataType(index)) case (e, _) => e }) } } /** * Coerces the types of [[Concat]] children to expected ones. * * If `spark.sql.function.concatBinaryAsString` is false and all children types are binary, * the expected types are binary. Otherwise, the expected ones are strings. */ case class ConcatCoercion(conf: SQLConf) extends TypeCoercionRule { override protected def coerceTypes(plan: LogicalPlan): LogicalPlan = { plan resolveOperators { case p => p transformExpressionsUp { // Skip nodes if unresolved or empty children case c @ Concat(children) if !c.childrenResolved || children.isEmpty => c case c @ Concat(children) if conf.concatBinaryAsString || !children.map(_.dataType).forall(_ == BinaryType) => val newChildren = c.children.map { e => ImplicitTypeCasts.implicitCast(e, StringType).getOrElse(e) } c.copy(children = newChildren) } } } } /** * Coerces key types of two different [[MapType]] arguments of the [[MapZipWith]] expression * to a common type. */ object MapZipWithCoercion extends TypeCoercionRule { override protected def coerceTypes(plan: LogicalPlan): LogicalPlan = plan resolveExpressions { // Lambda function isn't resolved when the rule is executed. case m @ MapZipWith(left, right, function) if m.arguments.forall(a => a.resolved && MapType.acceptsType(a.dataType)) && !m.leftKeyType.sameType(m.rightKeyType) => findWiderTypeForTwo(m.leftKeyType, m.rightKeyType) match { case Some(finalKeyType) if !Cast.forceNullable(m.leftKeyType, finalKeyType) && !Cast.forceNullable(m.rightKeyType, finalKeyType) => val newLeft = castIfNotSameType( left, MapType(finalKeyType, m.leftValueType, m.leftValueContainsNull)) val newRight = castIfNotSameType( right, MapType(finalKeyType, m.rightValueType, m.rightValueContainsNull)) MapZipWith(newLeft, newRight, function) case _ => m } } } /** * Coerces the types of [[Elt]] children to expected ones. * * If `spark.sql.function.eltOutputAsString` is false and all children types are binary, * the expected types are binary. Otherwise, the expected ones are strings. 
*/ case class EltCoercion(conf: SQLConf) extends TypeCoercionRule { override protected def coerceTypes(plan: LogicalPlan): LogicalPlan = { plan resolveOperators { case p => p transformExpressionsUp { // Skip nodes if unresolved or not enough children case c @ Elt(children) if !c.childrenResolved || children.size < 2 => c case c @ Elt(children) => val index = children.head val newIndex = ImplicitTypeCasts.implicitCast(index, IntegerType).getOrElse(index) val newInputs = if (conf.eltOutputAsString || !children.tail.map(_.dataType).forall(_ == BinaryType)) { children.tail.map { e => ImplicitTypeCasts.implicitCast(e, StringType).getOrElse(e) } } else { children.tail } c.copy(children = newIndex +: newInputs) } } } } /** * Turns Add/Subtract of DateType/TimestampType/StringType and CalendarIntervalType * to TimeAdd/TimeSub */ object DateTimeOperations extends Rule[LogicalPlan] { private val acceptedTypes = Seq(DateType, TimestampType, StringType) def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions { // Skip nodes who's children have not been resolved yet. case e if !e.childrenResolved => e case Add(l @ CalendarIntervalType(), r) if acceptedTypes.contains(r.dataType) => Cast(TimeAdd(r, l), r.dataType) case Add(l, r @ CalendarIntervalType()) if acceptedTypes.contains(l.dataType) => Cast(TimeAdd(l, r), l.dataType) case Subtract(l, r @ CalendarIntervalType()) if acceptedTypes.contains(l.dataType) => Cast(TimeSub(l, r), l.dataType) } } /** * Casts types according to the expected input types for [[Expression]]s. */ object ImplicitTypeCasts extends TypeCoercionRule { override protected def coerceTypes( plan: LogicalPlan): LogicalPlan = plan resolveExpressions { // Skip nodes who's children have not been resolved yet. case e if !e.childrenResolved => e case b @ BinaryOperator(left, right) if left.dataType != right.dataType => findTightestCommonType(left.dataType, right.dataType).map { commonType => if (b.inputType.acceptsType(commonType)) { // If the expression accepts the tightest common type, cast to that. val newLeft = if (left.dataType == commonType) left else Cast(left, commonType) val newRight = if (right.dataType == commonType) right else Cast(right, commonType) b.withNewChildren(Seq(newLeft, newRight)) } else { // Otherwise, don't do anything with the expression. b } }.getOrElse(b) // If there is no applicable conversion, leave expression unchanged. case e: ImplicitCastInputTypes if e.inputTypes.nonEmpty => val children: Seq[Expression] = e.children.zip(e.inputTypes).map { case (in, expected) => // If we cannot do the implicit cast, just use the original input. implicitCast(in, expected).getOrElse(in) } e.withNewChildren(children) case e: ExpectsInputTypes if e.inputTypes.nonEmpty => // Convert NullType into some specific target type for ExpectsInputTypes that don't do // general implicit casting. val children: Seq[Expression] = e.children.zip(e.inputTypes).map { case (in, expected) => if (in.dataType == NullType && !expected.acceptsType(NullType)) { Literal.create(null, expected.defaultConcreteType) } else { in } } e.withNewChildren(children) } /** * Given an expected data type, try to cast the expression and return the cast expression. * * If the expression already fits the input type, we simply return the expression itself. * If the expression has an incompatible type that cannot be implicitly cast, return None. 
*/ def implicitCast(e: Expression, expectedType: AbstractDataType): Option[Expression] = { implicitCast(e.dataType, expectedType).map { dt => if (dt == e.dataType) e else Cast(e, dt) } } private def implicitCast(inType: DataType, expectedType: AbstractDataType): Option[DataType] = { // Note that ret is nullable to avoid typing a lot of Some(...) in this local scope. // We wrap immediately an Option after this. @Nullable val ret: DataType = (inType, expectedType) match { // If the expected type is already a parent of the input type, no need to cast. case _ if expectedType.acceptsType(inType) => inType // Cast null type (usually from null literals) into target types case (NullType, target) => target.defaultConcreteType // If the function accepts any numeric type and the input is a string, we follow the hive // convention and cast that input into a double case (StringType, NumericType) => NumericType.defaultConcreteType // Implicit cast among numeric types. When we reach here, input type is not acceptable. // If input is a numeric type but not decimal, and we expect a decimal type, // cast the input to decimal. case (d: NumericType, DecimalType) => DecimalType.forType(d) // For any other numeric types, implicitly cast to each other, e.g. long -> int, int -> long case (_: NumericType, target: NumericType) => target // Implicit cast between date time types case (DateType, TimestampType) => TimestampType case (TimestampType, DateType) => DateType // Implicit cast from/to string case (StringType, DecimalType) => DecimalType.SYSTEM_DEFAULT case (StringType, target: NumericType) => target case (StringType, DateType) => DateType case (StringType, TimestampType) => TimestampType case (StringType, BinaryType) => BinaryType // Cast any atomic type to string. case (any: AtomicType, StringType) if any != StringType => StringType // When we reach here, input type is not acceptable for any types in this type collection, // try to find the first one we can implicitly cast. case (_, TypeCollection(types)) => types.flatMap(implicitCast(inType, _)).headOption.orNull // Implicit cast between array types. // // Compare the nullabilities of the from type and the to type, check whether the cast of // the nullability is resolvable by the following rules: // 1. If the nullability of the to type is true, the cast is always allowed; // 2. If the nullability of the to type is false, and the nullability of the from type is // true, the cast is never allowed; // 3. If the nullabilities of both the from type and the to type are false, the cast is // allowed only when Cast.forceNullable(fromType, toType) is false. case (ArrayType(fromType, fn), ArrayType(toType: DataType, true)) => implicitCast(fromType, toType).map(ArrayType(_, true)).orNull case (ArrayType(fromType, true), ArrayType(toType: DataType, false)) => null case (ArrayType(fromType, false), ArrayType(toType: DataType, false)) if !Cast.forceNullable(fromType, toType) => implicitCast(fromType, toType).map(ArrayType(_, false)).orNull // Implicit cast between Map types. // Follows the same semantics of implicit casting between two array types. // Refer to documentation above. Make sure that both key and values // can not be null after the implicit cast operation by calling forceNullable // method. 
case (MapType(fromKeyType, fromValueType, fn), MapType(toKeyType, toValueType, tn)) if !Cast.forceNullable(fromKeyType, toKeyType) && Cast.resolvableNullability(fn, tn) => if (Cast.forceNullable(fromValueType, toValueType) && !tn) { null } else { val newKeyType = implicitCast(fromKeyType, toKeyType).orNull val newValueType = implicitCast(fromValueType, toValueType).orNull if (newKeyType != null && newValueType != null) { MapType(newKeyType, newValueType, tn) } else { null } } case _ => null } Option(ret) } } /** * Cast WindowFrame boundaries to the type they operate upon. */ object WindowFrameCoercion extends TypeCoercionRule { override protected def coerceTypes( plan: LogicalPlan): LogicalPlan = plan resolveExpressions { case s @ WindowSpecDefinition(_, Seq(order), SpecifiedWindowFrame(RangeFrame, lower, upper)) if order.resolved => s.copy(frameSpecification = SpecifiedWindowFrame( RangeFrame, createBoundaryCast(lower, order.dataType), createBoundaryCast(upper, order.dataType))) } private def createBoundaryCast(boundary: Expression, dt: DataType): Expression = { (boundary, dt) match { case (e: SpecialFrameBoundary, _) => e case (e, _: DateType) => e case (e, _: TimestampType) => e case (e: Expression, t) if e.dataType != t && Cast.canCast(e.dataType, t) => Cast(e, t) case _ => boundary } } } } trait TypeCoercionRule extends Rule[LogicalPlan] with Logging { /** * Applies any changes to [[AttributeReference]] data types that are made by the transform method * to instances higher in the query tree. */ def apply(plan: LogicalPlan): LogicalPlan = { val newPlan = coerceTypes(plan) if (plan.fastEquals(newPlan)) { plan } else { propagateTypes(newPlan) } } protected def coerceTypes(plan: LogicalPlan): LogicalPlan private def propagateTypes(plan: LogicalPlan): LogicalPlan = plan resolveOperatorsUp { // No propagation required for leaf nodes. case q: LogicalPlan if q.children.isEmpty => q // Don't propagate types from unresolved children. case q: LogicalPlan if !q.childrenResolved => q case q: LogicalPlan => val inputMap = q.inputSet.toSeq.map(a => (a.exprId, a)).toMap q transformExpressions { case a: AttributeReference => inputMap.get(a.exprId) match { // This can happen when an Attribute reference is born in a non-leaf node, for // example due to a call to an external script like in the Transform operator. // TODO: Perhaps those should actually be aliases? case None => a // Leave the same if the dataTypes match. case Some(newType) if a.dataType == newType.dataType => a case Some(newType) => logDebug( s"Promoting $a from ${a.dataType} to ${newType.dataType} in ${q.simpleString}") newType } } } }
michalsenkyr/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala
Scala
apache-2.0
46,490
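A deliberately simplified, self-contained sketch (not Spark code) of the two widening cases the classdoc above distinguishes, using the same Byte < Short < Int < Long < Float < Double precedence; it only illustrates the idea that case 1 never promotes to string while case 2 may.

object TypeWideningSketch {
  sealed trait T
  case object ByteT extends T; case object ShortT extends T; case object IntT extends T
  case object LongT extends T; case object FloatT extends T; case object DoubleT extends T
  case object StringT extends T

  private val numericPrecedence: IndexedSeq[T] =
    IndexedSeq(ByteT, ShortT, IntT, LongT, FloatT, DoubleT)

  // Case 1: tightest common type, no loss of precision and no string promotion.
  def tightest(a: T, b: T): Option[T] = (a, b) match {
    case _ if a == b => Some(a)
    case _ if numericPrecedence.contains(a) && numericPrecedence.contains(b) =>
      Some(numericPrecedence(numericPrecedence.lastIndexWhere(t => t == a || t == b)))
    case _ => None
  }

  // Case 2: wider type, additionally allowing promotion to string.
  def wider(a: T, b: T): Option[T] =
    tightest(a, b).orElse((a, b) match {
      case (StringT, _) | (_, StringT) => Some(StringT)
      case _ => None
    })

  def main(args: Array[String]): Unit = {
    println(tightest(IntT, LongT))   // Some(LongT): widen along the precedence list
    println(tightest(IntT, StringT)) // None: case 1 never promotes to string
    println(wider(IntT, StringT))    // Some(StringT): case 2 does
  }
}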
package eva4s package matchmaking import eva4s.util._ /** Returns the fittest individuals of `pairs` tournaments. * * There will be `pairs` tournaments to determine the pairs. Each tournament consists of * `participants` randomly chosen participants. From these participants, the fittest two are chosen * to form the pair. * * @param participants number of randomly selected individuals attending a tournament, must be * greater than or equal to 2 */ case class TournamentMatchmaker[G](participants: Int) extends Matchmaker[G] { require(participants >= 2, "participants must be greater than or equal to 2") override def apply(parents: Seq[Individual[G]], pairs: Int): Seq[IndividualP[G]] = { Vector.fill(pairs) { val winners = parents choose participants sortBy { _.fitness } take 2 (winners.head, winners.last) } } }
wookietreiber/eva4s-old
core/main/scala/matchmaking/TournamentMatchmaker.scala
Scala
gpl-3.0
860
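A standalone sketch (not eva4s code) of the tournament pairing described above: draw `participants` random candidates and keep the two fittest as a pair. Sampling with replacement and ordering by ascending fitness are simplifying assumptions that mirror the sortBy used in the original.

import scala.util.Random

object TournamentSketch {
  final case class Individual(genome: String, fitness: Double)

  def tournamentPair(parents: Seq[Individual], participants: Int,
                     rng: Random = new Random()): (Individual, Individual) = {
    require(participants >= 2, "participants must be greater than or equal to 2")
    // Sample `participants` candidates (with replacement, for simplicity).
    val candidates = Seq.fill(participants)(parents(rng.nextInt(parents.size)))
    // Keep the two best; like the original, lower fitness sorts first.
    val winners = candidates.sortBy(_.fitness).take(2)
    (winners.head, winners.last)
  }

  def main(args: Array[String]): Unit = {
    val pop = Seq(Individual("a", 3.0), Individual("b", 1.0),
                  Individual("c", 2.0), Individual("d", 5.0))
    println(Seq.fill(3)(tournamentPair(pop, participants = 3)))
  }
}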
package com.softwaremill.bootzooka.common import java.util.Random object Utils { def randomString(length: Int) = { val sb = new StringBuffer() val r = new Random() for (i <- 1 to length) { sb.append((r.nextInt(25) + 65).toChar) // A - Z } sb.toString } // see http://stackoverflow.com/questions/9655181/convert-from-byte-array-to-hex-string-in-java private val hexArray = "0123456789ABCDEF".toCharArray def toHex(bytes: Array[Byte]): String = { val hexChars = new Array[Char](bytes.length * 2) for (j <- bytes.indices) { val v = bytes(j) & 0xFF hexChars(j * 2) = hexArray(v >>> 4) hexChars(j * 2 + 1) = hexArray(v & 0x0F) } new String(hexChars) } /** * Based on scala.xml.Utility.escape. * Escapes the characters &lt; &gt; &amp; and &quot; from string. */ def escapeHtml(text: String): String = { object Escapes { /** * For reasons unclear escape and unescape are a long ways from * being logical inverses. */ val pairs = Map( "lt" -> '<', "gt" -> '>', "amp" -> '&', "quot" -> '"' // enigmatic comment explaining why this isn't escaped -- // is valid xhtml but not html, and IE doesn't know it, says jweb // "apos" -> '\\'' ) val escMap = pairs map { case (s, c) => c -> ("&%s;" format s) } val unescMap = pairs ++ Map("apos" -> '\\'') } /** * Appends escaped string to `s`. */ def escape(text: String, s: StringBuilder): StringBuilder = { // Implemented per XML spec: // http://www.w3.org/International/questions/qa-controls // imperative code 3x-4x faster than current implementation // dpp (David Pollak) 2010/02/03 val len = text.length var pos = 0 while (pos < len) { text.charAt(pos) match { case '<' => s.append("&lt;") case '>' => s.append("&gt;") case '&' => s.append("&amp;") case '"' => s.append("&quot;") case '\\n' => s.append('\\n') case '\\r' => s.append('\\r') case '\\t' => s.append('\\t') case c => if (c >= ' ') s.append(c) } pos += 1 } s } val sb = new StringBuilder escape(text, sb) sb.toString() } // Do not change this unless you understand the security issues behind timing attacks. // This method intentionally runs in constant time if the two strings have the same length. // If it didn't, it would be vulnerable to a timing attack. def constantTimeEquals(a: String, b: String): Boolean = if (a.length != b.length) { false } else { var equal = 0 for (i <- Array.range(0, a.length)) { equal |= a(i) ^ b(i) } equal == 0 } }
aywengo/bootzooka
backend/src/main/scala/com/softwaremill/bootzooka/common/Utils.scala
Scala
apache-2.0
2,828
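A hedged usage sketch (not part of the file above) exercising the four helpers defined in Utils; expected outputs are noted in the comments, with randomString being nondeterministic.

import com.softwaremill.bootzooka.common.Utils

object UtilsUsage {
  def main(args: Array[String]): Unit = {
    println(Utils.randomString(8))                     // eight random uppercase letters
    println(Utils.toHex(Array[Byte](0, 15, 16, -1)))   // "000F10FF"
    println(Utils.escapeHtml("""<a href="x">&</a>""")) // &lt;a href=&quot;x&quot;&gt;&amp;&lt;/a&gt;

    // Constant-time comparison, intended for secrets such as tokens or hashes.
    println(Utils.constantTimeEquals("s3cret", "s3cret")) // true
    println(Utils.constantTimeEquals("s3cret", "s3cr3t")) // false
  }
}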
/* * Copyright 2015 Databricks Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.databricks.spark.sql.perf.tpcds import scala.sys.process._ import org.slf4j.LoggerFactory import org.apache.spark.sql.functions._ import org.apache.spark.sql.types._ import org.apache.spark.sql.{Row, SQLContext, SaveMode} class Tables(sqlContext: SQLContext, dsdgenDir: String, scaleFactor: Int) extends Serializable { import sqlContext.implicits._ private val log = LoggerFactory.getLogger(getClass) def sparkContext = sqlContext.sparkContext val dsdgen = s"$dsdgenDir/dsdgen" case class Table(name: String, partitionColumns: Seq[String], fields: StructField*) { val schema = StructType(fields) def nonPartitioned: Table = { Table(name, Nil, fields : _*) } /** * If convertToSchema is true, the data from generator will be parsed into columns and * converted to `schema`. Otherwise, it just outputs the raw data (as a single STRING column). */ def df(convertToSchema: Boolean, numPartition: Int) = { val partitions = if (partitionColumns.isEmpty) 1 else numPartition val generatedData = { sparkContext.parallelize(1 to partitions, partitions).flatMap { i => val localToolsDir = if (new java.io.File(dsdgen).exists) { dsdgenDir } else if (new java.io.File(s"/$dsdgen").exists) { s"/$dsdgenDir" } else { sys.error(s"Could not find dsdgen at $dsdgen or /$dsdgen. Run install") } // Note: RNGSEED is the RNG seed used by the data generator. Right now, it is fixed to 100. 
val parallel = if (partitions > 1) s"-parallel $partitions -child $i" else "" val commands = Seq( "bash", "-c", s"cd $localToolsDir && ./dsdgen -table $name -filter Y -scale $scaleFactor -RNGSEED 100 $parallel") println(commands) commands.lines } } generatedData.setName(s"$name, sf=$scaleFactor, strings") val rows = generatedData.mapPartitions { iter => iter.map { l => if (convertToSchema) { val values = l.split("\\\\|", -1).dropRight(1).map { v => if (v.equals("")) { // If the string value is an empty string, we turn it to a null null } else { v } } Row.fromSeq(values) } else { Row.fromSeq(Seq(l)) } } } if (convertToSchema) { val stringData = sqlContext.createDataFrame( rows, StructType(schema.fields.map(f => StructField(f.name, StringType)))) val convertedData = { val columns = schema.fields.map { f => col(f.name).cast(f.dataType).as(f.name) } stringData.select(columns: _*) } convertedData } else { sqlContext.createDataFrame(rows, StructType(Seq(StructField("value", StringType)))) } } def useDoubleForDecimal(): Table = { val newFields = fields.map { field => val newDataType = field.dataType match { case decimal: DecimalType => DoubleType case other => other } field.copy(dataType = newDataType) } Table(name, partitionColumns, newFields:_*) } def genData( location: String, format: String, overwrite: Boolean, clusterByPartitionColumns: Boolean, filterOutNullPartitionValues: Boolean, numPartitions: Int): Unit = { val mode = if (overwrite) SaveMode.Overwrite else SaveMode.Ignore val data = df(format != "text", numPartitions) val tempTableName = s"${name}_text" data.registerTempTable(tempTableName) val writer = if (partitionColumns.nonEmpty) { if (clusterByPartitionColumns) { val columnString = data.schema.fields.map { field => field.name }.mkString(",") val partitionColumnString = partitionColumns.mkString(",") val predicates = if (filterOutNullPartitionValues) { partitionColumns.map(col => s"$col IS NOT NULL").mkString("WHERE ", " AND ", "") } else { "" } val query = s""" |SELECT | $columnString |FROM | $tempTableName |$predicates |DISTRIBUTE BY | $partitionColumnString """.stripMargin val grouped = sqlContext.sql(query) println(s"Pre-clustering with partitioning columns with query $query.") log.info(s"Pre-clustering with partitioning columns with query $query.") grouped.write } else { data.write } } else { // If the table is not partitioned, coalesce the data to a single file. data.coalesce(1).write } writer.format(format).mode(mode) if (partitionColumns.nonEmpty) { writer.partitionBy(partitionColumns : _*) } println(s"Generating table $name in database to $location with save mode $mode.") log.info(s"Generating table $name in database to $location with save mode $mode.") writer.save(location) sqlContext.dropTempTable(tempTableName) } def createExternalTable(location: String, format: String, databaseName: String, overwrite: Boolean): Unit = { val qualifiedTableName = databaseName + "." 
+ name val tableExists = sqlContext.tableNames(databaseName).contains(name) if (overwrite) { sqlContext.sql(s"DROP TABLE IF EXISTS $databaseName.$name") } if (!tableExists || overwrite) { println(s"Creating external table $name in database $databaseName using data stored in $location.") log.info(s"Creating external table $name in database $databaseName using data stored in $location.") sqlContext.createExternalTable(qualifiedTableName, location, format) } } def createTemporaryTable(location: String, format: String): Unit = { println(s"Creating temporary table $name using data stored in $location.") log.info(s"Creating temporary table $name using data stored in $location.") sqlContext.read.format(format).load(location).registerTempTable(name) } } def genData( location: String, format: String, overwrite: Boolean, partitionTables: Boolean, useDoubleForDecimal: Boolean, clusterByPartitionColumns: Boolean, filterOutNullPartitionValues: Boolean, tableFilter: String = "", numPartitions: Int = 100): Unit = { var tablesToBeGenerated = if (partitionTables) { tables } else { tables.map(_.nonPartitioned) } if (!tableFilter.isEmpty) { tablesToBeGenerated = tablesToBeGenerated.filter(_.name == tableFilter) if (tablesToBeGenerated.isEmpty) { throw new RuntimeException("Bad table name filter: " + tableFilter) } } val withSpecifiedDataType = if (useDoubleForDecimal) { tablesToBeGenerated.map(_.useDoubleForDecimal()) } else { tablesToBeGenerated } withSpecifiedDataType.foreach { table => val tableLocation = s"$location/${table.name}" table.genData(tableLocation, format, overwrite, clusterByPartitionColumns, filterOutNullPartitionValues, numPartitions) } } def createExternalTables(location: String, format: String, databaseName: String, overwrite: Boolean, tableFilter: String = ""): Unit = { val filtered = if (tableFilter.isEmpty) { tables } else { tables.filter(_.name == tableFilter) } sqlContext.sql(s"CREATE DATABASE IF NOT EXISTS $databaseName") filtered.foreach { table => val tableLocation = s"$location/${table.name}" table.createExternalTable(tableLocation, format, databaseName, overwrite) } sqlContext.sql(s"USE $databaseName") println(s"The current database has been set to $databaseName.") log.info(s"The current database has been set to $databaseName.") } def createTemporaryTables(location: String, format: String, tableFilter: String = ""): Unit = { val filtered = if (tableFilter.isEmpty) { tables } else { tables.filter(_.name == tableFilter) } filtered.foreach { table => val tableLocation = s"$location/${table.name}" table.createTemporaryTable(tableLocation, format) } } val tables = Seq( Table("catalog_sales", partitionColumns = "cs_sold_date_sk" :: Nil, 'cs_sold_date_sk .int, 'cs_sold_time_sk .int, 'cs_ship_date_sk .int, 'cs_bill_customer_sk .int, 'cs_bill_cdemo_sk .int, 'cs_bill_hdemo_sk .int, 'cs_bill_addr_sk .int, 'cs_ship_customer_sk .int, 'cs_ship_cdemo_sk .int, 'cs_ship_hdemo_sk .int, 'cs_ship_addr_sk .int, 'cs_call_center_sk .int, 'cs_catalog_page_sk .int, 'cs_ship_mode_sk .int, 'cs_warehouse_sk .int, 'cs_item_sk .int, 'cs_promo_sk .int, 'cs_order_number .int, 'cs_quantity .int, 'cs_wholesale_cost .decimal(7,2), 'cs_list_price .decimal(7,2), 'cs_sales_price .decimal(7,2), 'cs_ext_discount_amt .decimal(7,2), 'cs_ext_sales_price .decimal(7,2), 'cs_ext_wholesale_cost .decimal(7,2), 'cs_ext_list_price .decimal(7,2), 'cs_ext_tax .decimal(7,2), 'cs_coupon_amt .decimal(7,2), 'cs_ext_ship_cost .decimal(7,2), 'cs_net_paid .decimal(7,2), 'cs_net_paid_inc_tax .decimal(7,2), 'cs_net_paid_inc_ship 
.decimal(7,2), 'cs_net_paid_inc_ship_tax .decimal(7,2), 'cs_net_profit .decimal(7,2)), Table("catalog_returns", partitionColumns = "cr_returned_date_sk" :: Nil, 'cr_returned_date_sk .int, 'cr_returned_time_sk .int, 'cr_item_sk .int, 'cr_refunded_customer_sk .int, 'cr_refunded_cdemo_sk .int, 'cr_refunded_hdemo_sk .int, 'cr_refunded_addr_sk .int, 'cr_returning_customer_sk .int, 'cr_returning_cdemo_sk .int, 'cr_returning_hdemo_sk .int, 'cr_returning_addr_sk .int, 'cr_call_center_sk .int, 'cr_catalog_page_sk .int, 'cr_ship_mode_sk .int, 'cr_warehouse_sk .int, 'cr_reason_sk .int, 'cr_order_number .int, 'cr_return_quantity .int, 'cr_return_amount .decimal(7,2), 'cr_return_tax .decimal(7,2), 'cr_return_amt_inc_tax .decimal(7,2), 'cr_fee .decimal(7,2), 'cr_return_ship_cost .decimal(7,2), 'cr_refunded_cash .decimal(7,2), 'cr_reversed_charge .decimal(7,2), 'cr_store_credit .decimal(7,2), 'cr_net_loss .decimal(7,2)), Table("inventory", partitionColumns = "inv_date_sk" :: Nil, 'inv_date_sk .int, 'inv_item_sk .int, 'inv_warehouse_sk .int, 'inv_quantity_on_hand .int), Table("store_sales", partitionColumns = "ss_sold_date_sk" :: Nil, 'ss_sold_date_sk .int, 'ss_sold_time_sk .int, 'ss_item_sk .int, 'ss_customer_sk .int, 'ss_cdemo_sk .int, 'ss_hdemo_sk .int, 'ss_addr_sk .int, 'ss_store_sk .int, 'ss_promo_sk .int, 'ss_ticket_number .int, 'ss_quantity .int, 'ss_wholesale_cost .decimal(7,2), 'ss_list_price .decimal(7,2), 'ss_sales_price .decimal(7,2), 'ss_ext_discount_amt .decimal(7,2), 'ss_ext_sales_price .decimal(7,2), 'ss_ext_wholesale_cost.decimal(7,2), 'ss_ext_list_price .decimal(7,2), 'ss_ext_tax .decimal(7,2), 'ss_coupon_amt .decimal(7,2), 'ss_net_paid .decimal(7,2), 'ss_net_paid_inc_tax .decimal(7,2), 'ss_net_profit .decimal(7,2)), Table("store_returns", partitionColumns = "sr_returned_date_sk" ::Nil, 'sr_returned_date_sk .long, 'sr_return_time_sk .long, 'sr_item_sk .long, 'sr_customer_sk .long, 'sr_cdemo_sk .long, 'sr_hdemo_sk .long, 'sr_addr_sk .long, 'sr_store_sk .long, 'sr_reason_sk .long, 'sr_ticket_number .long, 'sr_return_quantity .long, 'sr_return_amt .decimal(7,2), 'sr_return_tax .decimal(7,2), 'sr_return_amt_inc_tax.decimal(7,2), 'sr_fee .decimal(7,2), 'sr_return_ship_cost .decimal(7,2), 'sr_refunded_cash .decimal(7,2), 'sr_reversed_charge .decimal(7,2), 'sr_store_credit .decimal(7,2), 'sr_net_loss .decimal(7,2)), Table("web_sales", partitionColumns = "ws_sold_date_sk" :: Nil, 'ws_sold_date_sk .int, 'ws_sold_time_sk .int, 'ws_ship_date_sk .int, 'ws_item_sk .int, 'ws_bill_customer_sk .int, 'ws_bill_cdemo_sk .int, 'ws_bill_hdemo_sk .int, 'ws_bill_addr_sk .int, 'ws_ship_customer_sk .int, 'ws_ship_cdemo_sk .int, 'ws_ship_hdemo_sk .int, 'ws_ship_addr_sk .int, 'ws_web_page_sk .int, 'ws_web_site_sk .int, 'ws_ship_mode_sk .int, 'ws_warehouse_sk .int, 'ws_promo_sk .int, 'ws_order_number .int, 'ws_quantity .int, 'ws_wholesale_cost .decimal(7,2), 'ws_list_price .decimal(7,2), 'ws_sales_price .decimal(7,2), 'ws_ext_discount_amt .decimal(7,2), 'ws_ext_sales_price .decimal(7,2), 'ws_ext_wholesale_cost .decimal(7,2), 'ws_ext_list_price .decimal(7,2), 'ws_ext_tax .decimal(7,2), 'ws_coupon_amt .decimal(7,2), 'ws_ext_ship_cost .decimal(7,2), 'ws_net_paid .decimal(7,2), 'ws_net_paid_inc_tax .decimal(7,2), 'ws_net_paid_inc_ship .decimal(7,2), 'ws_net_paid_inc_ship_tax .decimal(7,2), 'ws_net_profit .decimal(7,2)), Table("web_returns", partitionColumns = "wr_returned_date_sk" ::Nil, 'wr_returned_date_sk .long, 'wr_returned_time_sk .long, 'wr_item_sk .long, 'wr_refunded_customer_sk .long, 'wr_refunded_cdemo_sk 
.long, 'wr_refunded_hdemo_sk .long, 'wr_refunded_addr_sk .long, 'wr_returning_customer_sk .long, 'wr_returning_cdemo_sk .long, 'wr_returning_hdemo_sk .long, 'wr_returning_addr_sk .long, 'wr_web_page_sk .long, 'wr_reason_sk .long, 'wr_order_number .long, 'wr_return_quantity .long, 'wr_return_amt .decimal(7,2), 'wr_return_tax .decimal(7,2), 'wr_return_amt_inc_tax .decimal(7,2), 'wr_fee .decimal(7,2), 'wr_return_ship_cost .decimal(7,2), 'wr_refunded_cash .decimal(7,2), 'wr_reversed_charge .decimal(7,2), 'wr_account_credit .decimal(7,2), 'wr_net_loss .decimal(7,2)), Table("call_center", partitionColumns = Nil, 'cc_call_center_sk .int, 'cc_call_center_id .string, 'cc_rec_start_date .date, 'cc_rec_end_date .date, 'cc_closed_date_sk .int, 'cc_open_date_sk .int, 'cc_name .string, 'cc_class .string, 'cc_employees .int, 'cc_sq_ft .int, 'cc_hours .string, 'cc_manager .string, 'cc_mkt_id .int, 'cc_mkt_class .string, 'cc_mkt_desc .string, 'cc_market_manager .string, 'cc_division .int, 'cc_division_name .string, 'cc_company .int, 'cc_company_name .string, 'cc_street_number .string, 'cc_street_name .string, 'cc_street_type .string, 'cc_suite_number .string, 'cc_city .string, 'cc_county .string, 'cc_state .string, 'cc_zip .string, 'cc_country .string, 'cc_gmt_offset .decimal(5,2), 'cc_tax_percentage .decimal(5,2)), Table("catalog_page", partitionColumns = Nil, 'cp_catalog_page_sk .int, 'cp_catalog_page_id .string, 'cp_start_date_sk .int, 'cp_end_date_sk .int, 'cp_department .string, 'cp_catalog_number .int, 'cp_catalog_page_number .int, 'cp_description .string, 'cp_type .string), Table("customer", partitionColumns = Nil, 'c_customer_sk .int, 'c_customer_id .string, 'c_current_cdemo_sk .int, 'c_current_hdemo_sk .int, 'c_current_addr_sk .int, 'c_first_shipto_date_sk .int, 'c_first_sales_date_sk .int, 'c_salutation .string, 'c_first_name .string, 'c_last_name .string, 'c_preferred_cust_flag .string, 'c_birth_day .int, 'c_birth_month .int, 'c_birth_year .int, 'c_birth_country .string, 'c_login .string, 'c_email_address .string, 'c_last_review_date .string), Table("customer_address", partitionColumns = Nil, 'ca_address_sk .int, 'ca_address_id .string, 'ca_street_number .string, 'ca_street_name .string, 'ca_street_type .string, 'ca_suite_number .string, 'ca_city .string, 'ca_county .string, 'ca_state .string, 'ca_zip .string, 'ca_country .string, 'ca_gmt_offset .decimal(5,2), 'ca_location_type .string), Table("customer_demographics", partitionColumns = Nil, 'cd_demo_sk .int, 'cd_gender .string, 'cd_marital_status .string, 'cd_education_status .string, 'cd_purchase_estimate .int, 'cd_credit_rating .string, 'cd_dep_count .int, 'cd_dep_employed_count .int, 'cd_dep_college_count .int), Table("date_dim", partitionColumns = Nil, 'd_date_sk .int, 'd_date_id .string, 'd_date .string, 'd_month_seq .int, 'd_week_seq .int, 'd_quarter_seq .int, 'd_year .int, 'd_dow .int, 'd_moy .int, 'd_dom .int, 'd_qoy .int, 'd_fy_year .int, 'd_fy_quarter_seq .int, 'd_fy_week_seq .int, 'd_day_name .string, 'd_quarter_name .string, 'd_holiday .string, 'd_weekend .string, 'd_following_holiday .string, 'd_first_dom .int, 'd_last_dom .int, 'd_same_day_ly .int, 'd_same_day_lq .int, 'd_current_day .string, 'd_current_week .string, 'd_current_month .string, 'd_current_quarter .string, 'd_current_year .string), Table("household_demographics", partitionColumns = Nil, 'hd_demo_sk .int, 'hd_income_band_sk .int, 'hd_buy_potential .string, 'hd_dep_count .int, 'hd_vehicle_count .int), Table("income_band", partitionColumns = Nil, 'ib_income_band_sk 
.int, 'ib_lower_bound .int, 'ib_upper_bound .int), Table("item", partitionColumns = Nil, 'i_item_sk .int, 'i_item_id .string, 'i_rec_start_date .string, 'i_rec_end_date .string, 'i_item_desc .string, 'i_current_price .decimal(7,2), 'i_wholesale_cost .decimal(7,2), 'i_brand_id .int, 'i_brand .string, 'i_class_id .int, 'i_class .string, 'i_category_id .int, 'i_category .string, 'i_manufact_id .int, 'i_manufact .string, 'i_size .string, 'i_formulation .string, 'i_color .string, 'i_units .string, 'i_container .string, 'i_manager_id .int, 'i_product_name .string), Table("promotion", partitionColumns = Nil, 'p_promo_sk .int, 'p_promo_id .string, 'p_start_date_sk .int, 'p_end_date_sk .int, 'p_item_sk .int, 'p_cost .decimal(15,2), 'p_response_target .int, 'p_promo_name .string, 'p_channel_dmail .string, 'p_channel_email .string, 'p_channel_catalog .string, 'p_channel_tv .string, 'p_channel_radio .string, 'p_channel_press .string, 'p_channel_event .string, 'p_channel_demo .string, 'p_channel_details .string, 'p_purpose .string, 'p_discount_active .string), Table("reason", partitionColumns = Nil, 'r_reason_sk .int, 'r_reason_id .string, 'r_reason_desc .string), Table("ship_mode", partitionColumns = Nil, 'sm_ship_mode_sk .int, 'sm_ship_mode_id .string, 'sm_type .string, 'sm_code .string, 'sm_carrier .string, 'sm_contract .string), Table("store", partitionColumns = Nil, 's_store_sk .int, 's_store_id .string, 's_rec_start_date .string, 's_rec_end_date .string, 's_closed_date_sk .int, 's_store_name .string, 's_number_employees .int, 's_floor_space .int, 's_hours .string, 's_manager .string, 's_market_id .int, 's_geography_class .string, 's_market_desc .string, 's_market_manager .string, 's_division_id .int, 's_division_name .string, 's_company_id .int, 's_company_name .string, 's_street_number .string, 's_street_name .string, 's_street_type .string, 's_suite_number .string, 's_city .string, 's_county .string, 's_state .string, 's_zip .string, 's_country .string, 's_gmt_offset .decimal(5,2), 's_tax_precentage .decimal(5,2)), Table("time_dim", partitionColumns = Nil, 't_time_sk .int, 't_time_id .string, 't_time .int, 't_hour .int, 't_minute .int, 't_second .int, 't_am_pm .string, 't_shift .string, 't_sub_shift .string, 't_meal_time .string), Table("warehouse", partitionColumns = Nil, 'w_warehouse_sk .int, 'w_warehouse_id .string, 'w_warehouse_name .string, 'w_warehouse_sq_ft .int, 'w_street_number .string, 'w_street_name .string, 'w_street_type .string, 'w_suite_number .string, 'w_city .string, 'w_county .string, 'w_state .string, 'w_zip .string, 'w_country .string, 'w_gmt_offset .decimal(5,2)), Table("web_page", partitionColumns = Nil, 'wp_web_page_sk .int, 'wp_web_page_id .string, 'wp_rec_start_date .date, 'wp_rec_end_date .date, 'wp_creation_date_sk .int, 'wp_access_date_sk .int, 'wp_autogen_flag .string, 'wp_customer_sk .int, 'wp_url .string, 'wp_type .string, 'wp_char_count .int, 'wp_link_count .int, 'wp_image_count .int, 'wp_max_ad_count .int), Table("web_site", partitionColumns = Nil, 'web_site_sk .int, 'web_site_id .string, 'web_rec_start_date .date, 'web_rec_end_date .date, 'web_name .string, 'web_open_date_sk .int, 'web_close_date_sk .int, 'web_class .string, 'web_manager .string, 'web_mkt_id .int, 'web_mkt_class .string, 'web_mkt_desc .string, 'web_market_manager .string, 'web_company_id .int, 'web_company_name .string, 'web_street_number .string, 'web_street_name .string, 'web_street_type .string, 'web_suite_number .string, 'web_city .string, 'web_county .string, 'web_state .string, 'web_zip 
.string, 'web_country .string, 'web_gmt_offset .string, 'web_tax_percentage .decimal(5,2)) ) }
josiahsams/spark-sql-perf-spark2.0.0
src/main/scala/com/databricks/spark/sql/perf/tpcds/Tables.scala
Scala
apache-2.0
27,893
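A minimal usage sketch for the TPC-DS helpers in the Tables.scala file above. It assumes a `Tables` instance named `tables` has already been constructed (its constructor, defined earlier in the file, supplies the `sqlContext` the methods above rely on); the output path, format and database name are placeholders.

// Sketch only: `tables` is an already-constructed Tables instance; paths and names are placeholders.
val dataLocation = "hdfs:///tmp/tpcds"
val format = "parquet"

// Generate one directory of data files per TPC-DS table, partitioned where the schema above defines partition columns.
tables.genData(
  location = dataLocation,
  format = format,
  overwrite = true,
  partitionTables = true,
  useDoubleForDecimal = false,
  clusterByPartitionColumns = true,
  filterOutNullPartitionValues = false)

// Register the generated files as external tables in a dedicated database.
tables.createExternalTables(dataLocation, format, databaseName = "tpcds", overwrite = true)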
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.log import java.util.{Collections, Locale, Properties} import scala.collection.JavaConverters._ import kafka.api.ApiVersion import kafka.message.{BrokerCompressionCodec, Message} import kafka.server.{KafkaConfig, ThrottledReplicaListValidator} import org.apache.kafka.common.errors.InvalidConfigurationException import org.apache.kafka.common.config.{AbstractConfig, ConfigDef, TopicConfig} import org.apache.kafka.common.record.TimestampType import org.apache.kafka.common.utils.Utils import scala.collection.mutable import org.apache.kafka.common.config.ConfigDef.{ConfigKey, ValidList, Validator} object Defaults { val SegmentSize = kafka.server.Defaults.LogSegmentBytes val SegmentMs = kafka.server.Defaults.LogRollHours * 60 * 60 * 1000L val SegmentJitterMs = kafka.server.Defaults.LogRollJitterHours * 60 * 60 * 1000L val FlushInterval = kafka.server.Defaults.LogFlushIntervalMessages val FlushMs = kafka.server.Defaults.LogFlushSchedulerIntervalMs val RetentionSize = kafka.server.Defaults.LogRetentionBytes val RetentionMs = kafka.server.Defaults.LogRetentionHours * 60 * 60 * 1000L val MaxMessageSize = kafka.server.Defaults.MessageMaxBytes val MaxIndexSize = kafka.server.Defaults.LogIndexSizeMaxBytes val IndexInterval = kafka.server.Defaults.LogIndexIntervalBytes val FileDeleteDelayMs = kafka.server.Defaults.LogDeleteDelayMs val DeleteRetentionMs = kafka.server.Defaults.LogCleanerDeleteRetentionMs val MinCompactionLagMs = kafka.server.Defaults.LogCleanerMinCompactionLagMs val MinCleanableDirtyRatio = kafka.server.Defaults.LogCleanerMinCleanRatio val Compact = kafka.server.Defaults.LogCleanupPolicy val CleanupPolicy = kafka.server.Defaults.LogCleanupPolicy val UncleanLeaderElectionEnable = kafka.server.Defaults.UncleanLeaderElectionEnable val MinInSyncReplicas = kafka.server.Defaults.MinInSyncReplicas val CompressionType = kafka.server.Defaults.CompressionType val PreAllocateEnable = kafka.server.Defaults.LogPreAllocateEnable val MessageFormatVersion = kafka.server.Defaults.LogMessageFormatVersion val MessageTimestampType = kafka.server.Defaults.LogMessageTimestampType val MessageTimestampDifferenceMaxMs = kafka.server.Defaults.LogMessageTimestampDifferenceMaxMs val LeaderReplicationThrottledReplicas = Collections.emptyList[String]() val FollowerReplicationThrottledReplicas = Collections.emptyList[String]() val MaxIdMapSnapshots = kafka.server.Defaults.MaxIdMapSnapshots } case class LogConfig(props: java.util.Map[_, _]) extends AbstractConfig(LogConfig.configDef, props, false) { /** * Important note: Any configuration parameter that is passed along from KafkaConfig to LogConfig * should also go in [[kafka.server.KafkaServer.copyKafkaConfigToLog]]. 
*/ val segmentSize = getInt(LogConfig.SegmentBytesProp) val segmentMs = getLong(LogConfig.SegmentMsProp) val segmentJitterMs = getLong(LogConfig.SegmentJitterMsProp) val maxIndexSize = getInt(LogConfig.SegmentIndexBytesProp) val flushInterval = getLong(LogConfig.FlushMessagesProp) val flushMs = getLong(LogConfig.FlushMsProp) val retentionSize = getLong(LogConfig.RetentionBytesProp) val retentionMs = getLong(LogConfig.RetentionMsProp) val maxMessageSize = getInt(LogConfig.MaxMessageBytesProp) val indexInterval = getInt(LogConfig.IndexIntervalBytesProp) val fileDeleteDelayMs = getLong(LogConfig.FileDeleteDelayMsProp) val deleteRetentionMs = getLong(LogConfig.DeleteRetentionMsProp) val compactionLagMs = getLong(LogConfig.MinCompactionLagMsProp) val minCleanableRatio = getDouble(LogConfig.MinCleanableDirtyRatioProp) val compact = getList(LogConfig.CleanupPolicyProp).asScala.map(_.toLowerCase(Locale.ROOT)).contains(LogConfig.Compact) val delete = getList(LogConfig.CleanupPolicyProp).asScala.map(_.toLowerCase(Locale.ROOT)).contains(LogConfig.Delete) val uncleanLeaderElectionEnable = getBoolean(LogConfig.UncleanLeaderElectionEnableProp) val minInSyncReplicas = getInt(LogConfig.MinInSyncReplicasProp) val compressionType = getString(LogConfig.CompressionTypeProp).toLowerCase(Locale.ROOT) val preallocate = getBoolean(LogConfig.PreAllocateEnableProp) val messageFormatVersion = ApiVersion(getString(LogConfig.MessageFormatVersionProp)) val messageTimestampType = TimestampType.forName(getString(LogConfig.MessageTimestampTypeProp)) val messageTimestampDifferenceMaxMs = getLong(LogConfig.MessageTimestampDifferenceMaxMsProp).longValue val LeaderReplicationThrottledReplicas = getList(LogConfig.LeaderReplicationThrottledReplicasProp) val FollowerReplicationThrottledReplicas = getList(LogConfig.FollowerReplicationThrottledReplicasProp) def randomSegmentJitter: Long = if (segmentJitterMs == 0) 0 else Utils.abs(scala.util.Random.nextInt()) % math.min(segmentJitterMs, segmentMs) } object LogConfig { def main(args: Array[String]) { println(configDef.toHtmlTable) } val SegmentBytesProp = TopicConfig.SEGMENT_BYTES_CONFIG val SegmentMsProp = TopicConfig.SEGMENT_MS_CONFIG val SegmentJitterMsProp = TopicConfig.SEGMENT_JITTER_MS_CONFIG val SegmentIndexBytesProp = TopicConfig.SEGMENT_INDEX_BYTES_CONFIG val FlushMessagesProp = TopicConfig.FLUSH_MESSAGES_INTERVAL_CONFIG val FlushMsProp = TopicConfig.FLUSH_MS_CONFIG val RetentionBytesProp = TopicConfig.RETENTION_BYTES_CONFIG val RetentionMsProp = TopicConfig.RETENTION_MS_CONFIG val MaxMessageBytesProp = TopicConfig.MAX_MESSAGE_BYTES_CONFIG val IndexIntervalBytesProp = TopicConfig.INDEX_INTERVAL_BYTES_CONFIG val DeleteRetentionMsProp = TopicConfig.DELETE_RETENTION_MS_CONFIG val MinCompactionLagMsProp = TopicConfig.MIN_COMPACTION_LAG_MS_CONFIG val FileDeleteDelayMsProp = TopicConfig.FILE_DELETE_DELAY_MS_CONFIG val MinCleanableDirtyRatioProp = TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG val CleanupPolicyProp = TopicConfig.CLEANUP_POLICY_CONFIG val Delete = TopicConfig.CLEANUP_POLICY_DELETE val Compact = TopicConfig.CLEANUP_POLICY_COMPACT val UncleanLeaderElectionEnableProp = TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG val MinInSyncReplicasProp = TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG val CompressionTypeProp = TopicConfig.COMPRESSION_TYPE_CONFIG val PreAllocateEnableProp = TopicConfig.PREALLOCATE_CONFIG val MessageFormatVersionProp = TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG val MessageTimestampTypeProp = TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG val 
MessageTimestampDifferenceMaxMsProp = TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG // Leave these out of TopicConfig for now as they are replication quota configs val LeaderReplicationThrottledReplicasProp = "leader.replication.throttled.replicas" val FollowerReplicationThrottledReplicasProp = "follower.replication.throttled.replicas" val SegmentSizeDoc = TopicConfig.SEGMENT_BYTES_DOC val SegmentMsDoc = TopicConfig.SEGMENT_MS_DOC val SegmentJitterMsDoc = TopicConfig.SEGMENT_JITTER_MS_DOC val MaxIndexSizeDoc = TopicConfig.SEGMENT_INDEX_BYTES_DOC val FlushIntervalDoc = TopicConfig.FLUSH_MESSAGES_INTERVAL_DOC val FlushMsDoc = TopicConfig.FLUSH_MS_DOC val RetentionSizeDoc = TopicConfig.RETENTION_BYTES_DOC val RetentionMsDoc = TopicConfig.RETENTION_MS_DOC val MaxMessageSizeDoc = TopicConfig.MAX_MESSAGE_BYTES_DOC val IndexIntervalDoc = TopicConfig.INDEX_INTERVAL_BYTES_DOCS val FileDeleteDelayMsDoc = TopicConfig.FILE_DELETE_DELAY_MS_DOC val DeleteRetentionMsDoc = TopicConfig.DELETE_RETENTION_MS_DOC val MinCompactionLagMsDoc = TopicConfig.MIN_COMPACTION_LAG_MS_DOC val MinCleanableRatioDoc = TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_DOC val CompactDoc = TopicConfig.CLEANUP_POLICY_DOC val UncleanLeaderElectionEnableDoc = TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_DOC val MinInSyncReplicasDoc = TopicConfig.MIN_IN_SYNC_REPLICAS_DOC val CompressionTypeDoc = TopicConfig.COMPRESSION_TYPE_DOC val PreAllocateEnableDoc = TopicConfig.PREALLOCATE_DOC val MessageFormatVersionDoc = TopicConfig.MESSAGE_FORMAT_VERSION_DOC val MessageTimestampTypeDoc = TopicConfig.MESSAGE_TIMESTAMP_TYPE_DOC val MessageTimestampDifferenceMaxMsDoc = TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_DOC val LeaderReplicationThrottledReplicasDoc = "A list of replicas for which log replication should be throttled on " + "the leader side. The list should describe a set of replicas in the form " + "[PartitionId]:[BrokerId],[PartitionId]:[BrokerId]:... or alternatively the wildcard '*' can be used to throttle " + "all replicas for this topic." val FollowerReplicationThrottledReplicasDoc = "A list of replicas for which log replication should be throttled on " + "the follower side. The list should describe a set of " + "replicas in the form " + "[PartitionId]:[BrokerId],[PartitionId]:[BrokerId]:... or alternatively the wildcard '*' can be used to throttle " + "all replicas for this topic." 
private class LogConfigDef extends ConfigDef { private final val serverDefaultConfigNames = mutable.Map[String, String]() def define(name: String, defType: ConfigDef.Type, defaultValue: Any, validator: Validator, importance: ConfigDef.Importance, doc: String, serverDefaultConfigName: String): LogConfigDef = { super.define(name, defType, defaultValue, validator, importance, doc) serverDefaultConfigNames.put(name, serverDefaultConfigName) this } def define(name: String, defType: ConfigDef.Type, defaultValue: Any, importance: ConfigDef.Importance, documentation: String, serverDefaultConfigName: String): LogConfigDef = { super.define(name, defType, defaultValue, importance, documentation) serverDefaultConfigNames.put(name, serverDefaultConfigName) this } def define(name: String, defType: ConfigDef.Type, importance: ConfigDef.Importance, documentation: String, serverDefaultConfigName: String): LogConfigDef = { super.define(name, defType, importance, documentation) serverDefaultConfigNames.put(name, serverDefaultConfigName) this } override def headers = List("Name", "Description", "Type", "Default", "Valid Values", "Server Default Property", "Importance").asJava override def getConfigValue(key: ConfigKey, headerName: String): String = { headerName match { case "Server Default Property" => serverDefaultConfigNames.get(key.name).get case _ => super.getConfigValue(key, headerName) } } def serverConfigName(configName: String): Option[String] = serverDefaultConfigNames.get(configName) } private val configDef: LogConfigDef = { import org.apache.kafka.common.config.ConfigDef.Importance._ import org.apache.kafka.common.config.ConfigDef.Range._ import org.apache.kafka.common.config.ConfigDef.Type._ import org.apache.kafka.common.config.ConfigDef.ValidString._ new LogConfigDef() .define(SegmentBytesProp, INT, Defaults.SegmentSize, atLeast(Message.MinMessageOverhead), MEDIUM, SegmentSizeDoc, KafkaConfig.LogSegmentBytesProp) .define(SegmentMsProp, LONG, Defaults.SegmentMs, atLeast(0), MEDIUM, SegmentMsDoc, KafkaConfig.LogRollTimeMillisProp) .define(SegmentJitterMsProp, LONG, Defaults.SegmentJitterMs, atLeast(0), MEDIUM, SegmentJitterMsDoc, KafkaConfig.LogRollTimeJitterMillisProp) .define(SegmentIndexBytesProp, INT, Defaults.MaxIndexSize, atLeast(0), MEDIUM, MaxIndexSizeDoc, KafkaConfig.LogIndexSizeMaxBytesProp) .define(FlushMessagesProp, LONG, Defaults.FlushInterval, atLeast(0), MEDIUM, FlushIntervalDoc, KafkaConfig.LogFlushIntervalMessagesProp) .define(FlushMsProp, LONG, Defaults.FlushMs, atLeast(0), MEDIUM, FlushMsDoc, KafkaConfig.LogFlushIntervalMsProp) // can be negative. See kafka.log.LogManager.cleanupSegmentsToMaintainSize .define(RetentionBytesProp, LONG, Defaults.RetentionSize, MEDIUM, RetentionSizeDoc, KafkaConfig.LogRetentionBytesProp) // can be negative. 
See kafka.log.LogManager.cleanupExpiredSegments .define(RetentionMsProp, LONG, Defaults.RetentionMs, MEDIUM, RetentionMsDoc, KafkaConfig.LogRetentionTimeMillisProp) .define(MaxMessageBytesProp, INT, Defaults.MaxMessageSize, atLeast(0), MEDIUM, MaxMessageSizeDoc, KafkaConfig.MessageMaxBytesProp) .define(IndexIntervalBytesProp, INT, Defaults.IndexInterval, atLeast(0), MEDIUM, IndexIntervalDoc, KafkaConfig.LogIndexIntervalBytesProp) .define(DeleteRetentionMsProp, LONG, Defaults.DeleteRetentionMs, atLeast(0), MEDIUM, DeleteRetentionMsDoc, KafkaConfig.LogCleanerDeleteRetentionMsProp) .define(MinCompactionLagMsProp, LONG, Defaults.MinCompactionLagMs, atLeast(0), MEDIUM, MinCompactionLagMsDoc, KafkaConfig.LogCleanerMinCompactionLagMsProp) .define(FileDeleteDelayMsProp, LONG, Defaults.FileDeleteDelayMs, atLeast(0), MEDIUM, FileDeleteDelayMsDoc, KafkaConfig.LogDeleteDelayMsProp) .define(MinCleanableDirtyRatioProp, DOUBLE, Defaults.MinCleanableDirtyRatio, between(0, 1), MEDIUM, MinCleanableRatioDoc, KafkaConfig.LogCleanerMinCleanRatioProp) .define(CleanupPolicyProp, LIST, Defaults.CleanupPolicy, ValidList.in(LogConfig.Compact, LogConfig.Delete), MEDIUM, CompactDoc, KafkaConfig.LogCleanupPolicyProp) .define(UncleanLeaderElectionEnableProp, BOOLEAN, Defaults.UncleanLeaderElectionEnable, MEDIUM, UncleanLeaderElectionEnableDoc, KafkaConfig.UncleanLeaderElectionEnableProp) .define(MinInSyncReplicasProp, INT, Defaults.MinInSyncReplicas, atLeast(1), MEDIUM, MinInSyncReplicasDoc, KafkaConfig.MinInSyncReplicasProp) .define(CompressionTypeProp, STRING, Defaults.CompressionType, in(BrokerCompressionCodec.brokerCompressionOptions:_*), MEDIUM, CompressionTypeDoc, KafkaConfig.CompressionTypeProp) .define(PreAllocateEnableProp, BOOLEAN, Defaults.PreAllocateEnable, MEDIUM, PreAllocateEnableDoc, KafkaConfig.LogPreAllocateProp) .define(MessageFormatVersionProp, STRING, Defaults.MessageFormatVersion, MEDIUM, MessageFormatVersionDoc, KafkaConfig.LogMessageFormatVersionProp) .define(MessageTimestampTypeProp, STRING, Defaults.MessageTimestampType, MEDIUM, MessageTimestampTypeDoc, KafkaConfig.LogMessageTimestampTypeProp) .define(MessageTimestampDifferenceMaxMsProp, LONG, Defaults.MessageTimestampDifferenceMaxMs, atLeast(0), MEDIUM, MessageTimestampDifferenceMaxMsDoc, KafkaConfig.LogMessageTimestampDifferenceMaxMsProp) .define(LeaderReplicationThrottledReplicasProp, LIST, Defaults.LeaderReplicationThrottledReplicas, ThrottledReplicaListValidator, MEDIUM, LeaderReplicationThrottledReplicasDoc, LeaderReplicationThrottledReplicasProp) .define(FollowerReplicationThrottledReplicasProp, LIST, Defaults.FollowerReplicationThrottledReplicas, ThrottledReplicaListValidator, MEDIUM, FollowerReplicationThrottledReplicasDoc, FollowerReplicationThrottledReplicasProp) } def apply(): LogConfig = LogConfig(new Properties()) def configNames: Seq[String] = configDef.names.asScala.toSeq.sorted def serverConfigName(configName: String): Option[String] = configDef.serverConfigName(configName) /** * Create a log config instance using the given properties and defaults */ def fromProps(defaults: java.util.Map[_ <: Object, _ <: Object], overrides: Properties): LogConfig = { val props = new Properties() props.putAll(defaults) props.putAll(overrides) LogConfig(props) } /** * Check that property names are valid */ def validateNames(props: Properties) { val names = configNames for(name <- props.asScala.keys) if (!names.contains(name)) throw new InvalidConfigurationException(s"Unknown topic config name: $name") } /** * Check that the given properties 
contain only valid log config names and that all values can be parsed and are valid */ def validate(props: Properties) { validateNames(props) configDef.parse(props) } }
ErikKringen/kafka
core/src/main/scala/kafka/log/LogConfig.scala
Scala
apache-2.0
16,860
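A small, hedged sketch of how the per-topic configuration in LogConfig.scala above is combined with broker-level defaults; `brokerDefaults` is an assumed `java.util.Properties` built elsewhere (for example via the `copyKafkaConfigToLog` path the class comment mentions).

import java.util.Properties
import kafka.log.LogConfig

// Per-topic overrides layered on top of the assumed broker-level defaults.
val overrides = new Properties()
overrides.put(LogConfig.CleanupPolicyProp, "compact")
overrides.put(LogConfig.SegmentBytesProp, "104857600")

// Rejects unknown property names and values that fail the validators defined above.
LogConfig.validate(overrides)

val topicConfig = LogConfig.fromProps(brokerDefaults, overrides)
topicConfig.compact      // true
topicConfig.segmentSize  // 104857600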
package me.laiseca.urlmapper

import me.laiseca.urlmapper.trie.Trie

/**
 * Created by Xabier Laiseca on 27/07/14.
 */
trait UrlMapper[T] {
  def map(url: String): Option[T]
}

object UrlMapper {
  private def toPathTrie[T](wildcard: String, recursiveWildcard: String, mappings: (String, T)*): PathTrie[T] =
    Trie((for {
      mapping <- mappings
      segments = toUrlSegments(mapping._1, wildcard, recursiveWildcard)
    } yield segments -> UrlMapping(mapping._1, segments, mapping._2)):_*)

  def apply[T](paths: PathTrie[T]): UrlMapper[T] = new DefaultUrlMapper(paths)

  def apply[T](paths: PathTrie[T], selectionAlgorithm: UrlMappingSelectionAlgorithm): UrlMapper[T] =
    new CustomizableUrlMapper(paths, selectionAlgorithm)

  def apply[T](wildcard: String, recursiveWildcard: String, mappings: (String, T)*): UrlMapper[T] =
    apply(toPathTrie(wildcard, recursiveWildcard, mappings:_*))

  def apply[T](wildcard: String, recursiveWildcard: String, selectionAlgorithm: UrlMappingSelectionAlgorithm,
      mappings: (String, T)*): UrlMapper[T] =
    apply(toPathTrie(wildcard, recursiveWildcard, mappings:_*), selectionAlgorithm)

  def apply[T](mappings: (String, T)*): UrlMapper[T] = apply("*", "**", mappings:_*)

  def apply[T](selectionAlgorithm: UrlMappingSelectionAlgorithm, mappings: (String, T)*): UrlMapper[T] =
    apply("*", "**", selectionAlgorithm, mappings:_*)
}

class CustomizableUrlMapper[T] private[urlmapper] (val paths: PathTrie[T],
    val selectionAlgorithm: UrlMappingSelectionAlgorithm) extends UrlMapper[T] {
  private val matcher = new UrlMatcher

  override def map(url: String): Option[T] = matcher.matchUrl(url, paths) match {
    case Nil => None
    case options: List[UrlMapping[T]] => Some(selectionAlgorithm.select[T](options).value)
  }
}

class DefaultUrlMapper[T] private[urlmapper] (val paths: PathTrie[T]) extends UrlMapper[T] {
  override def map(url: String): Option[T] = {
    def mapping(current: UrlSegment, rest: List[String], paths: PathTrie[T]) = {
      val subtrie = paths subtrie current
      if (subtrie.isEmpty) None else map(rest, subtrie)
    }

    def fixedMappings(segments: List[String], paths: PathTrie[T]): Option[UrlMapping[T]] =
      mapping(new FixedValueUrlSegment(segments.head), segments.tail, paths)

    def wildcardMappings(segments: List[String], paths: PathTrie[T]): Option[UrlMapping[T]] =
      mapping(WildcardUrlSegment, segments.tail, paths)

    def recursiveWildcardMapping(paths: PathTrie[T]): Option[UrlMapping[T]] =
      mapping(RecursiveWildcardUrlSegment, Nil, paths)

    def map(segments: List[String], paths: PathTrie[T]): Option[UrlMapping[T]] =
      segments match {
        case Nil => paths.value
        case _ => fixedMappings(segments, paths).
          orElse(wildcardMappings(segments, paths)).
          orElse(recursiveWildcardMapping(paths))
      }

    map(toSegments(url), paths).map(_.value)
  }
}
xabierlaiseca/scala-urlmapper
src/main/scala/me/laiseca/urlmapper/UrlMapper.scala
Scala
apache-2.0
2,919
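An illustrative sketch of the `UrlMapper` API above, using the default wildcards (`*` for a single path segment, `**` for the remaining segments). The handler strings, paths, and the exact path-splitting behaviour are assumptions, not taken from the library's documentation.

import me.laiseca.urlmapper.UrlMapper

// Map URL patterns to arbitrary values (plain strings in this sketch).
val mapper = UrlMapper(
  "/users/*/profile" -> "profile-handler",
  "/static/**"       -> "static-handler"
)

mapper.map("/users/42/profile")    // expected: Some("profile-handler")
mapper.map("/static/css/site.css") // expected: Some("static-handler")
mapper.map("/nowhere")             // expected: None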
package quizleague.firestore

object Connection {
  val apiKey = "AIzaSyBs6LpcOSpLMlKlzw0aPB6Ie-39mqlKrm8"
  val authDomain = "chiltern-ql-firestore.firebaseapp.com"
  val databaseURL = "https://chiltern-ql-firestore.firebaseio.com"
  val projectId = "chiltern-ql-firestore"
  val storageBucket = "chiltern-ql-firestore.appspot.com"
  val messagingSenderId = "891716942638"
}

// Alternative Firebase project configurations (commented out):
//object Connection {
//  val apiKey = "AIzaSyCTnCW1euWGpRohoEBESIdNEASM7rQ5gkY"
//  val authDomain = "ql-firestore-2.firebaseapp.com"
//  val databaseURL = "https://ql-firestore-2.firebaseio.com"
//  val projectId = "ql-firestore-2"
//  val storageBucket = "ql-firestore-2.appspot.com"
//  val messagingSenderId = "659931577179"
//}

//object Connection {
//  val apiKey = "AIzaSyCszf1qcAadBzISfaQPHvwB2qGyVGJU08U"
//  val authDomain = "ql-firestore-gb2.firebaseapp.com"
//  val databaseURL = "https://ql-firestore-gb2.firebaseio.com"
//  val projectId = "ql-firestore-gb2"
//  val storageBucket = "ql-firestore-gb2.appspot.com"
//  val messagingSenderId = "415442183710"
//  val appId = "1:415442183710:web:ae2f7f4f50a2c56d72700f"
//}
gumdrop/quizleague-maintain
shared/src/main/scala/quizleague/firestore/Connection.scala
Scala
mit
1,114
// Jubatus: Online machine learning framework for distributed environment // Copyright (C) 2014-2015 Preferred Networks and Nippon Telegraph and Telephone Corporation. // // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License version 2.1 as published by the Free Software Foundation. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA package us.jubat.jubaql_server.processor import org.apache.spark.SparkContext import org.apache.spark.SparkContext._ import org.apache.spark.streaming.dstream.DStream import org.apache.spark.streaming.{Seconds, StreamingContext} import org.joda.time.format.ISODateTimeFormat import org.scalatest._ import scala.collection.mutable import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.future class SlidingStreamSpec extends FeatureSpec with GivenWhenThen with ShouldMatchers with BeforeAndAfterAll { val sc = new SparkContext("local[3]", "SlidingWindow") sc.setCheckpointDir("file:///tmp/spark") var ssc: StreamingContext = null feature("Count-based sliding windows") { ssc = new StreamingContext(sc, Seconds(1)) Given("a simple DStream") type T = Char val rawData = "abcd" :: "ef" :: "ghijklmn" :: "o" :: "pq" :: "rstuvwxyz" :: Nil val rawDataQueue = new mutable.Queue[List[T]]() rawData.foreach(rawDataQueue += _.toList) val itemsQueue = rawDataQueue.map(s => sc.parallelize(s.toList)) val inputStream: DStream[T] = ssc.queueStream(itemsQueue, oneAtATime = true) // set up sliding window streams for various window parameters. // (we need to set this up before the scenario() block to compute // all of them within the same StreamingContext run...) val streams = for (length <- 1 to 4; step <- Math.max(1, length - 1) to (length + 1)) yield { val windowStream = SlidingWindow.byCount(inputStream, length, step) // add the computed windows in each interval to a mutable list // for analysis val windowsPerInterval = mutable.ListBuffer[List[(Long, List[(Long, T)])]]() windowStream.foreachRDD(rdd => { val windows = rdd.groupByKey().mapValues(_.toList).collect().toList windowsPerInterval += windows }) (length, step, windowsPerInterval) } waitUntilProcessingEnds(inputStream, rawData.size) for ((length, step, windowsPerInterval) <- streams) { checkCountResult(length, step, windowsPerInterval, rawData.map(_.toList)) } } feature("Count-based sliding windows (weird data distribution)") { ssc = new StreamingContext(sc, Seconds(1)) Given("a weird DStream") type T = Char val rawData = "ab" :: "" :: "cde" :: "f" :: "ghijklmnopqrstuvwxy" :: "z" :: Nil val rawDataQueue = new mutable.Queue[List[T]]() rawData.foreach(rawDataQueue += _.toList) val itemsQueue = rawDataQueue.map(s => sc.parallelize(s.toList)) val inputStream: DStream[T] = ssc.queueStream(itemsQueue, oneAtATime = true) // set up sliding window streams for various window parameters. // (we need to set this up before the scenario() block to compute // all of them within the same StreamingContext run...) 
val streams = for (length <- 4 to 4; step <- 1 to (length + 1)) yield { val windowStream = SlidingWindow.byCount(inputStream, length, step) // add the computed windows in each interval to a mutable list // for analysis val windowsPerInterval = mutable.ListBuffer[List[(Long, List[(Long, T)])]]() windowStream.foreachRDD(rdd => { val windows = rdd.groupByKey().mapValues(_.toList).collect().toList windowsPerInterval += windows }) (length, step, windowsPerInterval) } waitUntilProcessingEnds(inputStream, rawData.size) for ((length, step, windowsPerInterval) <- streams) { checkCountResult(length, step, windowsPerInterval, rawData.map(_.toList)) } } feature("Timestamp-based sliding windows") { ssc = new StreamingContext(sc, Seconds(1)) ssc.checkpoint("file:///tmp/spark") Given("a simple DStream") val parser = ISODateTimeFormat.dateHourMinuteSecondFraction() type T = String val rawDataUnparsed = List(("a", "2015-01-09T16:23:34.031128377"), // 1420788214031 ("b", "2015-01-09T16:23:34.035617484"), // 1420788214035 ("c", "2015-01-09T16:23:34.132088288"), // 1420788214132 ("d", "2015-01-09T16:23:35.136510729"), // 1420788215136 ("e", "2015-01-09T16:23:36.004067229"), // 1420788216004 ("f", "2015-01-09T16:23:36.039922085"), // 1420788216039 ("g", "2015-01-09T16:23:36.106793425"), // 1420788216106 ("h", "2015-01-09T16:23:36.140707388"), // 1420788216140 ("i", "2015-01-09T16:23:38.010804037")) :: // 1420788218010 List(("j", "2015-01-09T16:23:38.111558838"), // 1420788218111 ("k", "2015-01-09T16:23:40.015171109"), // 1420788220015 ("l", "2015-01-09T16:23:40.020104192"), // 1420788220020 ("m", "2015-01-09T16:23:40.116416331"), // 1420788220116 ("n", "2015-01-09T16:23:40.121018469")) :: // 1420788220121 List() :: List(("o", "2015-01-09T16:23:41.024491603")) :: // 1420788221024 List(("p", "2015-01-09T16:23:41.125453953"), // 1420788221125 ("q", "2015-01-09T16:23:42.029155492"), // 1420788222029 ("r", "2015-01-09T16:23:42.129897677"), // 1420788222129 ("s", "2015-01-09T16:23:43.033451709"), // 1420788223033 ("t", "2015-01-09T16:23:43.134198126"), // 1420788223134 ("u", "2015-01-09T16:23:44.037491118"), // 1420788224037 ("v", "2015-01-09T16:23:44.138379275")) :: // 1420788224138 List(("w", "2015-01-09T16:23:45.001737653"), // 1420788225001 ("x", "2015-01-09T16:23:45.102486660"), // 1420788225102 ("y", "2015-01-09T16:23:46.006160454"), // 1420788226006 ("z", "2015-01-09T16:23:46.107519895")) :: // 1420788226107 Nil val rawData = rawDataUnparsed.map(_.map(item => { val (data, timestamp) = item (parser.parseMillis(timestamp), data) })) val rawDataQueue = new mutable.Queue[List[(Long, T)]]() rawData.foreach(rawDataQueue += _) val itemsQueue = rawDataQueue.map(s => sc.parallelize(s)) val inputStream: DStream[(Long, T)] = ssc.queueStream(itemsQueue, oneAtATime = true) // set up sliding window streams for various window parameters. // (we need to set this up before the scenario() block to compute // all of them within the same StreamingContext run...) 
val streams = for (length <- 3 to 4; step <- Math.max(1, length - 1) to (length + 1)) yield { //val step = 3 val windowStream = SlidingWindow.byTimestamp(inputStream, length, step) // add the computed windows in each interval to a mutable list // for analysis val windowsPerInterval = mutable.ListBuffer[List[(Long, List[(Long, T)])]]() windowStream.foreachRDD(rdd => { val windows = rdd.groupByKey().mapValues(_.toList).collect().toList windowsPerInterval += windows }) (length, step, windowsPerInterval) } waitUntilProcessingEnds(inputStream, rawData.size) for ((length, step, windowsPerInterval) <- streams) { checkTimestampResult(length * 1000, step * 1000, windowsPerInterval, rawData) } } override def afterAll(): Unit = { sc.stop() super.afterAll() } protected def checkCountResult[T](length: Int, step: Int, windowsPerInterval: Seq[List[(Long, List[(Long, T)])]], rawData: List[List[T]]) = { scenario(s"Window length $length and step size $step") { When("we compute sliding windows by count") Then("the group indexes in every interval should be continuous") val minMaxIndexes = windowsPerInterval.filterNot(_.isEmpty).map(windows => { val groupIndexes = windows.map(_._1) val minGroupIndex = groupIndexes.min val maxGroupIndex = groupIndexes.max // an interval should contain all group indexes between min and max group // index (i.e., there should be no gaps within an interval) groupIndexes.sorted shouldBe ((minGroupIndex to maxGroupIndex).toList) (minGroupIndex, maxGroupIndex) }) And("they should increase between intervals") // i.e. there should be no gaps between intervals minMaxIndexes.take(minMaxIndexes.size - 1).zip(minMaxIndexes.tail).foreach(pair => { val (previousIndexes, currentIndexes) = pair previousIndexes._2 + 1 shouldBe (currentIndexes._1) }) And("all groups should have the correct elements") val sortedWindows = windowsPerInterval.reduceLeft(_ ++ _).sortBy(_._1) val ourSlidingWindows: List[Seq[T]] = sortedWindows.map(groupWithIdx => groupWithIdx._2.toSeq.sortBy(_._1).map(_._2)) // compare with the iterator.sliding() results val slidingIterator = rawData.reduceLeft(_ ++ _).iterator.sliding(length, step) val completeWindows = slidingIterator.filter(_.size == length).toList // (maybe) drop last window because that will never be completed // in our implementation val referenceSlidingWindows = completeWindows.take(ourSlidingWindows.size) ourSlidingWindows shouldBe (referenceSlidingWindows) info(ourSlidingWindows.toString().take(40) + " ...") } } protected def checkTimestampResult[T](length: Int, step: Int, windowsPerInterval: Seq[List[(Long, List[(Long, T)])]], rawData: List[List[(Long, T)]]) = { scenario(s"Window length $length and step size $step") { When("we compute sliding windows by count") Then("the group timestamps should increase between intervals") val minMaxTimestamps = windowsPerInterval.filterNot(_.isEmpty).map(windows => { val groupTimestamps = windows.map(_._1) (groupTimestamps.min, groupTimestamps.max) }) minMaxTimestamps.take(minMaxTimestamps.size - 1).zip(minMaxTimestamps.tail).foreach(pair => { val (previousTimestamps, currentTimestamps) = pair previousTimestamps._2 shouldBe <(currentTimestamps._1) }) And("all groups should have the correct elements") val sortedWindows = windowsPerInterval.reduceLeft(_ ++ _).sortBy(_._1) val ourSlidingWindows: List[(Long, Seq[(Long, T)])] = sortedWindows.map(groupWithIdx => (groupWithIdx._1, groupWithIdx._2.sortBy(_._1))) // compare with an inefficient, but probably correct implementation val allRawData = rawData.reduceLeft(_ ++ 
_).sortBy(_._1) val batchSize = ((length - 1) / step + 1) * step val minGroupTimestamp = allRawData.map(_._1).min / batchSize * batchSize val maxGroupTimestamp = allRawData.map(_._1).max / batchSize * batchSize val slidingIterator = (for (groupTimestamp <- minGroupTimestamp to maxGroupTimestamp by step) yield { val items = allRawData.filter(kv => kv._1 >= groupTimestamp && kv._1 < groupTimestamp + length ) (groupTimestamp, items) }).filterNot(_._2.isEmpty).toList // (maybe) drop last window because that will never be completed // in our implementation val referenceSlidingWindows = slidingIterator.take(ourSlidingWindows.size) ourSlidingWindows shouldBe (referenceSlidingWindows) } } protected def waitUntilProcessingEnds(stream: DStream[_], numIterations: Int) = { // count up in every interval val i = sc.accumulator(0) stream.foreachRDD(rdd => i += 1) // start processing ssc.start() // stop streaming context when i has become numIterations future { while (i.value < numIterations + 1) Thread.sleep(100) ssc.stop(stopSparkContext = false, stopGracefully = true) } // wait for termination ssc.awaitTermination() } }
jubatus/jubaql-server
processor/src/test/scala/us/jubat/jubaql_server/processor/SlidingStreamSpec.scala
Scala
lgpl-2.1
12,531
package me.venuatu.background_tracker

import android.app.Fragment
import android.os.Bundle
import android.view._
import macroid.{Contexts, IdGeneration}

class BaseFragment(layout: Int = -1, menuLayout: Int = -1) extends Fragment
  with Contexts[Fragment] with IdGeneration {

  def ctx = getActivity.asInstanceOf[BaseActivity]

  def onUiThread(block: => Unit) {
    try {
      ctx.onUiThread(block)
    } catch {
      case e: Exception => e.printStackTrace()
    }
  }

  override def onCreate(instanceState: Bundle) {
    super.onCreate(instanceState)
    setHasOptionsMenu(menuLayout != -1)
  }

  override def onCreateOptionsMenu(menu: Menu, inflater: MenuInflater) {
    super.onCreateOptionsMenu(menu, inflater)
    inflater.inflate(menuLayout, menu)
  }

  protected var view: View = null

  override def onCreateView(inflater: LayoutInflater, container: ViewGroup, instanceState: Bundle): View = {
    if (view == null && layout != -1) {
      view = inflater.inflate(layout, container, false)
    }
    view
  }
}
venuatu/background-tracker
src/main/scala/me/venuatu/background_tracker/BaseFragment.scala
Scala
unlicense
1,050
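A hypothetical subclass showing how the base fragment above is meant to be used; the resource identifiers (`R.layout.fragment_home`, `R.menu.home`) and the `HomeFragment` name are placeholders, not part of the project.

// Hypothetical subclass; resource ids are placeholders.
class HomeFragment extends BaseFragment(layout = R.layout.fragment_home, menuLayout = R.menu.home) {
  override def onCreateView(inflater: LayoutInflater, container: ViewGroup, state: Bundle): View = {
    val v = super.onCreateView(inflater, container, state) // inflates R.layout.fragment_home once and caches it
    onUiThread {
      // UI work routed through the owning BaseActivity's UI thread
    }
    v
  }
}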
package tests.admin import akka.actor.{ActorRef, ActorSystem} import akka.testkit.{ImplicitSender, TestKit} import org.scalatest.{BeforeAndAfterAll, Matchers, FlatSpecLike} import sisdn.admin.Organization import sisdn.admin.Organization._ import sisdn.common._ class AdminOrganizationSpecs(_system: ActorSystem) extends TestKit(_system) with ImplicitSender with FlatSpecLike with Matchers with BeforeAndAfterAll { def this() = this(ActorSystem("AdminOrganizationSpecs")) override def afterAll() { TestKit.shutdownActorSystem(system) } val faculty = Faculty(uuid, "title1", None, Some(uuid)) val department = Department(uuid, "id1", "dep1", None, Some(uuid)) val user = User("name", "test-org", None, None, None) "AddFaculty" should "Accept valid entry" in { val adminOrg = system.actorOf(Organization.props) adminOrg ! AddFaculty("uniq", user, faculty) expectMsg(SisdnCreated("uniq")) } it should "fail for duplicate faculty addition" in { val adminOrg = system.actorOf(Organization.props) val fac = faculty.copy(id = uuid) adminOrg ! AddFaculty("1", user, fac) expectMsg(SisdnCreated("1")) adminOrg ! AddFaculty("1", user, fac) expectMsg(SisdnInvalid("1", "Duplicate faculty")) } "UpdateFaculty" should "Successfully update existing faculty" in { val adminOrg = system.actorOf(Organization.props) val fac = faculty.copy(id = uuid) adminOrg ! AddFaculty("1", user, fac) expectMsg(SisdnCreated("1")) adminOrg ! UpdateFaculty("1", user, fac) expectMsg(SisdnUpdated("1")) } it should "Fail update of non-existing faculty" in { val adminOrg = system.actorOf(Organization.props) adminOrg ! UpdateFaculty("1", user, faculty.copy(id = "non-existing")) expectMsg(SisdnNotFound("1")) } "Add Department" should "fail if added with non-existing faculty" in { val adminOrg = system.actorOf(Organization.props) adminOrg ! AddDepartment("1", user, department) expectMsg(SisdnInvalid("1", "Faculty does not exist or is inactive")) } it should "fail if added with inactive faculty" in { val adminOrg = system.actorOf(Organization.props) val uid = uuid adminOrg ! AddFaculty("1", user, faculty.copy(id = uid, isActive = Some(false))) expectMsg(SisdnCreated("1")) adminOrg ! AddDepartment("1", user, department.copy( facultyId = uid)) expectMsg(SisdnInvalid("1", "Faculty does not exist or is inactive")) } "Organization state" should "correctly add department to state" in { val state = new State(system) state.update(DepartmentAdded("", "", department,0)) state.departments should contain (department) } it should "correctly update faculty in state" in { val state = new State(system) state.update(DepartmentAdded("", "", department,0)) state.update(DepartmentUpdated("", "", department.copy(titleTr = Some("test")),0)) val result = state.departments.find(_.id == department.id ).get.titleTr result shouldEqual Some("test") state.departments.size shouldEqual 1 } }
sisdn/backend
src/test/scala/tests/admin/AdminOrganizationSpecs.scala
Scala
agpl-3.0
3,061
/*
 * Copyright 2011-2018 GatlingCorp (http://gatling.io)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.gatling.commons.util

import io.gatling.BaseSpec

class RoundRobinSpec extends BaseSpec {

  "round robin" should "work fine with non empty Iterable" in {
    val rr = RoundRobin(Array(1, 2, 3))
    rr.next shouldBe 1
    rr.next shouldBe 2
    rr.next shouldBe 3
    rr.next shouldBe 1
    rr.next shouldBe 2
    rr.next shouldBe 3
  }

  it should "always return the same value when iterating a single value Iterable" in {
    val rr = RoundRobin(Array(1))
    rr.next shouldBe 1
    rr.next shouldBe 1
    rr.next shouldBe 1
    rr.next shouldBe 1
    rr.next shouldBe 1
    rr.next shouldBe 1
  }

  it should "throw NoSuchElementException when iterating on an empty Iterable" in {
    val rr = RoundRobin(Array.empty[Int])
    a[NoSuchElementException] should be thrownBy rr.next
  }
}
wiacekm/gatling
gatling-commons/src/test/scala/io/gatling/commons/util/RoundRobinSpec.scala
Scala
apache-2.0
1,425
package org.broadinstitute.clio.client.metadata import java.net.URI import org.broadinstitute.clio.client.dispatch.MoveExecutor.MoveOp import org.broadinstitute.clio.transfer.model.arrays.{ArraysExtensions, ArraysMetadata} class ArrayMover extends MetadataMover[ArraysMetadata] { override protected def moveMetadata( src: ArraysMetadata, destination: URI, newBasename: Option[String] ): (ArraysMetadata, Iterable[MoveOp]) = { import MetadataMover.buildFilePath val dest = src.copy( vcfPath = src.vcfPath.map( buildFilePath( _, destination, newBasename.map(_ + ArraysExtensions.VcfGzExtension) ) ), vcfIndexPath = src.vcfIndexPath.map( buildFilePath( _, destination, newBasename.map(_ + ArraysExtensions.VcfGzTbiExtension) ) ), gtcPath = src.gtcPath.map( buildFilePath( _, destination, newBasename.map(_ + ArraysExtensions.GtcExtension) ) ), // Note - the params file is named 'params.txt' - it does not get renamed. paramsPath = src.paramsPath.map( buildFilePath( _, destination ) ), fingerprintingDetailMetricsPath = src.fingerprintingDetailMetricsPath.map( buildFilePath( _, destination, newBasename.map(_ + ArraysExtensions.FingerprintingDetailMetricsExtension) ) ), fingerprintingSummaryMetricsPath = src.fingerprintingSummaryMetricsPath.map( buildFilePath( _, destination, newBasename.map(_ + ArraysExtensions.FingerprintingSummaryMetricsExtension) ) ), genotypeConcordanceContingencyMetricsPath = src.genotypeConcordanceContingencyMetricsPath.map( buildFilePath( _, destination, newBasename.map( _ + ArraysExtensions.GenotypeConcordanceContingencyMetricsExtension ) ) ), genotypeConcordanceDetailMetricsPath = src.genotypeConcordanceDetailMetricsPath.map( buildFilePath( _, destination, newBasename.map(_ + ArraysExtensions.GenotypeConcordanceDetailMetricsExtension) ) ), genotypeConcordanceSummaryMetricsPath = src.genotypeConcordanceSummaryMetricsPath.map( buildFilePath( _, destination, newBasename.map( _ + ArraysExtensions.GenotypeConcordanceSummaryMetricsExtension ) ) ), variantCallingDetailMetricsPath = src.variantCallingDetailMetricsPath.map( buildFilePath( _, destination, newBasename.map(_ + ArraysExtensions.VariantCallingDetailMetricsExtension) ) ), variantCallingSummaryMetricsPath = src.variantCallingSummaryMetricsPath.map( buildFilePath( _, destination, newBasename.map(_ + ArraysExtensions.VariantCallingSummaryMetricsExtension) ) ) ) val ops = Seq[ArraysMetadata => Iterable[URI]]( _.vcfPath, _.vcfIndexPath, _.gtcPath, _.paramsPath, _.fingerprintingDetailMetricsPath, _.fingerprintingSummaryMetricsPath, _.genotypeConcordanceContingencyMetricsPath, _.genotypeConcordanceDetailMetricsPath, _.genotypeConcordanceSummaryMetricsPath, _.variantCallingDetailMetricsPath, _.variantCallingSummaryMetricsPath ).flatMap(extractMoves(src, dest, _)) (dest, ops) } }
broadinstitute/clio
clio-client/src/main/scala/org/broadinstitute/clio/client/metadata/ArrayMover.scala
Scala
bsd-3-clause
3,620
package com.github.jeanadrien.gatling.mqtt.actions import com.github.jeanadrien.gatling.mqtt.client.{FuseSourceConnectionListener, MqttCommands} import com.github.jeanadrien.gatling.mqtt.protocol.{ConnectionSettings, MqttComponents} import io.gatling.commons.stats._ import io.gatling.commons.util.ClockSingleton._ import io.gatling.core.CoreComponents import io.gatling.core.Predef._ import io.gatling.core.action.Action import akka.pattern.ask import akka.util.Timeout import scala.concurrent.duration._ import scala.util.{Failure, Success} /** * */ class ConnectAction( mqttComponents : MqttComponents, coreComponents : CoreComponents, connectionSettings : ConnectionSettings, val next : Action ) extends MqttAction(mqttComponents, coreComponents) { import mqttComponents.system.dispatcher override val name = genName("mqttConnect") override def execute(session : Session) : Unit = recover(session) { val connectionId = genName("mqttClient") mqttComponents.mqttEngine(session, connectionSettings, connectionId).flatMap { mqtt => val requestName = "connect" logger.debug(s"${connectionId}: Execute ${requestName}") // connect implicit val timeout = Timeout(1 minute) // TODO check how to configure this val requestStartDate = nowMillis (mqtt ? MqttCommands.Connect).mapTo[MqttCommands].onComplete { case Success(MqttCommands.ConnectAck) => val connectTiming = timings(requestStartDate) statsEngine.logResponse( session, requestName, connectTiming, OK, None, None ) next ! session. set("engine", mqtt). set("connectionId", connectionId) case Failure(th) => val connectTiming = timings(requestStartDate) logger.warn(s"${connectionId}: Failed to connect to MQTT: ${th}") statsEngine.logResponse( session, requestName, connectTiming, KO, None, Some(th.getMessage) ) next ! session.markAsFailed } } } }
jeanadrien/gatling-mqtt-protocol
src/main/scala/com/github/jeanadrien/gatling/mqtt/actions/ConnectAction.scala
Scala
apache-2.0
2,512
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.scheduler import java.util.concurrent._ import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong} import scala.collection.mutable import scala.util.DynamicVariable import com.codahale.metrics.{Counter, Gauge, MetricRegistry, Timer} import org.apache.spark.{SparkConf, SparkContext} import org.apache.spark.internal.Logging import org.apache.spark.internal.config._ import org.apache.spark.metrics.MetricsSystem import org.apache.spark.metrics.source.Source import org.apache.spark.util.Utils /** * Asynchronously passes SparkListenerEvents to registered SparkListeners. * * Until `start()` is called, all posted events are only buffered. Only after this listener bus * has started will events be actually propagated to all attached listeners. This listener bus * is stopped when `stop()` is called, and it will drop further events after stopping. */ private[spark] class LiveListenerBus(conf: SparkConf) extends SparkListenerBus { self => import LiveListenerBus._ private var sparkContext: SparkContext = _ // Cap the capacity of the event queue so we get an explicit error (rather than // an OOM exception) if it's perpetually being added to more quickly than it's being drained. private val eventQueue = new LinkedBlockingQueue[SparkListenerEvent](conf.get(LISTENER_BUS_EVENT_QUEUE_CAPACITY)) private[spark] val metrics = new LiveListenerBusMetrics(conf, eventQueue) // Indicate if `start()` is called private val started = new AtomicBoolean(false) // Indicate if `stop()` is called private val stopped = new AtomicBoolean(false) /** A counter for dropped events. It will be reset every time we log it. */ private val droppedEventsCounter = new AtomicLong(0L) /** When `droppedEventsCounter` was logged last time in milliseconds. */ @volatile private var lastReportTimestamp = 0L // Indicate if we are processing some event // Guarded by `self` private var processingEvent = false private val logDroppedEvent = new AtomicBoolean(false) // A counter that represents the number of events produced and consumed in the queue private val eventLock = new Semaphore(0) private val listenerThread = new Thread(name) { setDaemon(true) override def run(): Unit = Utils.tryOrStopSparkContext(sparkContext) { LiveListenerBus.withinListenerThread.withValue(true) { val timer = metrics.eventProcessingTime while (true) { eventLock.acquire() self.synchronized { processingEvent = true } try { val event = eventQueue.poll if (event == null) { // Get out of the while loop and shutdown the daemon thread if (!stopped.get) { throw new IllegalStateException("Polling `null` from eventQueue means" + " the listener bus has been stopped. 
So `stopped` must be true") } return } val timerContext = timer.time() try { postToAll(event) } finally { timerContext.stop() } } finally { self.synchronized { processingEvent = false } } } } } } override protected def getTimer(listener: SparkListenerInterface): Option[Timer] = { metrics.getTimerForListenerClass(listener.getClass.asSubclass(classOf[SparkListenerInterface])) } /** * Start sending events to attached listeners. * * This first sends out all buffered events posted before this listener bus has started, then * listens for any additional events asynchronously while the listener bus is still running. * This should only be called once. * * @param sc Used to stop the SparkContext in case the listener thread dies. */ def start(sc: SparkContext, metricsSystem: MetricsSystem): Unit = { if (started.compareAndSet(false, true)) { sparkContext = sc metricsSystem.registerSource(metrics) listenerThread.start() } else { throw new IllegalStateException(s"$name already started!") } } def post(event: SparkListenerEvent): Unit = { if (stopped.get) { // Drop further events to make `listenerThread` exit ASAP logError(s"$name has already stopped! Dropping event $event") return } metrics.numEventsPosted.inc() val eventAdded = eventQueue.offer(event) if (eventAdded) { eventLock.release() } else { onDropEvent(event) } val droppedEvents = droppedEventsCounter.get if (droppedEvents > 0) { // Don't log too frequently if (System.currentTimeMillis() - lastReportTimestamp >= 60 * 1000) { // There may be multiple threads trying to decrease droppedEventsCounter. // Use "compareAndSet" to make sure only one thread can win. // And if another thread is increasing droppedEventsCounter, "compareAndSet" will fail and // then that thread will update it. if (droppedEventsCounter.compareAndSet(droppedEvents, 0)) { val prevLastReportTimestamp = lastReportTimestamp lastReportTimestamp = System.currentTimeMillis() logWarning(s"Dropped $droppedEvents SparkListenerEvents since " + new java.util.Date(prevLastReportTimestamp)) } } } } /** * For testing only. Wait until there are no more events in the queue, or until the specified * time has elapsed. Throw `TimeoutException` if the specified time elapsed before the queue * emptied. * Exposed for testing. */ @throws(classOf[TimeoutException]) def waitUntilEmpty(timeoutMillis: Long): Unit = { val finishTime = System.currentTimeMillis + timeoutMillis while (!queueIsEmpty) { if (System.currentTimeMillis > finishTime) { throw new TimeoutException( s"The event queue is not empty after $timeoutMillis milliseconds") } /* Sleep rather than using wait/notify, because this is used only for testing and * wait/notify add overhead in the general case. */ Thread.sleep(10) } } /** * For testing only. Return whether the listener daemon thread is still alive. * Exposed for testing. */ def listenerThreadIsAlive: Boolean = listenerThread.isAlive /** * Return whether the event queue is empty. * * The use of synchronized here guarantees that all events that once belonged to this queue * have already been processed by all attached listeners, if this returns true. */ private def queueIsEmpty: Boolean = synchronized { eventQueue.isEmpty && !processingEvent } /** * Stop the listener bus. It will wait until the queued events have been processed, but drop the * new events after stopping. 
*/ def stop(): Unit = { if (!started.get()) { throw new IllegalStateException(s"Attempted to stop $name that has not yet started!") } if (stopped.compareAndSet(false, true)) { // Call eventLock.release() so that listenerThread will poll `null` from `eventQueue` and know // `stop` is called. eventLock.release() listenerThread.join() } else { // Keep quiet } } /** * If the event queue exceeds its capacity, the new events will be dropped. The subclasses will be * notified with the dropped events. * * Note: `onDropEvent` can be called in any thread. */ def onDropEvent(event: SparkListenerEvent): Unit = { metrics.numDroppedEvents.inc() droppedEventsCounter.incrementAndGet() if (logDroppedEvent.compareAndSet(false, true)) { // Only log the following message once to avoid duplicated annoying logs. logError("Dropping SparkListenerEvent because no remaining room in event queue. " + "This likely means one of the SparkListeners is too slow and cannot keep up with " + "the rate at which tasks are being started by the scheduler.") } logTrace(s"Dropping event $event") } } private[spark] object LiveListenerBus { // Allows for Context to check whether stop() call is made within listener thread val withinListenerThread: DynamicVariable[Boolean] = new DynamicVariable[Boolean](false) /** The thread name of Spark listener bus */ val name = "SparkListenerBus" } private[spark] class LiveListenerBusMetrics( conf: SparkConf, queue: LinkedBlockingQueue[_]) extends Source with Logging { override val sourceName: String = "LiveListenerBus" override val metricRegistry: MetricRegistry = new MetricRegistry /** * The total number of events posted to the LiveListenerBus. This is a count of the total number * of events which have been produced by the application and sent to the listener bus, NOT a * count of the number of events which have been processed and delivered to listeners (or dropped * without being delivered). */ val numEventsPosted: Counter = metricRegistry.counter(MetricRegistry.name("numEventsPosted")) /** * The total number of events that were dropped without being delivered to listeners. */ val numDroppedEvents: Counter = metricRegistry.counter(MetricRegistry.name("numEventsDropped")) /** * The amount of time taken to post a single event to all listeners. */ val eventProcessingTime: Timer = metricRegistry.timer(MetricRegistry.name("eventProcessingTime")) /** * The number of messages waiting in the queue. */ val queueSize: Gauge[Int] = { metricRegistry.register(MetricRegistry.name("queueSize"), new Gauge[Int]{ override def getValue: Int = queue.size() }) } // Guarded by synchronization. private val perListenerClassTimers = mutable.Map[String, Timer]() /** * Returns a timer tracking the processing time of the given listener class. * events processed by that listener. This method is thread-safe. */ def getTimerForListenerClass(cls: Class[_ <: SparkListenerInterface]): Option[Timer] = { synchronized { val className = cls.getName val maxTimed = conf.get(LISTENER_BUS_METRICS_MAX_LISTENER_CLASSES_TIMED) perListenerClassTimers.get(className).orElse { if (perListenerClassTimers.size == maxTimed) { logError(s"Not measuring processing time for listener class $className because a " + s"maximum of $maxTimed listener classes are already timed.") None } else { perListenerClassTimers(className) = metricRegistry.timer(MetricRegistry.name("listenerProcessingTime", className)) perListenerClassTimers.get(className) } } } } }
aokolnychyi/spark
core/src/main/scala/org/apache/spark/scheduler/LiveListenerBus.scala
Scala
apache-2.0
11,425
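The LiveListenerBus above documents a bounded event queue that drops events, and counts the drops, once `offer` fails. A minimal, standalone sketch of that drop-and-count pattern (not part of the Spark source, just an illustration with a tiny queue):

import java.util.concurrent.LinkedBlockingQueue
import java.util.concurrent.atomic.AtomicLong

object BoundedBusSketch {
  private val queue = new LinkedBlockingQueue[String](2) // deliberately tiny capacity
  private val dropped = new AtomicLong(0)

  def post(event: String): Unit = {
    if (!queue.offer(event)) {   // offer returns false instead of blocking when full
      dropped.incrementAndGet()  // count the drop; a real bus would also log, at most once
    }
  }

  def main(args: Array[String]): Unit = {
    Seq("e1", "e2", "e3", "e4").foreach(post)
    println(s"queued=${queue.size()} dropped=${dropped.get()}") // queued=2 dropped=2
  }
}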
package com.codacy.client.bitbucket.v2.service

import com.codacy.client.bitbucket.v2.BuildStatus
import com.codacy.client.bitbucket.client.{BitbucketClient, Request, RequestResponse}
import play.api.libs.json._

class BuildStatusServices(client: BitbucketClient) {

  /*
   * Gets a commit build status
   */
  def getBuildStatus(owner: String, repository: String, commit: String, key: String): RequestResponse[BuildStatus] = {
    val url = s"https://bitbucket.org/api/2.0/repositories/$owner/$repository/commit/$commit/statuses/build/$key"

    client.execute(Request(url, classOf[BuildStatus]))
  }

  /*
   * Create a build status for a commit
   */
  def createBuildStatus(owner: String, repository: String, commit: String, buildStatus: BuildStatus): RequestResponse[BuildStatus] = {
    val url = s"https://bitbucket.org/api/2.0/repositories/$owner/$repository/commit/$commit/statuses/build"

    val values = Map(
      "state" -> Seq(buildStatus.state.toString),
      "key" -> Seq(buildStatus.key),
      "name" -> Seq(buildStatus.name),
      "url" -> Seq(buildStatus.url),
      "description" -> Seq(buildStatus.description)
    )

    client.postForm(Request(url, classOf[BuildStatus]), values)
  }

  /*
   * Update a build status for a commit
   */
  def updateBuildStatus(owner: String, repository: String, commit: String, buildStatus: BuildStatus): RequestResponse[BuildStatus] = {
    val url = s"https://bitbucket.org/api/2.0/repositories/$owner/$repository/commit/$commit/statuses/build/${buildStatus.key}"

    val payload = Json.obj(
      "state" -> buildStatus.state,
      "name" -> buildStatus.name,
      "url" -> buildStatus.url,
      "description" -> buildStatus.description
    )

    client.putJson(Request(url, classOf[BuildStatus]), payload)
  }
}
rtfpessoa/bitbucket-scala-client
src/main/scala/com/codacy/client/bitbucket/v2/service/BuildStatusServices.scala
Scala
apache-2.0
1,763
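A hypothetical usage sketch for the service above. Only the constructor and the getBuildStatus signature come from the code; the BitbucketClient setup and all identifiers are placeholders.

import com.codacy.client.bitbucket.client.BitbucketClient
import com.codacy.client.bitbucket.v2.service.BuildStatusServices

object BuildStatusExample {
  def main(args: Array[String]): Unit = {
    // How the client is built depends on the library's auth setup; placeholder only.
    val client: BitbucketClient = ???
    val services = new BuildStatusServices(client)

    // Fetch the build status previously reported for a commit under a given key.
    val response = services.getBuildStatus(
      owner = "some-team",      // placeholder owner
      repository = "some-repo", // placeholder repository slug
      commit = "a1b2c3d",       // placeholder commit hash
      key = "CI"                // the key used when the status was created
    )
    println(response)
  }
}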
case class Foo1(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo2(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo3(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo4(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo5(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo6(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo7(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo8(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo9(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo10(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo11(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo12(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo13(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo14(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo15(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo16(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo17(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo18(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo19(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo20(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo21(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo22(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo23(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo24(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo25(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo26(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo27(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo28(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo29(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo30(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo31(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo32(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo33(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) 
case class Foo34(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo35(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo36(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo37(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo38(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo39(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) case class Foo40(x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) object Test { def stuff() = {} def test(x: Any): Unit = x match { case Foo1(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo2(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo3(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo4(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo5(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo6(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo7(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo9(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo10(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo11(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo12(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo13(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo14(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo15(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo17(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo18(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo19(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo20(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo21(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo22(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo23(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo24(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo25(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo26(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo27(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo28(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo29(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo30(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo31(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo33(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo34(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo35(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo36(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo37(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo38(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo39(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() case Foo40(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) => stuff() } }
som-snytt/dotty
tests/pos/i2903.scala
Scala
apache-2.0
6,660
package de.htwg.zeta.common.models.project.gdsl.shape.geomodel

import de.htwg.zeta.common.models.project.gdsl.shape.geomodel.Align.Horizontal.HorizontalAlignment
import de.htwg.zeta.common.models.project.gdsl.shape.geomodel.Align.Vertical.VerticalAlignment

case class Align(
    horizontal: HorizontalAlignment,
    vertical: VerticalAlignment
)

object Align {

  val default: Align = Align(Horizontal.middle, Vertical.middle)

  object Horizontal extends Enumeration {
    type HorizontalAlignment = Value
    val left, middle, right = Value
  }

  object Vertical extends Enumeration {
    type VerticalAlignment = Value
    val top, middle, bottom = Value
  }

}
Zeta-Project/zeta
api/common/src/main/scala/de/htwg/zeta/common/models/project/gdsl/shape/geomodel/Align.scala
Scala
bsd-2-clause
667
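A short usage sketch for the Align model above; the enumeration values and the default come directly from the companion object.

import de.htwg.zeta.common.models.project.gdsl.shape.geomodel.Align

object AlignExample {
  def main(args: Array[String]): Unit = {
    // Explicit alignment built from the two enumerations defined in the companion.
    val topLeft = Align(Align.Horizontal.left, Align.Vertical.top)

    // The companion's default is middle/middle.
    val fallback = Align.default

    println(topLeft)  // Align(left,top)
    println(fallback) // Align(middle,middle)
  }
}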
/* Facsimile: A Discrete-Event Simulation Library Copyright © 2004-2020, Michael J Allen. This file is part of Facsimile. Facsimile is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Facsimile is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with Facsimile. If not, see http://www.gnu.org/licenses/lgpl. The developers welcome all comments, suggestions and offers of assistance. For further information, please visit the project home page at: http://facsim.org/ Thank you for your interest in the Facsimile project! IMPORTANT NOTE: All patches (modifications to existing files and/or the addition of new files) submitted for inclusion as part of the official Facsimile code base, must comply with the published Facsimile Coding Standards. If your code fails to comply with the standard, then your patches will be rejected. For further information, please visit the coding standards at: http://facsim.org/Documentation/CodingStandards/ ======================================================================================================================== Scala source file from the org.facsim.anim.cell.test package. */ package org.facsim.anim.cell.test import org.facsim.anim.cell.RotationOrder import org.scalatest.FunSpec import scalafx.geometry.Point3D import scalafx.scene.transform.Rotate /** Test suite for the [[org.facsim.anim.cell.RotationOrder$]] object. */ class RotationOrderTest extends FunSpec { /* Test data. */ trait TestData { val validCodes = RotationOrder.minValue to RotationOrder.maxValue val validRotationOrders = Vector[RotationOrder.Value]( RotationOrder.XYZ, RotationOrder.XZY, RotationOrder.YXZ, RotationOrder.YZX, RotationOrder.ZXY, RotationOrder.ZYX ) val validFXSequences = Vector[List[Point3D]]( List(Rotate.XAxis, Rotate.YAxis, Rotate.ZAxis), List(Rotate.XAxis, Rotate.ZAxis, Rotate.YAxis), List(Rotate.YAxis, Rotate.XAxis, Rotate.ZAxis), List(Rotate.YAxis, Rotate.ZAxis, Rotate.XAxis), List(Rotate.ZAxis, Rotate.XAxis, Rotate.YAxis), List(Rotate.ZAxis, Rotate.YAxis, Rotate.XAxis) ) val invalidCodes = List(Int.MinValue, RotationOrder.minValue - 1, RotationOrder.maxValue + 1, Int.MaxValue) } /* Test fixture description. */ describe(RotationOrder.getClass.getCanonicalName) { /* Test the apply function works as expected. */ describe(".apply(Int)") { new TestData { it("must throw a NoSuchElementException if passed an " + "invalid rotation order code") { invalidCodes.foreach { code => intercept[NoSuchElementException] { RotationOrder(code) } } } it("must return the correct rotation order if passed a valid " + "rotation order code") { validCodes.foreach { code => assert(RotationOrder(code) === validRotationOrders(code)) } } } } /* Test the verify function works as expected. */ describe(".verify(Int)") { new TestData { it("must return false if passed an invalid rotation order code") { invalidCodes.foreach { code => assert(RotationOrder.verify(code) === false) } } it("must return true if passed a valid rotation order code") { validCodes.foreach { code => assert(RotationOrder.verify(code) === true) } } } } /* Test that the toAxisSequence function works as expected. 
*/ describe(".toAxisSequence(RotationOrder.Value)") { new TestData { it("must return correct associated ScalaFX axis sequence") { RotationOrder.values.foreach { rotationOrder => assert(RotationOrder.toAxisSequence(rotationOrder) === validFXSequences(rotationOrder.id)) } } } } } }
MichaelJAllen/facsimile
core/src/test/scala/org/facsim/anim/cell/test/RotationOrderTest.scala
Scala
lgpl-3.0
4,382
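A small sketch, based only on the behaviour the test above asserts (verify as a guard, apply throwing NoSuchElementException on bad codes), of a safe lookup helper. It assumes the facsimile classes are on the classpath.

import org.facsim.anim.cell.RotationOrder

object RotationOrderExample {
  // Some(order) for a valid code, None otherwise, using the verify/apply pair
  // that the test suite above exercises.
  def safeLookup(code: Int): Option[RotationOrder.Value] =
    if (RotationOrder.verify(code)) Some(RotationOrder(code)) else None

  def main(args: Array[String]): Unit = {
    println(safeLookup(RotationOrder.minValue)) // Some(...), a valid code
    println(safeLookup(Int.MaxValue))           // None, apply would have thrown
  }
}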
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.util.collection import java.util.Comparator import scala.collection.mutable.HashSet import org.scalatest.FunSuite class AppendOnlyMapSuite extends FunSuite { test("initialization") { val goodMap1 = new AppendOnlyMap[Int, Int](1) assert(goodMap1.size === 0) val goodMap2 = new AppendOnlyMap[Int, Int](255) assert(goodMap2.size === 0) val goodMap3 = new AppendOnlyMap[Int, Int](256) assert(goodMap3.size === 0) intercept[IllegalArgumentException] { new AppendOnlyMap[Int, Int](1 << 30) // Invalid map size: bigger than 2^29 } intercept[IllegalArgumentException] { new AppendOnlyMap[Int, Int](-1) } intercept[IllegalArgumentException] { new AppendOnlyMap[Int, Int](0) } } test("object keys and values") { val map = new AppendOnlyMap[String, String]() for (i <- 1 to 100) { map("" + i) = "" + i } assert(map.size === 100) for (i <- 1 to 100) { assert(map("" + i) === "" + i) } assert(map("0") === null) assert(map("101") === null) assert(map(null) === null) val set = new HashSet[(String, String)] for ((k, v) <- map) { // Test the foreach method set += ((k, v)) } assert(set === (1 to 100).map(_.toString).map(x => (x, x)).toSet) } test("primitive keys and values") { val map = new AppendOnlyMap[Int, Int]() for (i <- 1 to 100) { map(i) = i } assert(map.size === 100) for (i <- 1 to 100) { assert(map(i) === i) } assert(map(0) === null) assert(map(101) === null) val set = new HashSet[(Int, Int)] for ((k, v) <- map) { // Test the foreach method set += ((k, v)) } assert(set === (1 to 100).map(x => (x, x)).toSet) } test("null keys") { val map = new AppendOnlyMap[String, String]() for (i <- 1 to 100) { map("" + i) = "" + i } assert(map.size === 100) assert(map(null) === null) map(null) = "hello" assert(map.size === 101) assert(map(null) === "hello") } test("null values") { val map = new AppendOnlyMap[String, String]() for (i <- 1 to 100) { map("" + i) = null } assert(map.size === 100) assert(map("1") === null) assert(map(null) === null) assert(map.size === 100) map(null) = null assert(map.size === 101) assert(map(null) === null) } test("changeValue") { val map = new AppendOnlyMap[String, String]() for (i <- 1 to 100) { map("" + i) = "" + i } assert(map.size === 100) for (i <- 1 to 100) { val res = map.changeValue("" + i, (hadValue, oldValue) => { assert(hadValue === true) assert(oldValue === "" + i) oldValue + "!" }) assert(res === i + "!") } // Iterate from 101 to 400 to make sure the map grows a couple of times, because we had a // bug where changeValue would return the wrong result when the map grew on that insert for (i <- 101 to 400) { val res = map.changeValue("" + i, (hadValue, oldValue) => { assert(hadValue === false) i + "!" 
}) assert(res === i + "!") } assert(map.size === 400) assert(map(null) === null) map.changeValue(null, (hadValue, oldValue) => { assert(hadValue === false) "null!" }) assert(map.size === 401) map.changeValue(null, (hadValue, oldValue) => { assert(hadValue === true) assert(oldValue === "null!") "null!!" }) assert(map.size === 401) } test("inserting in capacity-1 map") { val map = new AppendOnlyMap[String, String](1) for (i <- 1 to 100) { map("" + i) = "" + i } assert(map.size === 100) for (i <- 1 to 100) { assert(map("" + i) === "" + i) } } test("destructive sort") { val map = new AppendOnlyMap[String, String]() for (i <- 1 to 100) { map("" + i) = "" + i } map.update(null, "happy new year!") try { map.apply("1") map.update("1", "2013") map.changeValue("1", (hadValue, oldValue) => "2014") map.iterator } catch { case e: IllegalStateException => fail() } val it = map.destructiveSortedIterator(new Comparator[(String, String)] { def compare(kv1: (String, String), kv2: (String, String)): Int = { val x = if (kv1 != null && kv1._1 != null) kv1._1.toInt else Int.MinValue val y = if (kv2 != null && kv2._1 != null) kv2._1.toInt else Int.MinValue x.compareTo(y) } }) // Should be sorted by key assert(it.hasNext) var previous = it.next() assert(previous == (null, "happy new year!")) previous = it.next() assert(previous == ("1", "2014")) while (it.hasNext) { val kv = it.next() assert(kv._1.toInt > previous._1.toInt) previous = kv } // All subsequent calls to apply, update, changeValue and iterator should throw exception intercept[AssertionError] { map.apply("1") } intercept[AssertionError] { map.update("1", "2013") } intercept[AssertionError] { map.changeValue("1", (hadValue, oldValue) => "2014") } intercept[AssertionError] { map.iterator } } }
sryza/spark
core/src/test/scala/org/apache/spark/util/collection/AppendOnlyMapSuite.scala
Scala
apache-2.0
5,974
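The suite above exercises changeValue, an upsert whose callback is told whether a previous value existed. A standalone sketch of that contract using a plain mutable HashMap (not Spark's AppendOnlyMap):

import scala.collection.mutable

object ChangeValueSketch {
  // Mimics the changeValue(key, (hadValue, oldValue) => ...) contract asserted above:
  // the callback learns whether a previous value existed before computing the new one.
  def changeValue[K, V](map: mutable.HashMap[K, V], key: K)(f: (Boolean, V) => V): V = {
    val updated = map.get(key) match {
      case Some(old) => f(true, old)
      case None      => f(false, null.asInstanceOf[V])
    }
    map(key) = updated
    updated
  }

  def main(args: Array[String]): Unit = {
    val m = mutable.HashMap[String, String]()
    changeValue(m, "1")((had, old) => if (had) old + "!" else "first") // inserts "first"
    changeValue(m, "1")((had, old) => if (had) old + "!" else "first") // updates to "first!"
    println(m("1")) // first!
  }
}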
/** * This file is part of the TA Buddy project. * Copyright (c) 2014 Alexey Aksenov ezh@ezh.msk.ru * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU Affero General Global License version 3 * as published by the Free Software Foundation with the addition of the * following permission added to Section 15 as permitted in Section 7(a): * FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED * BY Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS», * Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS» DISCLAIMS * THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU Affero General Global License for more details. * You should have received a copy of the GNU Affero General Global License * along with this program; if not, see http://www.gnu.org/licenses or write to * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA, 02110-1301 USA, or download the license from the following URL: * http://www.gnu.org/licenses/agpl.html * * The interactive user interfaces in modified source and object code versions * of this program must display Appropriate Legal Notices, as required under * Section 5 of the GNU Affero General Global License. * * In accordance with Section 7(b) of the GNU Affero General Global License, * you must retain the producer line in every report, form or document * that is created or manipulated using TA Buddy. * * You can be released from the requirements of the license by purchasing * a commercial license. Buying such a license is mandatory as soon as you * develop commercial activities involving the TA Buddy software without * disclosing the source code of your own applications. * These activities include: offering paid services to customers, * serving files in a web or/and network application, * shipping TA Buddy with a closed source product. 
* * For more information, please contact Digimead Team at this * address: ezh@ezh.msk.ru */ package org.digimead.tabuddy.desktop.logic.ui.preference import java.util.ArrayList import org.digimead.tabuddy.desktop.core.Preferences import org.digimead.tabuddy.desktop.core.definition.IPreferencePage import org.digimead.tabuddy.desktop.core.ui.UI import org.digimead.tabuddy.desktop.core.ui.support.DecoratingStyledCellLabelProviderExt import org.digimead.tabuddy.desktop.logic.Default import org.digimead.tabuddy.desktop.logic.payload.marker.serialization.signature.Validator import org.digimead.tabuddy.desktop.logic.payload.marker.serialization.signature.api.XValidator import org.eclipse.jface.preference.{ FieldEditor ⇒ JFieldEditor, FieldEditorPreferencePage, PreferenceManager, PreferenceStore } import org.eclipse.jface.viewers.{ ArrayContentProvider, ColumnLabelProvider, ColumnViewer, ColumnViewerToolTipSupport, DecorationOverlayIcon } import org.eclipse.jface.viewers.{ IDecoration, IDecorationContext, ILabelProviderListener, LabelDecorator, StyledString, TableViewer, TableViewerColumn, ViewerCell } import org.eclipse.jface.viewers.DelegatingStyledCellLabelProvider.IStyledLabelProvider import org.eclipse.jface.window.ToolTip import org.eclipse.swt.SWT import org.eclipse.swt.graphics.Image import org.eclipse.swt.layout.{ FillLayout, GridData, RowLayout } import org.eclipse.swt.widgets.{ Button, Composite, Event, Text } import org.eclipse.ui.{ ISharedImages, PlatformUI } import org.eclipse.ui.internal.WorkbenchPlugin /** * Signature validator preference page. */ class SignatureValidator extends FieldEditorPreferencePage with IPreferencePage { protected val pagePreferenceStore = new PreferenceStore def createFieldEditors() { setPreferenceStore(pagePreferenceStore) addField(new SignatureValidator.FieldEditor("SignatureValidator", "Signature validator:", getFieldEditorParent())) } /** Register this page in a preference manager. */ def register(pmgr: PreferenceManager) = pmgr.addToRoot(Preferences.Node("SignatureValidator", "Signature validator", None)(() ⇒ new SignatureValidator)) } object SignatureValidator { /** * Signature validator field editor. */ class FieldEditor(name: String, labelText: String, parent: Composite) extends { /** Add button control. */ protected var addButton = Option.empty[Button] /** Table viewer control. */ protected var tableViewer = Option.empty[TableViewer] /** Remove button control. */ protected var removeButton = Option.empty[Button] } with JFieldEditor(name, labelText, parent) { def adjustForNumColumns(numColumns: Int) = for { tableViewer ← tableViewer } { val gd = tableViewer.getControl().getLayoutData().asInstanceOf[GridData] if (numColumns > 2) gd.horizontalSpan = numColumns - 2 else gd.horizontalSpan = 1 // We only grab excess space if we have to // If another field editor has more columns then // we assume it is setting the width. 
gd.grabExcessHorizontalSpace = gd.horizontalSpan == 1 } def doFillIntoGrid(parent: Composite, numColumns: Int) { val table = getTableControl(parent) val add = getButtons(parent) adjustForNumColumns(numColumns) } def doLoad() = for { tableViewer ← tableViewer column ← tableViewer.getTable().getColumns().headOption model = tableViewer.getInput().asInstanceOf[ArrayList[XValidator]] } { Validator.validators().toSeq.sortBy(_.name.name).foreach(model.add) tableViewer.refresh() if (tableViewer.getTable().getBounds().width > 0) UI.adjustViewerColumnWidth(tableViewer.getTable(), column, Default.columnPadding) else column.pack() } def doLoadDefault() = for { tableViewer ← tableViewer model = tableViewer.getInput().asInstanceOf[ArrayList[XValidator]] } Validator.validators().toSeq.sortBy(_.name.name).foreach(model.add) def doStore() { } def getNumberOfControls() = 2 protected def getTableControl(parent: Composite): TableViewer = tableViewer getOrElse { val viewer = new TableViewer(parent, SWT.MULTI | SWT.H_SCROLL | SWT.V_SCROLL) val viewerColumn = new TableViewerColumn(viewer, SWT.NONE) viewerColumn.setLabelProvider(new DecoratingStyledCellLabelProviderExt(new ViewLabelProvider(), new ViewLabelDecorator(), null)) viewer.setContentProvider(ArrayContentProvider.getInstance()) viewer.setInput(new ArrayList[XValidator]()) ViewColumnViewerToolTipSupport.enableFor(viewer) viewer.getControl().setLayoutData(new GridData(SWT.FILL, SWT.FILL, false, false, 1, 1)) this.tableViewer = Option(viewer) viewer } protected def getButtons(parent: Composite): (Button, Button) = { for { addButton ← addButton removeButton ← removeButton } yield (addButton, removeButton) } getOrElse { val container = new Composite(parent, SWT.NONE) container.setLayoutData(new GridData(SWT.LEFT, SWT.TOP, false, false, 1, 1)) val fillLayout = new FillLayout() fillLayout.`type` = SWT.VERTICAL container.setLayout(fillLayout) val addButton = new Button(container, SWT.NONE) addButton.setText("Add") val removeButton = new Button(container, SWT.NONE) removeButton.setText("Remove") this.addButton = Option(addButton) this.removeButton = Option(removeButton) (addButton, removeButton) } } class ViewLabelProvider extends ColumnLabelProvider with IStyledLabelProvider { override def getStyledText(element: AnyRef): StyledString = new StyledString(getText(element)) override def getText(element: AnyRef) = element match { case validator: XValidator ⇒ s"${validator.name.name.capitalize} - ${validator.description.capitalize}" case unknown ⇒ super.getText(unknown) } override def getImage(obj: Object) = PlatformUI.getWorkbench().getSharedImages().getImage(ISharedImages.IMG_OBJ_ELEMENT) override def getToolTipText(element: AnyRef): String = getText(element) + ", shown in a tooltip" } class ViewLabelDecorator extends LabelDecorator { private val warningImageDescriptor = WorkbenchPlugin.getDefault().getSharedImages().getImageDescriptor(ISharedImages.IMG_TOOL_BACK_DISABLED) //WorkbenchImages.getImageDescriptor(IWorkbenchGraphicConstants.IMG_LCL_PIN_VIEW) private var decoratedImage: Image = null override def decorateImage(image: Image, element: AnyRef, context: IDecorationContext): Image = element match { case element: XValidator ⇒ if (decoratedImage == null) { decoratedImage = (new DecorationOverlayIcon(image, warningImageDescriptor, IDecoration.BOTTOM_RIGHT)).createImage() } return decoratedImage; case other ⇒ null } override def dispose() { decoratedImage.dispose() decoratedImage = null } override def decorateText(text: String, element: AnyRef, context: 
IDecorationContext): String = null override def prepareDecoration(element: AnyRef, originalText: String, context: IDecorationContext) = false override def decorateImage(image: Image, element: AnyRef): Image = null override def decorateText(text: String, element: AnyRef): String = null override def addListener(listener: ILabelProviderListener) {} override def isLabelProperty(element: AnyRef, property: String): Boolean = false override def removeListener(listener: ILabelProviderListener) {} } class ViewColumnViewerToolTipSupport(viewer: ColumnViewer, style: Int, manualActivation: Boolean) extends ColumnViewerToolTipSupport(viewer, style, manualActivation) { override protected def createViewerToolTipContentArea(event: Event, cell: ViewerCell, parent: Composite): Composite = { val composite = new Composite(parent, SWT.NONE); composite.setLayout(new RowLayout(SWT.VERTICAL)); val text = new Text(composite, SWT.SINGLE); text.setText(getText(event)); text.setSize(100, 60); val calendar = new Button(composite, SWT.CALENDAR); calendar.setText("111") calendar.setEnabled(false); calendar.setSize(100, 100); composite.pack(); return composite; } } object ViewColumnViewerToolTipSupport { def enableFor(viewer: ColumnViewer) { new ViewColumnViewerToolTipSupport(viewer, ToolTip.NO_RECREATE, false); } } }
digimead/digi-TABuddy-desktop
part-logic/src/main/scala/org/digimead/tabuddy/desktop/logic/ui/preference/SignatureValidator.scala
Scala
agpl-3.0
10,631
/* Copyright 2009-2016 EPFL, Lausanne */ package leon package utils object Graphs { trait EdgeLike[Node] { def _1: Node def _2: Node } case class SimpleEdge[Node](_1: Node, _2: Node) extends EdgeLike[Node] case class LabeledEdge[Label, Node](_1: Node, l: Label, _2: Node) extends EdgeLike[Node] trait DiGraphLike[Node, Edge <: EdgeLike[Node], G <: DiGraphLike[Node, Edge, G]] { // The vertices def N: Set[Node] // The edges def E: Set[Edge] // Returns the set of incoming edges for a given vertex def inEdges(n: Node) = E.filter(_._2 == n) // Returns the set of outgoing edges for a given vertex def outEdges(n: Node) = E.filter(_._1 == n) // Returns the set of edges between two vertices def edgesBetween(from: Node, to: Node) = { E.filter(e => e._1 == from && e._2 == to) } // Adds a new vertex def + (n: Node): G // Adds new vertices def ++ (ns: Traversable[Node]): G // Adds a new edge def + (e: Edge): G // Removes a vertex from the graph def - (from: Node): G // Removes a number of vertices from the graph def -- (from: Traversable[Node]): G // Removes an edge from the graph def - (from: Edge): G } case class DiGraph[Node, Edge <: EdgeLike[Node]](N: Set[Node] = Set[Node](), E: Set[Edge] = Set[Edge]()) extends DiGraphLike[Node, Edge, DiGraph[Node, Edge]] with DiGraphOps[Node, Edge, DiGraph[Node, Edge]]{ def +(n: Node) = copy(N=N+n) def ++(ns: Traversable[Node]) = copy(N=N++ns) def +(e: Edge) = (this+e._1+e._2).copy(E = E + e) def -(n: Node) = copy(N = N-n, E = E.filterNot(e => e._1 == n || e._2 == n)) def --(ns: Traversable[Node]) = { val toRemove = ns.toSet copy(N = N--ns, E = E.filterNot(e => toRemove.contains(e._1) || toRemove.contains(e._2))) } def -(e: Edge) = copy(E = E-e) } trait DiGraphOps[Node, Edge <: EdgeLike[Node], G <: DiGraphLike[Node, Edge, G]] { this: G => def sources: Set[Node] = { N -- E.map(_._2) } def sinks: Set[Node] = { N -- E.map(_._1) } def stronglyConnectedComponents: DiGraph[Set[Node], SimpleEdge[Set[Node]]] = { // Tarjan's algorithm var index = 0 var stack = List[Node]() var indexes = Map[Node, Int]() var lowlinks = Map[Node, Int]() var onStack = Set[Node]() var nodesToScc = Map[Node, Set[Node]]() var res = DiGraph[Set[Node], SimpleEdge[Set[Node]]]() def strongConnect(n: Node): Unit = { indexes += n -> index lowlinks += n -> index index += 1 stack = n :: stack onStack += n for (m <- succ(n)) { if (!(indexes contains m)) { strongConnect(m) lowlinks += n -> (lowlinks(n) min lowlinks(m)) } else if (onStack(m)) { lowlinks += n -> (lowlinks(n) min indexes(m)) } } if (lowlinks(n) == indexes(n)) { val i = stack.indexOf(n)+1 val ns = stack.take(i) stack = stack.drop(i) val scc = ns.toSet onStack --= ns nodesToScc ++= ns.map(n => n -> scc) res += scc } } for (n <- N if !(indexes contains n)) { strongConnect(n) } for (e <- E) { val s1 = nodesToScc(e._1) val s2 = nodesToScc(e._2) if (s1 != s2) { res += SimpleEdge(s1, s2) } } res } def topSort: Seq[Node] = { var res = List[Node]() var temp = Set[Node]() var perm = Set[Node]() def visit(n: Node) { if (temp(n)) { throw new IllegalArgumentException("Graph is not a DAG") } else if (!perm(n)) { temp += n for (n2 <- succ(n)) { visit(n2) } perm += n temp -= n res ::= n } } for (n <- N if !temp(n) && !perm(n)) { visit(n) } res } def depthFirstSearch(from: Node)(f: Node => Unit): Unit = { var visited = Set[Node]() val stack = new collection.mutable.Stack[Node]() stack.push(from) while(stack.nonEmpty) { val n = stack.pop visited += n f(n) for (n2 <- succ(n) if !visited(n2)) { stack.push(n2) } } } def fold[T](from: Node)( follow: Node => 
Traversable[Node], map: Node => T, compose: List[T] => T): T = { var visited = Set[Node]() def visit(n: Node): T = { visited += n val toFollow = follow(n).filterNot(visited) visited ++= toFollow compose(map(n) :: toFollow.toList.map(visit)) } compose(follow(from).toList.map(visit)) } def succ(from: Node): Set[Node] = { outEdges(from).map(_._2) } def pred(from: Node): Set[Node] = { inEdges(from).map(_._1) } def transitiveSucc(from: Node): Set[Node] = { fold[Set[Node]](from)( succ, Set(_), _.toSet.flatten ) } def transitivePred(from: Node): Set[Node] = { fold[Set[Node]](from)( pred, Set(_), _.toSet.flatten ) } def breadthFirstSearch(from: Node)(f: Node => Unit): Unit = { var visited = Set[Node]() val queue = new collection.mutable.Queue[Node]() queue += from while(queue.nonEmpty) { val n = queue.dequeue visited += n f(n) for (n2 <- succ(n) if !visited(n2)) { queue += n2 } } } } }
epfl-lara/leon
src/main/scala/leon/utils/Graphs.scala
Scala
gpl-3.0
5,589
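A usage sketch for the graph utilities above, assuming the leon.utils.Graphs object is on the classpath; the calls (+, topSort, sources, sinks, stronglyConnectedComponents) are the ones defined there, while the example graph itself is made up.

import leon.utils.Graphs._

object GraphsExample {
  def main(args: Array[String]): Unit = {
    // A small DAG: 1 -> 2 -> 3 plus a shortcut 1 -> 3.
    val g = DiGraph[Int, SimpleEdge[Int]]() +
      SimpleEdge(1, 2) + SimpleEdge(2, 3) + SimpleEdge(1, 3)

    println(g.topSort) // a valid topological order of 1, 2, 3
    println(g.sources) // Set(1)
    println(g.sinks)   // Set(3)

    // A back edge creates a cycle; the SCC graph collapses it into one component.
    val cyclic = g + SimpleEdge(3, 1)
    println(cyclic.stronglyConnectedComponents.N) // one component containing all three nodes
  }
}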
/* * Copyright 2013 Stephan Rehfeld * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package test.scaladelray.geometry import org.scalatest.FunSpec import scaladelray.geometry.{Node, Hit, Geometry} import scaladelray.math._ import scaladelray.math.Ray import scaladelray.math.Vector3 import scaladelray.math.Point3 class NodeTestGeometry( t : Transform, r : Ray, hits : Hit* ) extends Geometry( MaterialTestAdapter() ) { var called = false override def <--(r: Ray): Set[Hit] = { assert( r == (t * this.r) ) called = true hits.toSet } } class NodeSpec extends FunSpec { describe( "A node" ) { it( "should apply the transform to the ray, collect the hits from all object and transform back the normal" ) { val t = Transform.translate( 2, 3, 5 ).rotateX( 7 ) val r = Ray( Point3( 5, 3, 2 ), Vector3( 7, 11, 13 ) ) val g = new NodeTestGeometry( t, r, Hit( r, null, 1, Normal3( 1, 0, 0 ), null ), Hit( r, null, 1, Normal3( 0, 1, 0 ), null ), Hit( r, null, 1, Normal3( 0, 0, 1 ), null ) ) val n = new Node( t, g ) val hits = n <-- r assert( hits.size == 3 ) assert( g.called ) assert( hits.exists( _.n == t * Normal3( 1, 0, 0 ) ) ) assert( hits.exists( _.n == t * Normal3( 0, 1, 0 ) ) ) assert( hits.exists( _.n == t * Normal3( 0, 0, 1 ) ) ) } } }
stephan-rehfeld/scaladelray
src/test/scala/test/scaladelray/geometry/NodeSpec.scala
Scala
apache-2.0
1,859
package com.overviewdocs.searchindex import scala.collection.immutable import scala.concurrent.Future import com.overviewdocs.query.Query import com.overviewdocs.models.Document /** Interacts with a search index. * * A search index is a program that stores all documents. The documents are * grouped by document set ID. * * Indexing is asynchronous and presumably none too quick. That's why all * indexings are bulk requests. * * Searching is very fast. Read about the syntax at * http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html * * The _database_ is authoritative, not the search index. So the search index * returns IDs which can then be used to query the database. */ trait IndexClient { /** Adds a DocumentSet. * * This is for sharding and routing: after calling this method, Documents * within this DocumentSet will be routed to a given shard. So don't add * documents until you've done this. * * If the document set is already added, this is a no-op. */ def addDocumentSet(id: Long): Future[Unit] /** Removes a DocumentSet. * * Removes all documents with the given DocumentSet ID from the search * index, then removes the DocumentSet metadata. * * If the document set doesn't exist in the search index, this is a no-op. */ def removeDocumentSet(id: Long): Future[Unit] /** Adds Documents. * * Be sure to call addDocumentSet() for all relevant document sets before * you call this. * * Any documents with the same ID in the search index will be duplicated. Use * updateDocuments() in this case. * * After this method succeeds, the documents are searchable right away. */ def addDocuments(documentSetId: Long, documents: immutable.Seq[Document]): Future[Unit] /** Deletes and then re-adds Documents. * * After this method succeeds, the documents are searchable right away. * However, term vectors will be slightly inaccurate. */ def updateDocuments(documentSetId: Long, documents: immutable.Seq[Document]): Future[Unit] /** Returns IDs for matching documents. * * @param documentSetId Document set ID * @param q Search string */ def searchForIds(documentSetId: Long, q: Query): Future[SearchResult] /** Finds all highlights of a given query in a document. * * @param documentSetId Document set ID (says which alias to search under) * @param documentId Document ID * @param q Search string */ def highlight(documentSetId: Long, documentId: Long, q: Query): Future[immutable.Seq[Utf16Highlight]] def highlights(documentSetId: Long, documentIds: immutable.Seq[Long], q: Query): Future[Map[Long, immutable.Seq[Utf16Snippet]]] /** Guarantees all past added documents are searchable. */ def refresh(documentSetId: Long): Future[Unit] /** Wipes the database -- BE CAREFUL! * * Useful in test suites. */ def deleteAllIndices: Future[Unit] }
overview/overview-server
worker/src/main/scala/com/overviewdocs/searchindex/IndexClient.scala
Scala
agpl-3.0
3,016
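A sketch of the call order the IndexClient documentation prescribes: addDocumentSet before addDocuments, refresh before relying on search. It assumes the trait and a SearchResult type live in com.overviewdocs.searchindex, as the unqualified reference in the trait suggests.

import scala.collection.immutable
import scala.concurrent.{ExecutionContext, Future}

import com.overviewdocs.models.Document
import com.overviewdocs.query.Query
import com.overviewdocs.searchindex.{IndexClient, SearchResult}

object IndexClientExample {
  // Documented order: create the document set first (routing/sharding),
  // then add documents, refresh, and only then search.
  def indexAndSearch(
      client: IndexClient,
      documentSetId: Long,
      documents: immutable.Seq[Document],
      query: Query)(implicit ec: ExecutionContext): Future[SearchResult] = {
    for {
      _      <- client.addDocumentSet(documentSetId)
      _      <- client.addDocuments(documentSetId, documents)
      _      <- client.refresh(documentSetId)
      result <- client.searchForIds(documentSetId, query)
    } yield result
  }
}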
package scala.xml

import org.junit.Test
import org.junit.Assert.assertEquals

class XMLSyntaxTestJVM {

  @Test
  def test3(): Unit = {
    // this demonstrates how to handle entities
    val s = io.Source.fromString("<a>&nbsp;</a>")
    object parser extends xml.parsing.ConstructingParser(s, false /*ignore ws*/) {
      override def replacementText(entityName: String): io.Source = {
        entityName match {
          case "nbsp" => io.Source.fromString("\u0160");
          case _      => super.replacementText(entityName);
        }
      }
      nextch(); // !!important, to initialize the parser
    }
    val parsed = parser.element(TopScope) // parse the source as element
    // alternatively, we could call document()
    assertEquals("<a>Š</a>", parsed.toString)
  }
}
scala/scala-xml
jvm/src/test/scala/scala/xml/XMLSyntaxTest.scala
Scala
apache-2.0
782
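For comparison, a minimal sketch of the more common entry point without custom entity handling, using ConstructingParser.fromSource. This is an illustrative snippet, not part of the test above, and the helper names should be checked against the scala-xml version in use.

import scala.xml.parsing.ConstructingParser

object ConstructingParserExample {
  def main(args: Array[String]): Unit = {
    val source = scala.io.Source.fromString("<a><b>hello</b></a>")
    // fromSource takes care of initialising the parser (no manual nextch call).
    val doc = ConstructingParser.fromSource(source, false /* preserveWS */).document()
    println(doc.docElem) // <a><b>hello</b></a>
  }
}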
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.datasources.parquet import java.util.{Optional, PrimitiveIterator} import scala.collection.mutable.ArrayBuffer import scala.language.implicitConversions import org.apache.parquet.column.{ColumnDescriptor, ParquetProperties} import org.apache.parquet.column.impl.ColumnWriteStoreV1 import org.apache.parquet.column.page._ import org.apache.parquet.column.page.mem.MemPageStore import org.apache.parquet.io.ParquetDecodingException import org.apache.parquet.io.api.Binary import org.apache.parquet.schema.{MessageType, MessageTypeParser} import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName import org.apache.spark.memory.MemoryMode import org.apache.spark.sql.{QueryTest, Row} import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions.RowOrdering import org.apache.spark.sql.catalyst.util.DateTimeUtils import org.apache.spark.sql.execution.datasources.parquet.SpecificParquetRecordReaderBase.ParquetRowGroupReader import org.apache.spark.sql.execution.vectorized.ColumnVectorUtils import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types._ /** * A test suite on the vectorized Parquet reader. Unlike `ParquetIOSuite`, this focuses on * low-level decoding logic covering column index, dictionary, different batch and page sizes, etc. 
*/ class ParquetVectorizedSuite extends QueryTest with ParquetTest with SharedSparkSession { private val VALUES: Seq[String] = ('a' to 'z').map(_.toString) private val NUM_VALUES: Int = VALUES.length private val BATCH_SIZE_CONFIGS: Seq[Int] = Seq(1, 3, 5, 7, 10, 20, 40) private val PAGE_SIZE_CONFIGS: Seq[Seq[Int]] = Seq(Seq(6, 6, 7, 7), Seq(4, 9, 4, 9)) implicit def toStrings(ints: Seq[Int]): Seq[String] = ints.map(i => ('a' + i).toChar.toString) test("primitive type - no column index") { BATCH_SIZE_CONFIGS.foreach { batchSize => PAGE_SIZE_CONFIGS.foreach { pageSizes => Seq(true, false).foreach { dictionaryEnabled => testPrimitiveString(None, None, pageSizes, VALUES, batchSize, dictionaryEnabled = dictionaryEnabled) } } } } test("primitive type - column index with ranges") { BATCH_SIZE_CONFIGS.foreach { batchSize => PAGE_SIZE_CONFIGS.foreach { pageSizes => Seq(true, false).foreach { dictionaryEnabled => var ranges = Seq((0L, 9L)) testPrimitiveString(None, Some(ranges), pageSizes, 0 to 9, batchSize, dictionaryEnabled = dictionaryEnabled) ranges = Seq((30, 50)) testPrimitiveString(None, Some(ranges), pageSizes, Seq.empty, batchSize, dictionaryEnabled = dictionaryEnabled) ranges = Seq((15, 25)) testPrimitiveString(None, Some(ranges), pageSizes, 15 to 19, batchSize, dictionaryEnabled = dictionaryEnabled) ranges = Seq((19, 20)) testPrimitiveString(None, Some(ranges), pageSizes, 19 to 20, batchSize, dictionaryEnabled = dictionaryEnabled) ranges = Seq((0, 3), (5, 7), (15, 18)) testPrimitiveString(None, Some(ranges), pageSizes, toStrings(Seq(0, 1, 2, 3, 5, 6, 7, 15, 16, 17, 18)), batchSize, dictionaryEnabled = dictionaryEnabled) } } } } test("primitive type - column index with ranges and nulls") { BATCH_SIZE_CONFIGS.foreach { batchSize => PAGE_SIZE_CONFIGS.foreach { pageSizes => Seq(true, false).foreach { dictionaryEnabled => val valuesWithNulls = VALUES.zipWithIndex.map { case (v, i) => if (i % 2 == 0) null else v } testPrimitiveString(None, None, pageSizes, valuesWithNulls, batchSize, valuesWithNulls, dictionaryEnabled) val ranges = Seq((5L, 7L)) testPrimitiveString(None, Some(ranges), pageSizes, Seq("f", null, "h"), batchSize, valuesWithNulls, dictionaryEnabled) } } } } test("primitive type - column index with ranges and first row indexes") { BATCH_SIZE_CONFIGS.foreach { batchSize => Seq(true, false).foreach { dictionaryEnabled => // Single page val firstRowIndex = 10 var ranges = Seq((0L, 9L)) testPrimitiveString(Some(Seq(firstRowIndex)), Some(ranges), Seq(VALUES.length), Seq.empty, batchSize, dictionaryEnabled = dictionaryEnabled) ranges = Seq((15, 25)) testPrimitiveString(Some(Seq(firstRowIndex)), Some(ranges), Seq(VALUES.length), 5 to 15, batchSize, dictionaryEnabled = dictionaryEnabled) ranges = Seq((15, 35)) testPrimitiveString(Some(Seq(firstRowIndex)), Some(ranges), Seq(VALUES.length), 5 to 19, batchSize, dictionaryEnabled = dictionaryEnabled) ranges = Seq((15, 39)) testPrimitiveString(Some(Seq(firstRowIndex)), Some(ranges), Seq(VALUES.length), 5 to 19, batchSize, dictionaryEnabled = dictionaryEnabled) // Row indexes: [ [10, 16), [20, 26), [30, 37), [40, 47) ] // Values: [ [0, 6), [6, 12), [12, 19), [19, 26) ] var pageSizes = Seq(6, 6, 7, 7) var firstRowIndexes = Seq(10L, 20, 30, 40) ranges = Seq((0L, 9L)) testPrimitiveString(Some(firstRowIndexes), Some(ranges), pageSizes, Seq.empty, batchSize, dictionaryEnabled = dictionaryEnabled) ranges = Seq((15, 25)) testPrimitiveString(Some(firstRowIndexes), Some(ranges), pageSizes, 5 to 9, batchSize, dictionaryEnabled = dictionaryEnabled) ranges = 
Seq((15, 35)) testPrimitiveString(Some(firstRowIndexes), Some(ranges), pageSizes, 5 to 14, batchSize, dictionaryEnabled = dictionaryEnabled) ranges = Seq((15, 60)) testPrimitiveString(Some(firstRowIndexes), Some(ranges), pageSizes, 5 to 19, batchSize, dictionaryEnabled = dictionaryEnabled) ranges = Seq((12, 22), (28, 38)) testPrimitiveString(Some(firstRowIndexes), Some(ranges), pageSizes, toStrings(Seq(2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 15, 16, 17, 18)), batchSize, dictionaryEnabled = dictionaryEnabled) // Row indexes: [ [10, 11), [40, 52), [100, 112), [200, 201) ] // Values: [ [0, 1), [1, 13), [13, 25), [25, 26] ] pageSizes = Seq(1, 12, 12, 1) firstRowIndexes = Seq(10L, 40, 100, 200) ranges = Seq((0L, 9L)) testPrimitiveString(Some(firstRowIndexes), Some(ranges), pageSizes, Seq.empty, batchSize, dictionaryEnabled = dictionaryEnabled) ranges = Seq((300, 350)) testPrimitiveString(Some(firstRowIndexes), Some(ranges), pageSizes, Seq.empty, batchSize, dictionaryEnabled = dictionaryEnabled) ranges = Seq((50, 80)) testPrimitiveString(Some(firstRowIndexes), Some(ranges), pageSizes, (11 to 12), batchSize, dictionaryEnabled = dictionaryEnabled) ranges = Seq((0, 150)) testPrimitiveString(Some(firstRowIndexes), Some(ranges), pageSizes, 0 to 24, batchSize, dictionaryEnabled = dictionaryEnabled) // with nulls val valuesWithNulls = VALUES.zipWithIndex.map { case (v, i) => if (i % 2 == 0) null else v } ranges = Seq((20, 45)) // select values in [1, 5] testPrimitiveString(Some(firstRowIndexes), Some(ranges), pageSizes, Seq("b", null, "d", null, "f"), batchSize, valuesWithNulls, dictionaryEnabled = dictionaryEnabled) ranges = Seq((8, 12), (80, 104)) testPrimitiveString(Some(firstRowIndexes), Some(ranges), pageSizes, Seq(null, "n", null, "p", null, "r"), batchSize, valuesWithNulls, dictionaryEnabled = dictionaryEnabled) } } } private def testPrimitiveString( firstRowIndexesOpt: Option[Seq[Long]], rangesOpt: Option[Seq[(Long, Long)]], pageSizes: Seq[Int], expectedValues: Seq[String], batchSize: Int, inputValues: Seq[String] = VALUES, dictionaryEnabled: Boolean = false): Unit = { assert(pageSizes.sum == inputValues.length) firstRowIndexesOpt.foreach(a => assert(pageSizes.length == a.length)) val isRequiredStr = if (!expectedValues.contains(null)) "required" else "optional" val parquetSchema: MessageType = MessageTypeParser.parseMessageType( s"""message root { | $isRequiredStr binary a(UTF8); |} |""".stripMargin ) val maxDef = if (inputValues.contains(null)) 1 else 0 val ty = parquetSchema.asGroupType().getType("a").asPrimitiveType() val cd = new ColumnDescriptor(Seq("a").toArray, ty, 0, maxDef) val repetitionLevels = Array.fill[Int](inputValues.length)(0) val definitionLevels = inputValues.map(v => if (v == null) 0 else 1) val memPageStore = new MemPageStore(expectedValues.length) var i = 0 val pageFirstRowIndexes = ArrayBuffer.empty[Long] pageSizes.foreach { size => pageFirstRowIndexes += i writeDataPage(cd, memPageStore, repetitionLevels.slice(i, i + size), definitionLevels.slice(i, i + size), inputValues.slice(i, i + size), maxDef, dictionaryEnabled) i += size } checkAnswer(expectedValues.length, parquetSchema, TestPageReadStore(memPageStore, firstRowIndexesOpt.getOrElse(pageFirstRowIndexes).toSeq, rangesOpt), expectedValues.map(i => Row(i)), batchSize) } /** * Write a single data page using repetition levels, definition levels and values provided. * * Note that this requires `repetitionLevels`, `definitionLevels` and `values` to have the same * number of elements. 
For null values, the corresponding slots in `values` will be skipped. */ private def writeDataPage( columnDesc: ColumnDescriptor, pageWriteStore: PageWriteStore, repetitionLevels: Seq[Int], definitionLevels: Seq[Int], values: Seq[Any], maxDefinitionLevel: Int, dictionaryEnabled: Boolean = false): Unit = { val columnWriterStore = new ColumnWriteStoreV1(pageWriteStore, ParquetProperties.builder() .withPageSize(4096) .withDictionaryEncoding(dictionaryEnabled) .build()) val columnWriter = columnWriterStore.getColumnWriter(columnDesc) repetitionLevels.zip(definitionLevels).zipWithIndex.foreach { case ((rl, dl), i) => if (dl < maxDefinitionLevel) { columnWriter.writeNull(rl, dl) } else { columnDesc.getPrimitiveType.getPrimitiveTypeName match { case PrimitiveTypeName.INT32 => columnWriter.write(values(i).asInstanceOf[Int], rl, dl) case PrimitiveTypeName.INT64 => columnWriter.write(values(i).asInstanceOf[Long], rl, dl) case PrimitiveTypeName.BOOLEAN => columnWriter.write(values(i).asInstanceOf[Boolean], rl, dl) case PrimitiveTypeName.FLOAT => columnWriter.write(values(i).asInstanceOf[Float], rl, dl) case PrimitiveTypeName.DOUBLE => columnWriter.write(values(i).asInstanceOf[Double], rl, dl) case PrimitiveTypeName.BINARY => columnWriter.write(Binary.fromString(values(i).asInstanceOf[String]), rl, dl) case _ => throw new IllegalStateException(s"Unexpected type: " + s"${columnDesc.getPrimitiveType.getPrimitiveTypeName}") } } columnWriterStore.endRecord() } columnWriterStore.flush() } private def checkAnswer( totalRowCount: Int, fileSchema: MessageType, readStore: PageReadStore, expected: Seq[Row], batchSize: Int = NUM_VALUES): Unit = { import collection.JavaConverters._ val recordReader = new VectorizedParquetRecordReader( DateTimeUtils.getZoneId("EST"), "CORRECTED", "UTC", "CORRECTED", "UTC", true, batchSize) recordReader.initialize(fileSchema, fileSchema, TestParquetRowGroupReader(Seq(readStore)), totalRowCount) // convert both actual and expected rows into collections val schema = recordReader.sparkSchema val expectedRowIt = ColumnVectorUtils.toBatch( schema, MemoryMode.ON_HEAP, expected.iterator.asJava).rowIterator() val rowOrdering = RowOrdering.createNaturalAscendingOrdering(schema.map(_.dataType)) var i = 0 while (expectedRowIt.hasNext && recordReader.nextKeyValue()) { val expectedRow = expectedRowIt.next() val actualRow = recordReader.getCurrentValue.asInstanceOf[InternalRow] assert(rowOrdering.compare(expectedRow, actualRow) == 0, { val expectedRowStr = toDebugString(schema, expectedRow) val actualRowStr = toDebugString(schema, actualRow) s"at index $i, expected row: $expectedRowStr doesn't match actual row: $actualRowStr" }) i += 1 } } private def toDebugString(schema: StructType, row: InternalRow): String = { if (row == null) "null" else { val fieldStrings = schema.fields.zipWithIndex.map { case (f, i) => f.dataType match { case IntegerType => row.getInt(i).toString case StringType => val utf8Str = row.getUTF8String(i) if (utf8Str == null) "null" else utf8Str.toString case ArrayType(_, _) => val elements = row.getArray(i) if (elements == null) "null" else elements.array.mkString("[", ", ", "]") case _ => throw new IllegalArgumentException(s"Unsupported data type: ${f.dataType}") } } fieldStrings.mkString(", ") } } case class TestParquetRowGroupReader(groups: Seq[PageReadStore]) extends ParquetRowGroupReader { private var index: Int = 0 override def readNextRowGroup(): PageReadStore = { if (index == groups.length) { null } else { val res = groups(index) index += 1 res } } override def 
close(): Unit = {} } private case class TestPageReadStore( wrapped: PageReadStore, firstRowIndexes: Seq[Long], rowIndexRangesOpt: Option[Seq[(Long, Long)]] = None) extends PageReadStore { override def getPageReader(descriptor: ColumnDescriptor): PageReader = { val originalReader = wrapped.getPageReader(descriptor) TestPageReader(originalReader, firstRowIndexes) } override def getRowCount: Long = wrapped.getRowCount override def getRowIndexes: Optional[PrimitiveIterator.OfLong] = { rowIndexRangesOpt.map { ranges => Optional.of(new PrimitiveIterator.OfLong { private var currentRangeIdx: Int = 0 private var currentRowIdx: Long = -1 override def nextLong(): Long = { if (!hasNext) throw new NoSuchElementException("No more element") val res = currentRowIdx currentRowIdx += 1 res } override def hasNext: Boolean = { while (currentRangeIdx < ranges.length) { if (currentRowIdx > ranges(currentRangeIdx)._2) { // we've exhausted the current range - move to the next range currentRangeIdx += 1 currentRowIdx = -1 } else { if (currentRowIdx == -1) { currentRowIdx = ranges(currentRangeIdx)._1 } return true } } false } }) }.getOrElse(Optional.empty()) } } private case class TestPageReader( wrapped: PageReader, firstRowIndexes: Seq[Long]) extends PageReader { private var index = 0 override def readDictionaryPage(): DictionaryPage = wrapped.readDictionaryPage() override def getTotalValueCount: Long = wrapped.getTotalValueCount override def readPage(): DataPage = { val wrappedPage = try { wrapped.readPage() } catch { case _: ParquetDecodingException => null } if (wrappedPage == null) { wrappedPage } else { val res = new TestDataPage(wrappedPage, firstRowIndexes(index)) index += 1 res } } } }
ueshin/apache-spark
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetVectorizedSuite.scala
Scala
apache-2.0
16,992
package models.service.importer import models.database.facade.ArtistFacade import models.database.facade.api.SoundcloudFacade import models.service.api.discover.MusicBrainzApi import models.util.Constants import play.api.libs.json.JsValue import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future class SoundcloudImporter(identifier: Either[Int, String]) extends Importer(identifier, "soundcloud") { def convertJsonToSeq(json: Option[JsValue]):Future[Seq[Map[String,String]]] = { json match { case Some(js) => doJsonConversion(js) case None => throw new Exception(Constants.userTracksRetrievalError) } } def doJsonConversion(js: JsValue): Future[Seq[Map[String, String]]] = { val result = js.as[Seq[JsValue]] val totalLength = result.length Future.sequence { result.zipWithIndex.map { case (entity, i) => val position = i + 1 apiHelper.setRetrievalProcessProgress(position.toDouble / totalLength) val isTrack = (entity \\ "kind").as[String] == "track" if(isTrack) { val id = (entity \\ "id").as[Int] val user = (entity \\ "user").as[JsValue] val artist = (user \\ "username").as[String] val title = (entity \\ "title").as[String] val artistFromDb = ArtistFacade.artistByName(artist) artistFromDb match { case Some(art) => SoundcloudFacade.saveArtistWithServiceId(art.name, id.toString) Future.successful(Some(Map( Constants.mapKeyArtist -> art.name, Constants.mapKeyAlbum -> Constants.unknownAlbum, Constants.mapKeyTrack -> title ))) case None => Thread.sleep(1000) for { musicBrainzResult <- MusicBrainzApi.isKnownArtist(artist) } yield { musicBrainzResult match { case Some(artistName) => SoundcloudFacade.saveArtistWithServiceId(artistName, id.toString) Some(Map(Constants.mapKeyArtist -> artistName, Constants.mapKeyAlbum -> Constants.unknownAlbum, Constants.mapKeyTrack -> title)) case None => None } } } } else Future.successful(None) } } map(x => x filter(y => y.isDefined) map(z => z.get)) } }
haffla/stream-compare
app/models/service/importer/SoundcloudImporter.scala
Scala
gpl-3.0
2,417
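The importer above sequences one Future per JSON element and then keeps only the defined results. A standalone sketch of that pattern in plain Scala, with a made-up lookup function standing in for the MusicBrainz check:

import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

object OptionalLookupSketch {
  // Made-up lookup that only "knows" some names, standing in for the external API call.
  def lookup(name: String): Future[Option[String]] =
    Future.successful(if (name.startsWith("a")) Some(name.toUpperCase) else None)

  def main(args: Array[String]): Unit = {
    val names = Seq("abba", "unknown", "air")
    // Sequence the per-element futures, then drop the None results; this is the same
    // filter(isDefined).map(get) step as in doJsonConversion, written with flatten.
    val known: Future[Seq[String]] = Future.sequence(names.map(lookup)).map(_.flatten)
    println(Await.result(known, 5.seconds)) // List(ABBA, AIR)
  }
}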
/*
 * Copyright 2022 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package uk.gov.hmrc.nisp.models

import play.api.libs.json.Json

case class SPAgeModel(age: Int, date: NpsDate)

object SPAgeModel {
  implicit val formats = Json.format[SPAgeModel]
}
hmrc/nisp-frontend
app/uk/gov/hmrc/nisp/models/SPAgeModel.scala
Scala
apache-2.0
789
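A self-contained sketch of what the Json.format macro above generates, using a stand-in case class with only primitive fields, since NpsDate and its Format are defined elsewhere in the project:

import play.api.libs.json.{Json, OFormat}

object SPAgeModelSketch {
  // Stand-in with primitive fields only, so the snippet compiles on its own.
  final case class SPAge(age: Int, date: String)
  implicit val format: OFormat[SPAge] = Json.format[SPAge]

  def main(args: Array[String]): Unit = {
    val json = Json.toJson(SPAge(67, "2035-04-06"))
    println(json)           // {"age":67,"date":"2035-04-06"}
    println(json.as[SPAge]) // SPAge(67,2035-04-06)
  }
}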
/* ************************************************************************************* * Copyright 2011 Normation SAS ************************************************************************************* * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * In accordance with the terms of section 7 (7. Additional Terms.) of * the GNU Affero GPL v3, the copyright holders add the following * Additional permissions: * Notwithstanding to the terms of section 5 (5. Conveying Modified Source * Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3 * licence, when you create a Related Module, this Related Module is * not considered as a part of the work and may be distributed under the * license agreement of your choice. * A "Related Module" means a set of sources files including their * documentation that, without modification of the Source Code, enables * supplementary functions or services in addition to those offered by * the Software. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>. * ************************************************************************************* */ package com.normation.rudder.web.rest import com.normation.rudder.web.model.CurrentUser import net.liftweb.common.EmptyBox import net.liftweb.common.Full import net.liftweb.http.Req import com.normation.eventlog.EventActor import org.apache.commons.codec.binary.Base64 import net.liftweb.json._ import net.liftweb.http._ import net.liftweb.json.JsonDSL._ import net.liftweb.http.js.JsExp import scala.text.Document import net.liftweb.common.Loggable import net.liftweb.common.Box import net.liftweb.common.Failure import net.liftweb.util.Helpers.tryo /** */ object RestUtils extends Loggable { /** * Get the rest user name, as follow: * - if the user is authenticated, use the provided UserName * - else, use the HTTP Header: X-REST-USERNAME * - else, return none */ def getUsername(req:Req) : Option[String] = { CurrentUser.is match { case None => req.header(s"X-REST-USERNAME") match { case eb:EmptyBox => None case Full(name) => Some(name) } case Some(u) => Some(u.getUsername) } } def getActor(req:Req) : EventActor = EventActor(getUsername(req).getOrElse("UnknownRestUser")) def getPrettify(req:Req) : Box[Boolean] = req.params.get("prettify") match { case None => Full(false) case Some("true" :: Nil) => Full(true) case Some("false" :: Nil) => Full(false) case _ => Failure("Prettify should only have one value, and should be set to true or false") } /** * Our own JSON render function to extends net.liftweb.json.JsonAst.render function * All code is taken from JsonAst object from lift-json_2.10-2.5.1.jar (dépendency used in rudder 2.10 at least) * and available at: https://github.com/lift/framework/blob/2.5.1/core/json/src/main/scala/net/liftweb/json/JsonAST.scala#L392 * What we added: * - add a new line after each element in array * - Add a new line at the end and beginning of an array and indent one more level array data * - space after colon */ def render(value: 
JValue): Document = {
    import scala.text.{Document, DocText}
    import scala.text.Document._

    // Helper functions, needed but private in JsonAST.
    // This one is modified: add a break after the punctuation
    def series(docs: List[Document]) = punctuate(text(",") :: break, docs)

    // no modification here
    def trimArr(xs: List[JValue]) = xs.filter(_ != JNothing)
    def trimObj(xs: List[JField]) = xs.filter(_.value != JNothing)
    def fields(docs: List[Document]) = punctuate(text(",") :: break, docs)

    // Indentation changed
    def punctuate(p: Document, docs: List[Document]): Document = {
      if (docs.length == 0) {
        empty
      } else {
        docs.reduceLeft((d1, d2) => d1 :: p :: d2)
      }
    }

    def quote(s: String): String = {
      val buf = new StringBuilder
      appendEscapedString(buf, s)
      buf.toString
    }

    def appendEscapedString(buf: StringBuilder, s: String) {
      for (i <- 0 until s.length) {
        val c = s.charAt(i)
        buf.append(c match {
          case '"'  => "\\\""
          case '\\' => "\\\\"
          case '\b' => "\\b"
          case '\f' => "\\f"
          case '\n' => "\\n"
          case '\r' => "\\r"
          case '\t' => "\\t"
          case c if ((c >= '\u0000' && c < '\u0020')) => "\\u%04x".format(c: Int)
          case c => c
        })
      }
    }

    // The actual render function
    // Falls back to JsonAST.render for the cases we do not customise
    value match {
      case JArray(arr) =>
        // origin: text("[") :: series(trimArr(arr).map(render)) :: text("]")
        // We want to break after [ and indent one more level
        val nested = break :: series(trimArr(arr).map(render))
        text("[") :: nest(2, nested) :: break :: text("]")
      case JField(n, v) =>
        // origin: text("\"" + quote(n) + "\":") :: render(v)
        // Just add a space after the colon
        text("\"" + quote(n) + "\": ") :: render(v)
      case JObject(obj) =>
        // origin: val nested = break :: fields(trimObj(obj).map(f => text("\"" + quote(f.name) + "\":") :: render(f.value)))
        // Just add a space after the colon
        val nested = break :: fields(trimObj(obj).map(f => text("\"" + quote(f.name) + "\": ") :: render(f.value)))
        text("{") :: nest(2, nested) :: break :: text("}")
      case _ => JsonAST.render(value)
    }
  }

  private[this] def effectiveResponse (id:Option[String], message:JValue, status:HttpStatus, action : String, prettify : Boolean) : LiftResponse = {
    val printer: Document => String = if (prettify) Printer.pretty else Printer.compact
    val json = ( "action" -> action ) ~ ( "id" -> id ) ~ ( "result" -> status.status ) ~ ( status.container -> message )
    val content : JsExp = new JsExp {
      lazy val toJsCmd = printer(render((json)))
    }
    JsonResponse(content, List(), List(), status.code)
  }

  def toJsonResponse(id:Option[String], message:JValue)(implicit action : String, prettify : Boolean) : LiftResponse = {
    effectiveResponse (id, message, RestOk, action, prettify)
  }

  def toJsonError(id:Option[String], message:JValue)(implicit action : String = "rest", prettify : Boolean) : LiftResponse = {
    effectiveResponse (id, message, RestError, action, prettify)
  }

  def notValidVersionResponse(action:String)(implicit availableVersions : List[Int]) = {
    val versions = "latest" :: availableVersions.map(_.toString)
    toJsonError(None, JString(s"The requested version does not exist, please use one of the following: ${versions.mkString("[ ", ", ", " ]")}"))(action,false)
  }

  def missingResponse(version:Int, action:String) = {
    toJsonError(None, JString(s"Version ${version} exists for this API function, but its implementation is missing"))(action,false)
  }
}

sealed case class ApiVersion (
  value : Int
)

object ApiVersion {

  def fromRequest(req:Req)(implicit availableVersions : List[Int]) : Box[ApiVersion] = {

    val latest = availableVersions.max

    def fromString (version : String) : Box[ApiVersion] = {
      version match {
case "latest" => Full(ApiVersion(latest)) case value => tryo { value.toInt } match { case Full(version) => if (availableVersions.contains(version)) { Full(ApiVersion(version)) } else { Failure(s" ${version} is not a valid api version") } // Never empty due to tryo case eb:EmptyBox => eb } } } req.header("X-API-VERSION") match { case Full(value) => fromString(value) case eb: EmptyBox => eb ?~ ("Error when getting header X-API-VERSION") } } } sealed trait HttpStatus { def code : Int def status : String def container : String } object RestOk extends HttpStatus{ val code = 200 val status = "success" val container = "data" } object RestError extends HttpStatus{ val code = 500 val status = "error" val container = "errorDetails" }
Kegeruneku/rudder
rudder-web/src/main/scala/com/normation/rudder/web/rest/RestUtils.scala
Scala
agpl-3.0
8,730
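
A minimal, hypothetical usage sketch for the record above (not part of the Rudder sources): assuming lift-json 2.5.x and the RestUtils object are on the classpath, it feeds a hand-built JValue through the customised render and Printer.pretty to show the extra line breaks and the space after each colon. The object name RenderSketch is invented for illustration.

import net.liftweb.json.JsonDSL._
import net.liftweb.json.Printer
import com.normation.rudder.web.rest.RestUtils

object RenderSketch extends App {
  // A small response-like document: an object with a nested array.
  val json = ("action" -> "listRules") ~ ("result" -> "success") ~ ("data" -> List(1, 2, 3))

  // Each array element ends up on its own line, and keys are rendered as "key": value.
  println(Printer.pretty(RestUtils.render(json)))
}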
package quizleague.web.model import scala.scalajs.js import scala.scalajs.js.Date import scala.scalajs.js.Any.fromBoolean import scala.scalajs.js.Any.fromString import scala.scalajs.js.annotation.JSExportAll import rxscalajs.Observable object CompetitionType extends Enumeration { type CompetitionType = Value val league, cup, subsidiary, singleton = Value } import CompetitionType._ import scala.scalajs.js.annotation.ScalaJSDefined import quizleague.web.util.rx.RefObservable sealed trait Competition extends Model{ val id: String val name: String val typeName: String val fixtures: Observable[js.Array[Fixtures]] val leaguetable: Observable[js.Array[LeagueTable]] val text: RefObservable[Text] val textName:String val icon:String val subsidiary:Boolean } class LeagueCompetition( override val id: String, override val name: String, val startTime: String, val duration: Float, override val fixtures: Observable[js.Array[Fixtures]], override val leaguetable: Observable[js.Array[LeagueTable]], override val text: RefObservable[Text], override val textName:String, val icon:String) extends Competition { override val typeName = league.toString() override val subsidiary = false } class CupCompetition( override val id: String, override val name: String, val startTime: String, val duration: Float, override val fixtures: Observable[js.Array[Fixtures]], override val text: RefObservable[Text], override val textName:String, val icon:String ) extends Competition { override val typeName = cup.toString() override val leaguetable: Observable[js.Array[LeagueTable]] = Observable.just(js.Array()) override val subsidiary = false } class SubsidiaryLeagueCompetition( override val id: String, override val name: String, override val fixtures: Observable[js.Array[Fixtures]], override val leaguetable: Observable[js.Array[LeagueTable]], override val text: RefObservable[Text], override val textName:String, val icon:String) extends Competition { override val typeName = CompetitionType.subsidiary.toString() override val subsidiary = true } object SubsidiaryLeagueCompetition { def addFixtures(sub:Competition,fixtures: Observable[js.Array[Fixtures]]) = { new SubsidiaryLeagueCompetition(sub.id,sub.name, fixtures, sub.leaguetable, sub.text, sub.textName, sub.icon) } } class SingletonCompetition( override val id: String, override val name: String, override val text: RefObservable[Text], val textName: String, val event: Event, val icon:String) extends Competition { override val typeName = singleton.toString() override val fixtures = Observable.just(js.Array()) override val leaguetable: Observable[js.Array[LeagueTable]] = Observable.just(js.Array()) override val subsidiary = false }
gumdrop/quizleague-maintain
js/src/main/scala/quizleague/web/model/Competition.scala
Scala
mit
2,813
import edu.luc.cs.ui.promptInt object Addition3 { def sumProblemString(x: Int, y: Int): String = { val sum = x + y val sentence = s"The sum of $x and $y is $sum." sentence } def main(args: Array[String]) { println(sumProblemString(2, 3)) println(sumProblemString(12345, 53579)) val a = promptInt("Enter an integer: ", -1) val b = promptInt("Enter another integer: ", -1) println(sumProblemString(a, b)) } }
LoyolaChicagoBooks/introcs-scala-examples
addition3/addition3.scala
Scala
gpl-3.0
450
/* * Copyright (C) 2012 Romain Reuillon * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package org.openmole.core.batch.authentication import org.openmole.core.workspace.AuthenticationProvider trait CypheredPassword { def cypheredPassword: String def password(implicit authenticationProvider: AuthenticationProvider) = if (cypheredPassword == null || cypheredPassword == "") "" else authenticationProvider.decrypt(cypheredPassword) }
ISCPIF/PSEExperiments
openmole-src/openmole/core/org.openmole.core.batch/src/main/scala/org/openmole/core/batch/authentication/CypheredPassword.scala
Scala
agpl-3.0
1,056
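
A short, hedged sketch for the record above; the type and method names are hypothetical, not OpenMOLE code. It shows how a concrete authentication type could mix in CypheredPassword and how a caller recovers the clear-text password through the implicit AuthenticationProvider that the trait's password method delegates to.

import org.openmole.core.batch.authentication.CypheredPassword
import org.openmole.core.workspace.AuthenticationProvider

// Hypothetical concrete authentication type; only cypheredPassword is required by the trait.
case class MyServerAuthentication(login: String, cypheredPassword: String) extends CypheredPassword

object MyServerAuthentication {
  // password returns "" when nothing is stored, otherwise provider.decrypt(cypheredPassword).
  def clearPassword(auth: MyServerAuthentication)(implicit provider: AuthenticationProvider): String =
    auth.password
}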
package scalarules.test_expect_failure.plus_one_deps.internal_deps class C
smparkes/rules_scala
test_expect_failure/plus_one_deps/internal_deps/C.scala
Scala
apache-2.0
75
package org.example1.usage import org.example1.declaration._ import org.example1.declaration.data.{X => X_Renamed} trait Usage_NoOther_Imports_Wildcard_WithRename_MovedClass_WithSomeLocalImport { val x1: X_Renamed = ??? val y: Y = ??? val z: Z = ??? def myScope(): Unit = { import org.example1.declaration.data.X val x2: X = ??? } }
JetBrains/intellij-scala
scala/scala-impl/testdata/move/allInOne/after/org/example1/usage/Usage_NoOther_Imports_Wildcard_WithRename_MovedClass_WithSomeLocalImport.scala
Scala
apache-2.0
354
package org.scalarules.dsl.nl.grammar import org.scalarules.dsl.nl.grammar.DslCondition._ import org.scalarules.engine._ import org.scalarules.facts.Fact import org.scalarules.utils.{SourcePosition, SourceUnknown} import scala.language.implicitConversions case class DslCondition(facts: Set[Fact[Any]], condition: Condition, sourcePosition: SourcePosition = SourceUnknown()) { def en[T](rhs: Fact[T]): DslConditionPart[T] = DslConditionPart(this, rhs, andPredicate) def en(rhs: DslCondition): DslCondition = combine(this, rhs, andPredicate) def of[T](rhs: Fact[T]): DslConditionPart[T] = DslConditionPart(this, rhs, orPredicate) def of(rhs: DslCondition): DslCondition = combine(this, rhs, orPredicate) private def combine(lhs: DslCondition, rhs: DslCondition, predicate: ConditionFunction): DslCondition = DslCondition(lhs.facts ++ rhs.facts, predicate(lhs.condition, rhs.condition)) } object DslCondition { val andPredicate: ConditionFunction = (l, r) => c => l(c) && r(c) val orPredicate: ConditionFunction = (l, r) => c => l(c) || r(c) val emptyTrueCondition: DslCondition = DslCondition(Set(), _ => true) def factFilledCondition[A](fact: Fact[A]): DslCondition = DslCondition(Set(fact), Conditions.exists(fact)) def andCombineConditions(initialDslCondition: DslCondition, dslConditions: DslCondition*): DslCondition = dslConditions.foldLeft(initialDslCondition)(_ en _) def orCombineConditions(initialDslCondition: DslCondition, dslConditions: DslCondition*): DslCondition = dslConditions.foldLeft(initialDslCondition)(_ of _) } trait DslConditionImplicits { implicit def toConditionDslPart[T](factDef : Fact[T]): DslConditionPart[T] = DslConditionPart(emptyTrueCondition, factDef, andPredicate) implicit def dslEvaluationToConditionDslPart[T](dslEvaluation: DslEvaluation[T]): DslEvaluationConditionPart[T] = DslEvaluationConditionPart(emptyTrueCondition, dslEvaluation, andPredicate) val altijd: DslCondition = emptyTrueCondition }
scala-rules/rule-engine
engine/src/main/scala/org/scalarules/dsl/nl/grammar/DslCondition.scala
Scala
mit
1,980
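
A tiny sketch, assuming only the scala-rules classes shown above: it combines conditions with the en (AND) and of (OR) operators. With real Fact definitions one would start from factFilledCondition(fact) rather than emptyTrueCondition; the object name is invented for illustration.

import org.scalarules.dsl.nl.grammar.DslCondition
import org.scalarules.dsl.nl.grammar.DslCondition._

object DslConditionSketch {
  // en folds the two underlying Condition functions with andPredicate, of with orPredicate;
  // both take the union of the fact sets (empty here, since no Facts are involved).
  val allOf: DslCondition = emptyTrueCondition en emptyTrueCondition
  val anyOf: DslCondition = emptyTrueCondition of emptyTrueCondition
}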
/* * Shadowsocks - A shadowsocks client for Android * Copyright (C) 2014 <max.c.lv@gmail.com> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * * ___====-_ _-====___ * _--^^^#####// \\\\#####^^^--_ * _-^##########// ( ) \\\\##########^-_ * -############// |\\^^/| \\\\############- * _/############// (@::@) \\\\############\\_ * /#############(( \\\\// ))#############\\ * -###############\\\\ (oo) //###############- * -#################\\\\ / VV \\ //#################- * -###################\\\\/ \\//###################- * _#/|##########/\\######( /\\ )######/\\##########|\\#_ * |/ |#/\\#/\\#/\\/ \\#/\\##\\ | | /##/\\#/ \\/\\#/\\#/\\#| \\| * ` |/ V V ` V \\#\\| | | |/#/ V ' V V \\| ' * ` ` ` ` / | | | | \\ ' ' ' ' * ( | | | | ) * __\\ | | | | /__ * (vvv(VVV)(VVV)vvv) * * HERE BE DRAGONS * */ package com.github.shadowsocks import android.content.pm.PackageManager import android.content.{ClipData, ClipboardManager, Context} import android.graphics.PixelFormat import android.graphics.drawable.Drawable import android.os.{Bundle, Handler} import android.preference.PreferenceManager import android.support.v7.app.AppCompatActivity import android.support.v7.widget.Toolbar import android.support.v7.widget.Toolbar.OnMenuItemClickListener import android.view.View.OnClickListener import android.view.ViewGroup.LayoutParams import android.view._ import android.widget.AbsListView.OnScrollListener import android.widget.CompoundButton.OnCheckedChangeListener import android.widget._ import android.Manifest.permission import com.github.shadowsocks.utils.{Key, Utils} import scala.collection.JavaConversions._ import scala.collection.mutable import scala.language.implicitConversions object AppManager { case class ProxiedApp(name: String, packageName: String, icon: Drawable) private case class ListEntry(switch: Switch, text: TextView, icon: ImageView) var cachedApps: Array[ProxiedApp] = _ private def getApps(pm: PackageManager) = { if (cachedApps == null) cachedApps = pm.getInstalledPackages(PackageManager.GET_PERMISSIONS) .filter(p => p.requestedPermissions != null && p.requestedPermissions.contains(permission.INTERNET)) .map(p => new ProxiedApp(pm.getApplicationLabel(p.applicationInfo).toString, p.packageName, p.applicationInfo.loadIcon(pm))).toArray cachedApps } } class AppManager extends AppCompatActivity with OnCheckedChangeListener with OnClickListener with OnMenuItemClickListener { import AppManager._ private var apps: Array[ProxiedApp] = _ private var proxiedApps: mutable.HashSet[String] = _ private var toolbar: Toolbar = _ private var appListView: ListView = _ private var loadingView: View = _ private var overlay: TextView = _ private var adapter: ListAdapter = _ @volatile private var appsLoading: Boolean = _ def loadApps() { appsLoading = true proxiedApps = ShadowsocksApplication.settings.getString(Key.proxied, "").split('\\n').to[mutable.HashSet] apps = getApps(getPackageManager).sortWith((a, b) => { val aProxied = 
proxiedApps.contains(a.packageName) if (aProxied ^ proxiedApps.contains(b.packageName)) aProxied else a.name.compareToIgnoreCase(b.name) < 0 }) adapter = new ArrayAdapter[ProxiedApp](this, R.layout.layout_apps_item, R.id.itemtext, apps) { override def getView(position: Int, view: View, parent: ViewGroup): View = { var convertView = view var entry: ListEntry = null if (convertView == null) { convertView = getLayoutInflater.inflate(R.layout.layout_apps_item, parent, false) entry = new ListEntry(convertView.findViewById(R.id.itemcheck).asInstanceOf[Switch], convertView.findViewById(R.id.itemtext).asInstanceOf[TextView], convertView.findViewById(R.id.itemicon).asInstanceOf[ImageView]) convertView.setOnClickListener(AppManager.this) convertView.setTag(entry) entry.switch.setOnCheckedChangeListener(AppManager.this) } else { entry = convertView.getTag.asInstanceOf[ListEntry] } val app: ProxiedApp = apps(position) entry.text.setText(app.name) entry.icon.setImageDrawable(app.icon) val switch = entry.switch switch.setTag(app) switch.setChecked(proxiedApps.contains(app.packageName)) entry.text.setTag(switch) convertView } } } private def setProxied(pn: String, proxied: Boolean) = if (proxied) proxiedApps.add(pn) else proxiedApps.remove(pn) /** Called an application is check/unchecked */ def onCheckedChanged(buttonView: CompoundButton, isChecked: Boolean) { val app: ProxiedApp = buttonView.getTag.asInstanceOf[ProxiedApp] if (app != null) setProxied(app.packageName, isChecked) saveAppSettings(this) } def onClick(v: View) { val switch = v.getTag.asInstanceOf[ListEntry].switch val app: ProxiedApp = switch.getTag.asInstanceOf[ProxiedApp] if (app != null) { val proxied = !proxiedApps.contains(app.packageName) setProxied(app.packageName, proxied) switch.setChecked(proxied) } saveAppSettings(this) } override def onDestroy() { super.onDestroy() if (handler != null) { handler.removeCallbacksAndMessages(null) handler = null } } def onMenuItemClick(item: MenuItem): Boolean = { val clipboard = getSystemService(Context.CLIPBOARD_SERVICE).asInstanceOf[ClipboardManager] val prefs = PreferenceManager.getDefaultSharedPreferences(getBaseContext) item.getItemId match { case R.id.action_export => val bypass = prefs.getBoolean(Key.isBypassApps, false) val proxiedAppString = prefs.getString(Key.proxied, "") val clip = ClipData.newPlainText(Key.proxied, bypass + "\\n" + proxiedAppString) clipboard.setPrimaryClip(clip) Toast.makeText(this, R.string.action_export_msg, Toast.LENGTH_SHORT).show() return true case R.id.action_import => if (clipboard.hasPrimaryClip) { val clipdata = clipboard.getPrimaryClip val label = clipdata.getDescription.getLabel if (label == Key.proxied) { val proxiedAppSequence = clipdata.getItemAt(0).getText if (proxiedAppSequence != null) { val proxiedAppString = proxiedAppSequence.toString if (!proxiedAppString.isEmpty) { val editor = prefs.edit val i = proxiedAppString.indexOf('\\n') if (i < 0) editor.putBoolean(Key.isBypassApps, proxiedAppString.toBoolean).putString(Key.proxied, "").apply() else editor.putBoolean(Key.isBypassApps, proxiedAppString.substring(0, i).toBoolean) .putString(Key.proxied, proxiedAppString.substring(i + 1)).apply() Toast.makeText(this, R.string.action_import_msg, Toast.LENGTH_SHORT).show() // Restart activity appListView.setVisibility(View.GONE) loadingView.setVisibility(View.VISIBLE) if (appsLoading) appsLoading = false else loadAppsAsync() return true } } } } Toast.makeText(this, R.string.action_import_err, Toast.LENGTH_SHORT).show() return false } false } protected override 
def onCreate(savedInstanceState: Bundle) { super.onCreate(savedInstanceState) handler = new Handler() this.setContentView(R.layout.layout_apps) toolbar = findViewById(R.id.toolbar).asInstanceOf[Toolbar] toolbar.setTitle(R.string.proxied_apps) toolbar.setNavigationIcon(R.drawable.abc_ic_ab_back_mtrl_am_alpha) toolbar.setNavigationOnClickListener((v: View) => { val intent = getParentActivityIntent if (intent == null) finish else navigateUpTo(intent) }) toolbar.inflateMenu(R.menu.app_manager_menu) toolbar.setOnMenuItemClickListener(this) this.overlay = View.inflate(this, R.layout.overlay, null).asInstanceOf[TextView] getWindowManager.addView(overlay, new WindowManager.LayoutParams(LayoutParams.WRAP_CONTENT, LayoutParams.WRAP_CONTENT, WindowManager.LayoutParams.TYPE_APPLICATION, WindowManager.LayoutParams.FLAG_NOT_FOCUSABLE | WindowManager.LayoutParams.FLAG_NOT_TOUCHABLE, PixelFormat.TRANSLUCENT)) findViewById(R.id.onSwitch).asInstanceOf[Switch] .setOnCheckedChangeListener((button: CompoundButton, checked: Boolean) => { ShadowsocksApplication.settings.edit().putBoolean(Key.isProxyApps, checked).apply() finish() }) val bypassSwitch = findViewById(R.id.bypassSwitch).asInstanceOf[Switch] bypassSwitch.setOnCheckedChangeListener((button: CompoundButton, checked: Boolean) => ShadowsocksApplication.settings.edit().putBoolean(Key.isBypassApps, checked).apply()) bypassSwitch.setChecked(ShadowsocksApplication.settings.getBoolean(Key.isBypassApps, false)) loadingView = findViewById(R.id.loading) appListView = findViewById(R.id.applistview).asInstanceOf[ListView] appListView.setOnScrollListener(new AbsListView.OnScrollListener { var visible = false def onScroll(view: AbsListView, firstVisibleItem: Int, visibleItemCount: Int, totalItemCount: Int) { if (visible) { val name = apps(firstVisibleItem).name overlay.setText(if (name != null && name.length > 1) name(0).toUpper.toString else "*") overlay.setVisibility(View.VISIBLE) } } def onScrollStateChanged(view: AbsListView, scrollState: Int) { visible = true if (scrollState == OnScrollListener.SCROLL_STATE_IDLE) { overlay.setVisibility(View.INVISIBLE) } } }) loadAppsAsync() } def loadAppsAsync() { ThrowableFuture { while (!appsLoading) loadApps() appsLoading = false handler.post(() => { appListView.setAdapter(adapter) Utils.crossFade(AppManager.this, loadingView, appListView) }) } } def saveAppSettings(context: Context) { if (!appsLoading) ShadowsocksApplication.settings.edit.putString(Key.proxied, proxiedApps.mkString("\\n")).apply } var handler: Handler = null override def onKeyUp(keyCode: Int, event: KeyEvent) = keyCode match { case KeyEvent.KEYCODE_MENU => if (toolbar.isOverflowMenuShowing) toolbar.hideOverflowMenu else toolbar.showOverflowMenu case _ => super.onKeyUp(keyCode, event) } }
baohaojun/shadowsocks-android
src/main/scala/com/github/shadowsocks/AppManager.scala
Scala
gpl-3.0
11,482
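
A plain-Scala sketch of the clipboard payload format that onMenuItemClick exports and imports in the record above: the first line carries the isBypassApps flag, the remaining lines the proxied package names. The object and method names here are invented to make the format explicit; the app itself keeps this state in SharedPreferences.

object ProxiedAppsPayload {
  // Mirrors the export branch: the flag, a newline, then the package list joined by newlines.
  def serialize(bypass: Boolean, packages: Seq[String]): String =
    s"$bypass\n${packages.mkString("\n")}"

  // Roughly mirrors the import branch: no newline means only the flag was exported.
  // Splitting and dropping empty entries matches how loadApps later reads the stored string.
  def parse(payload: String): (Boolean, Seq[String]) = {
    val i = payload.indexOf('\n')
    if (i < 0) (payload.toBoolean, Seq.empty)
    else (payload.substring(0, i).toBoolean, payload.substring(i + 1).split('\n').filter(_.nonEmpty).toSeq)
  }
}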
package uk.co.grahamcox.oauth import com.github.nscala_time.time.Imports._ import javax.crypto._ import org.apache.commons.codec.binary.Base64 import grizzled.slf4j.Logger /** * Base class to represent the Signing Method */ abstract class SigningMethod { /** * Sign the given value with the given key * @param value The value to sign * @param key The key to sign it with * @return The signed value */ def sign(value: String, key: String): String /** * Get the name of the signing method * @return the name */ def name: String } /** * Implementation of the HMAC-SHA1 Signing Method */ class HmacSha1SigningMethod extends SigningMethod { /** The name to give the Java Crypto API for the Mac to use */ private val MacName = "HmacSHA1" /** * Sign the given value with the given key * @param value The value to sign * @param key The key to sign it with * @return The signed value */ def sign(value: String, key: String): String = { val mac = Mac.getInstance(MacName) mac.init(new spec.SecretKeySpec(key.getBytes(), MacName)) val signed = mac.doFinal(value.getBytes()) Base64.encodeBase64String(signed) } /** * Get the name of the signing method * @return the name */ def name: String = "HMAC-SHA1" } /** * Representation of a Signature in the OAuth transaction * @param value The value of the signature * @param method The method for the signature */ class Signature(val value: String, val method: String) /** * Companion object to create a Signature */ object Signature { /** The logger to use */ val logger = Logger[this.type] /** * Actually build the signature from the component pieces * @param request The details of the request * @param nonce The nonce to use * @param timestamp The timestamp to use * @param method The signature method to use * @param version The OAuth version to use */ def apply(request: Request, consumerKey: Key, token: Option[Key] = None, nonce: Nonce, timestamp: DateTime, method: SigningMethod = new HmacSha1SigningMethod(), version: String = "1.0"): Signature = { val oauthParams = Map("oauth_consumer_key" -> consumerKey.key, "oauth_nonce" -> nonce.value, "oauth_signature_method" -> method.name, "oauth_timestamp" -> (timestamp.withZone(DateTimeZone.UTC).getMillis() / 1000).toString(), "oauth_version" -> version) val tokenParams = token match { case Some(k: Key) => Map("oauth_token" -> k.key) case None => Map.empty[String, String] } val parameters = (oauthParams ++ tokenParams ++ request.parameters).foldLeft(Seq.empty[String]) { case (soFar, (k, v)) => { soFar ++ Seq(PercentEncoder(k) + "=" + PercentEncoder(v)) } } sorted val paramString = parameters.mkString("&") logger.debug(s"Param String = $paramString") val signatureString = Seq(request.method, PercentEncoder(request.url), PercentEncoder(paramString)).mkString("&") logger.debug(s"Signature String = $signatureString") val signingKey = PercentEncoder(consumerKey.secret) + "&" + (token match { case Some(k: Key) => PercentEncoder(k.secret) case None => "" }) logger.debug(s"Signing key = $signingKey") new Signature(method.sign(signatureString, signingKey), method.name) } }
sazzer/books
oauth/src/main/scala/uk/co/grahamcox/oauth/Signature.scala
Scala
gpl-3.0
3,355
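
A hedged usage sketch for the record above: it drives only HmacSha1SigningMethod directly, with toy values for the signature base string and the signing key (a consumer secret of "secret" and an empty token secret, mirroring the key construction in Signature.apply). A real caller would go through Signature.apply with the package's Request, Key and Nonce types.

import uk.co.grahamcox.oauth.HmacSha1SigningMethod

object SigningSketch extends App {
  val method = new HmacSha1SigningMethod

  // Toy base string of the form METHOD&encoded-url&encoded-params; real ones are built by Signature.apply.
  val baseString = "GET&https%3A%2F%2Fapi.example.org%2F1.1%2Fstatuses&count%3D10"
  val signingKey = "secret" + "&" + "" // consumer secret, '&', token secret (empty here)

  println(s"${method.name} -> ${method.sign(baseString, signingKey)}")
}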