Dataset columns:
  code       string (length 5 to 1M)
  repo_name  string (length 5 to 109)
  path       string (length 6 to 208)
  language   string (1 class)
  license    string (15 classes)
  size       int64  (5 to 1M)
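Each record below pairs a source file (code) with its repository metadata. As a hedged sketch of how such records might be represented and filtered in Scala (the CodeSample case class, the sample rows, and the filtering criteria are illustrative assumptions, not part of the dataset's tooling):

// Illustrative sketch only: a case class mirroring the columns above and a
// simple filter over in-memory sample rows (the sample values are made up).
case class CodeSample(
  code: String,      // full file contents (5 B to 1 MB)
  repoName: String,  // e.g. "exercism/xscala"
  path: String,      // file path inside the repository
  language: String,  // always "Scala" in this dataset (1 class)
  license: String,   // one of 15 license identifiers, e.g. "mit"
  size: Long         // file size in bytes
)

object CodeSampleDemo extends App {
  val rows = Seq(
    CodeSample("object A", "exercism/xscala", "src/A.scala", "Scala", "mit", 8L),
    CodeSample("object B", "gebner/gapt", "src/B.scala", "Scala", "gpl-3.0", 8L)
  )
  // Keep only permissively licensed files smaller than 16 KB.
  val permissive = Set("mit", "apache-2.0", "bsd-3-clause", "unlicense")
  rows.filter(r => permissive(r.license) && r.size < 16 * 1024)
      .foreach(r => println(s"${r.repoName}/${r.path} (${r.license}, ${r.size} B)"))
}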
import org.scalatest.{Matchers, FunSuite}

/** @version 1.0.0 */
class ArmstrongNumbersTest extends FunSuite with Matchers {

  test("Single digit numbers are Armstrong numbers") {
    ArmstrongNumbers.isArmstrongNumber(5) should be (true)
  }

  test("There are no 2 digit Armstrong numbers") {
    pending
    ArmstrongNumbers.isArmstrongNumber(10) should be (false)
  }

  test("Three digit number that is an Armstrong number") {
    pending
    ArmstrongNumbers.isArmstrongNumber(153) should be (true)
  }

  test("Three digit number that is not an Armstrong number") {
    pending
    ArmstrongNumbers.isArmstrongNumber(100) should be (false)
  }

  test("Four digit number that is an Armstrong number") {
    pending
    ArmstrongNumbers.isArmstrongNumber(9474) should be (true)
  }

  test("Four digit number that is not an Armstrong number") {
    pending
    ArmstrongNumbers.isArmstrongNumber(9475) should be (false)
  }

  test("Seven digit number that is an Armstrong number") {
    pending
    ArmstrongNumbers.isArmstrongNumber(9926315) should be (true)
  }

  test("Seven digit number that is not an Armstrong number") {
    pending
    ArmstrongNumbers.isArmstrongNumber(9926314) should be (false)
  }
}
repo_name: exercism/xscala
path: exercises/practice/armstrong-numbers/src/test/scala/ArmstrongNumbersTest.scala
language: Scala
license: mit
size: 1,219 bytes
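The ArmstrongNumbersTest above exercises an ArmstrongNumbers object that the exercise leaves for the student to write. A minimal sketch that would satisfy these assertions (an assumption, not the exercise's reference solution):

// Hedged sketch: one possible ArmstrongNumbers implementation for the test above.
// An Armstrong number equals the sum of its digits, each raised to the power of
// the digit count (e.g. 153 = 1^3 + 5^3 + 3^3).
object ArmstrongNumbers {
  def isArmstrongNumber(n: Int): Boolean = {
    val digits = n.toString.map(_.asDigit)
    val power  = digits.length
    digits.map(d => math.pow(d, power).toLong).sum == n
  }
}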
// Databricks notebook source exported at Thu, 7 Jul 2016 04:38:56 UTC // MAGIC %md // MAGIC // MAGIC # [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/) // MAGIC // MAGIC // MAGIC ### Course Project by [Akinwande Atanda](https://nz.linkedin.com/in/akinwande-atanda) // MAGIC // MAGIC *supported by* [![](https://raw.githubusercontent.com/raazesh-sainudiin/scalable-data-science/master/images/databricks_logoTM_200px.png)](https://databricks.com/) // MAGIC and // MAGIC [![](https://raw.githubusercontent.com/raazesh-sainudiin/scalable-data-science/master/images/AWS_logoTM_200px.png)](https://www.awseducate.com/microsite/CommunitiesEngageHome) // COMMAND ---------- // MAGIC %md // MAGIC The [html source url](https://raw.githubusercontent.com/raazesh-sainudiin/scalable-data-science/master/db/studentProjects/02_AkinwandeAtanda/Tweet_Analytics/041_TA01_02_Filtered_Tweets_Collector_Set-up_by_Keywords_and_Hashtags.html) of this databricks notebook and its recorded Uji ![Image of Uji, Dogen's Time-Being](https://raw.githubusercontent.com/raazesh-sainudiin/scalable-data-science/master/images/UjiTimeBeingDogen.png "uji"): // MAGIC // MAGIC [![sds/uji/studentProjects/02_AkinwandeAtanda/Tweet_Analytics/041_TA01_02_Filtered_Tweets_Collector_Set-up_by_Keywords_and_Hashtags](http://img.youtube.com/vi/zJirlHAV6YU/0.jpg)](https://www.youtube.com/v/zJirlHAV6YU?rel=0&autoplay=1&modestbranding=1&start=0&end=1611) // COMMAND ---------- // MAGIC %md // MAGIC #Tweet Analytics // MAGIC // MAGIC [Presentation contents](https://github.com/aaa121/Spark-Tweet-Streaming-Presentation-May-2016). // COMMAND ---------- // MAGIC %md // MAGIC // MAGIC ## Filtered Generic Twitter Collector by Keywords and Hashtags // MAGIC // MAGIC // MAGIC Remeber that the use of twitter itself comes with various strings attached. // MAGIC - **Read:** [Twitter Rules](https://twitter.com/rules) // MAGIC // MAGIC // MAGIC Crucially, the use of the content from twitter by you (as done in this worksheet) comes with some strings. // MAGIC - **Read:** [Developer Agreement & Policy Twitter Developer Agreement](https://dev.twitter.com/overview/terms/agreement-and-policy) // COMMAND ---------- import org.apache.spark._ import org.apache.spark.storage._ import org.apache.spark.streaming._ import org.apache.spark.streaming.twitter.TwitterUtils import twitter4j.auth.OAuthAuthorization import twitter4j.conf.ConfigurationBuilder import com.google.gson.Gson // COMMAND ---------- // MAGIC %md // MAGIC // MAGIC ### Step 1: Enter your Twitter API Credentials. // MAGIC * Go to https://apps.twitter.com and look up your Twitter API Credentials, or create an app to create them. // MAGIC * Run this cell for the input cells to appear. // MAGIC * Enter your credentials. // MAGIC * Run the cell again to pick up your defaults. // MAGIC // MAGIC The cell-below is hidden to not expose the Twitter API Credentials: `consumerKey`, `consumerSecret`, `accessToken` and `accessTokenSecret`. // COMMAND ---------- System.setProperty("twitter4j.oauth.consumerKey", getArgument("1. Consumer Key (API Key)", "")) System.setProperty("twitter4j.oauth.consumerSecret", getArgument("2. Consumer Secret (API Secret)", "")) System.setProperty("twitter4j.oauth.accessToken", getArgument("3. Access Token", "")) System.setProperty("twitter4j.oauth.accessTokenSecret", getArgument("4. 
Access Token Secret", "")) // COMMAND ---------- // MAGIC %md // MAGIC If you see warnings then ignore for now: // MAGIC [https://forums.databricks.com/questions/6941/change-in-getargument-for-notebook-input.html](https://forums.databricks.com/questions/6941/change-in-getargument-for-notebook-input.html). // COMMAND ---------- // MAGIC %md // MAGIC // MAGIC ### Step 2: Configure where to output each filtered Batches of Tweet Stream and how often to compute them. // MAGIC * Run this cell for the input cells to appear. // MAGIC * Enter your credentials. // MAGIC * Run the cell again to pick up your defaults. // COMMAND ---------- val outputDirectory = getArgument("1. Output Directory", "twitterNew3") val batchInterval = getArgument("2. Interval for each DStream in Minutes ", "1").toInt val timeoutJobLength = getArgument("3. Max Time to fetch all batches of Dstream", "100").toInt * 1000 // COMMAND ---------- // Replace with your AWS S3 credentials // NOTE: Set the access to this notebook appropriately to protect the security of your keys. // Or you can delete this cell after you run the mount command below once successfully. val AccessKey = getArgument("1. ACCESS_KEY", "REPLACE_WITH_YOUR_ACCESS_KEY") val SecretKey = getArgument("2. SECRET_KEY", "REPLACE_WITH_YOUR_SECRET_KEY") val EncodedSecretKey = SecretKey.replace("/", "%2F") val AwsBucketName = getArgument("3. S3_BUCKET", "REPLACE_WITH_YOUR_S3_BUCKET") val MountName = getArgument("4. MNT_NAME", "REPLACE_WITH_YOUR_MOUNT_NAME") val s3Filename = "tweetDump" // COMMAND ---------- dbutils.fs.unmount(s"/mnt/$MountName") // finally unmount when done // COMMAND ---------- dbutils.fs.mount(s"s3a://$AccessKey:$EncodedSecretKey@$AwsBucketName", s"/mnt/$MountName") // mount if unmounted // COMMAND ---------- // MAGIC %md **A directory can be created to save the Tweet Stream** // COMMAND ---------- dbutils.fs.mkdirs(s"/mnt/$MountName/twitterNew3/") // COMMAND ---------- //dbutils.fs.rm("/mnt/$MountName/NAME_OF_DIRECTORY/",recurse=true) //Remove the directory if previously created and no longer required for further use. // COMMAND ---------- display(dbutils.fs.ls(s"/mnt/s3Data/twitterNew3")) // COMMAND ---------- // MAGIC %md // MAGIC // MAGIC ### Step 3: Run the Twitter Streaming job. // COMMAND ---------- // MAGIC %md // MAGIC Create the function to set-up the Streaming Context and the streaming job. // COMMAND ---------- // MAGIC %md // MAGIC ###Keyword(s) and Hashtag(s) Tracking Set-up // MAGIC * Create a list of keyword(s) or hashtag(s) to track from Twitter // MAGIC * Twitter4j.Status returns ONLY tweets that contain any of the keyword(s) or hashtag(s) either in lower or upper case // MAGIC * For example: We created a list of keywords and hashtags to track tweets about the US presumptive Republican Presidential Candidate, Donald J. Trump // MAGIC * Tips: Search for popular hashtags on a specific topic on RiteTag.com // MAGIC [![](http://www.wonderoftech.com/wp-content/uploads/2014/07/RiteTag-Logo.jpg)](https://ritetag.com/hashtag-search/Donald%20trump?green=0) // COMMAND ---------- // MAGIC %md // MAGIC *** Read the Infographic on how to use twitter to increase the number of followers. This is relevant to those interested in marketing via social media *** // COMMAND ---------- //This allows easy embedding of publicly available information into any other notebook //when viewing in git-book just ignore this block - you may have to manually chase the URL in frameIt("URL"). 
//Example usage: // displayHTML(frameIt("https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation#Topics_in_LDA",250)) def frameIt( u:String, h:Int ) : String = { """<iframe src=""""+ u+"""" width="95%" height="""" + h + """" sandbox> <p> <a href="http://spark.apache.org/docs/latest/index.html"> Fallback link for browsers that, unlikely, don't support frames </a> </p> </iframe>""" } displayHTML(frameIt("http://www.wonderoftech.com/best-twitter-tips-followers/", 600)) // COMMAND ---------- // the Library has already been attached to this cluster (show live how to do this from scratch?) var newContextCreated = false var numTweetsCollected = 0L // track number of tweets collected //val conf = new SparkConf().setAppName("TrackedTweetCollector").setMaster("local") // This is the function that creates the SteamingContext and sets up the Spark Streaming job. def creatingFunc(): StreamingContext = { // Create a Spark Streaming Context. val ssc = new StreamingContext(sc, Minutes(batchInterval)) // Create a Twitter Stream for the input source. val auth = Some(new OAuthAuthorization(new ConfigurationBuilder().build())) val track = List("Trump2016", "#MakeAmericaGreatAgain", "Donald Trump","#lovetrumpshate") //List(“Hillary Clinton”, “#neverhillary”, “#hillaryclinton”, “#demthrones”) val twitterStream = TwitterUtils.createStream(ssc, auth, track) val twitterStreamJson = twitterStream.map(x => { val gson = new Gson(); val xJson = gson.toJson(x) xJson }) val partitionsEachInterval = 1 // This tells the number of partitions in each RDD of tweets in the DStream. twitterStreamJson.foreachRDD((rdd, time) => { // for each filtered RDD in the DStream val count = rdd.count() if (count > 0) { val outputRDD = rdd.repartition(partitionsEachInterval) // repartition as desired //outputRDD.saveAsTextFile(s"${outputDirectory}/tweets_" + time.milliseconds.toString) // save as textfile outputRDD.saveAsTextFile(s"/mnt/$MountName/${outputDirectory}" + "/tweets_" + time.milliseconds.toString+".txt") // save as textfile in s3 numTweetsCollected += count // update with the latest count } }) newContextCreated = true ssc } // COMMAND ---------- // MAGIC %md // MAGIC Create the StreamingContext using getActiveOrCreate, as required when starting a streaming job in Databricks. // COMMAND ---------- val ssc = StreamingContext.getActiveOrCreate(creatingFunc) // COMMAND ---------- // MAGIC %md // MAGIC // MAGIC Start the Spark Streaming Context and return when the Streaming job exits or return with the specified timeout. // COMMAND ---------- ssc.start() // COMMAND ---------- // MAGIC %md // MAGIC // MAGIC Stop Streaming and/or Terminate the Spark Streaming Context (=true) and return when the Streaming job exits or return with the specified timeout. // COMMAND ---------- ssc.start() ssc.awaitTerminationOrTimeout(timeoutJobLength) //ssc.stop(stopSparkContext = false) // COMMAND ---------- // MAGIC %md // MAGIC Check out the Clusters 'Streaming` UI as the job is running. // COMMAND ---------- // MAGIC %md // MAGIC // MAGIC Stop any active Streaming Contexts, but don't stop the spark contexts they are attached to. // COMMAND ---------- // MAGIC %md // MAGIC // MAGIC ### Step 4: View the Results. // COMMAND ---------- display(dbutils.fs.ls("/mnt/s3Data/twitterNew2/")) // COMMAND ---------- // MAGIC %md // MAGIC Read each RDD in the DStream as Text File // MAGIC // MAGIC * Get the file name from the above `display` and edit the input string to `textFile` below. 
// COMMAND ---------- val rdd1 = sc.textFile(s"/mnt/s3Data/twitterNew2/tweets_1463704200000.txt/") // COMMAND ---------- rdd1.count // COMMAND ---------- rdd1.take(1) // COMMAND ---------- display(dbutils.fs.ls(s"/mnt/$MountName/${outputDirectory}")) // COMMAND ---------- // MAGIC %md // MAGIC ### 5. Read all the RDD as a Whole Text File // COMMAND ---------- //val dStream = sc.wholeTextFiles(s"/mnt/$MountName/${outputDirectory}") val dStream = sc.textFile(s"/mnt/s3Data/twitterNew2/*.txt/") // COMMAND ---------- dStream.take(1) // COMMAND ---------- dStream.count //This returns the number of events or tweets in all the RDD stream // COMMAND ---------- // MAGIC %md A better way of Merging the Files // COMMAND ---------- val dStreamw = sc.wholeTextFiles(s"/mnt/s3Data/twitterNew2/*.txt/") // COMMAND ---------- val dStreamTitle = dStreamw.map(rdd => rdd._1).collect // COMMAND ---------- val dStreamContent = dStreamw.map(rdd => rdd._2) dStreamContent.cache // COMMAND ---------- dStreamContent.take(1) // COMMAND ---------- // MAGIC %md // MAGIC // MAGIC # [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/) // MAGIC // MAGIC // MAGIC ### Course Project by [Akinwande Atanda](https://nz.linkedin.com/in/akinwande-atanda) // MAGIC // MAGIC *supported by* [![](https://raw.githubusercontent.com/raazesh-sainudiin/scalable-data-science/master/images/databricks_logoTM_200px.png)](https://databricks.com/) // MAGIC and // MAGIC [![](https://raw.githubusercontent.com/raazesh-sainudiin/scalable-data-science/master/images/AWS_logoTM_200px.png)](https://www.awseducate.com/microsite/CommunitiesEngageHome)
repo_name: lamastex/scalable-data-science
path: db/studentProjects/02_AkinwandeAtanda/Tweet_Analytics/041_TA01_02_Filtered_Tweets_Collector_Set-up_by_Keywords_and_Hashtags.scala
language: Scala
license: unlicense
size: 12,409 bytes
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.github.cloudml.zen.ml.neuralNetwork

import com.github.cloudml.zen.ml.util.MnistDatasetSuite
import org.scalatest.{FunSuite, Matchers}

class StackedRBMSuite extends FunSuite with MnistDatasetSuite with Matchers {
  ignore("StackedRBM") {
    val (data, numVisible) = mnistTrainDataset(5000)
    data.cache()
    val topology = Array(numVisible, 300, 300, 500)
    val stackedRBM = StackedRBM.train(data.map(_._1), 100, 1200, topology, 0.01, 0.1, 0.0)
  }
}
repo_name: bhoppi/zen
path: ml/src/test/scala/com/github/cloudml/zen/ml/neuralNetwork/StackedRBMSuite.scala
language: Scala
license: apache-2.0
size: 1,274 bytes
/* Copyright 2017-19, Emmanouil Antonios Platanios. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package org.platanios.tensorflow.api.ops import org.platanios.tensorflow.api.core.Shape import org.platanios.tensorflow.api.implicits.Implicits._ import org.platanios.tensorflow.api.ops.basic.Basic import org.platanios.tensorflow.api.ops.math.Math /** * @author Emmanouil Antonios Platanios */ package object metrics { private[api] trait API { type Metric[T, R] = metrics.Metric[T, R] type MapMetric[S, T, R] = metrics.MapMetric[S, T, R] type Mean = metrics.Mean type Accuracy = metrics.Accuracy type ConfusionMatrix[T] = metrics.ConfusionMatrix[T] type GroupedPrecision = metrics.GroupedPrecision type PrecisionAtK = metrics.PrecisionAtK val Metric : metrics.Metric.type = metrics.Metric val MapMetric : metrics.MapMetric.type = metrics.MapMetric val Mean : metrics.Mean.type = metrics.Mean val Accuracy : metrics.Accuracy.type = metrics.Accuracy val ConfusionMatrix : metrics.ConfusionMatrix.type = metrics.ConfusionMatrix val GroupedPrecision: metrics.GroupedPrecision.type = metrics.GroupedPrecision val PrecisionAtK : metrics.PrecisionAtK.type = metrics.PrecisionAtK } // TODO: [SPARSE] Add versions for the following utilities. /** Filters all but `selectedID` out of `ids`. * * @param ids Tensor containing the IDs to filter. * @param selectedID Scalar containing the ID to select. * @return Sparse tensor with the same shape as `ids`, but containing only the entries equal to `selectedID`. */ private[metrics] def selectID( ids: Output[Long], selectedID: Output[Long] ): SparseOutput[Long] = { // The shape of filled IDs is the same as `ids` with the last axis size collapsed to 1. val idsShape = Basic.shape(ids) val idsLastAxis = Basic.size(idsShape) - 1L val filledSelectedIDShape = Math.reducedShape(idsShape, Basic.reshape(idsLastAxis, Shape(1))) // Intersect `ids` with the selected ID. val filledSelectedID = Basic.fill[Long, Long](filledSelectedIDShape)(selectedID) val result = Sets.setIntersection(filledSelectedID, ids) SparseOutput(result.indices, result.values, idsShape) } /** Calculates true positives for the recall@k and the precision@k metrics. * * If `labelID` is specified, the constructed op calculates binary true positives for `labelID` only. * If `labelID` is not specified, then it calculates metrics for `k` predicted vs `n` labels. * * @param labels Tensor with shape `[D1, ... DN, numLabels]`, where `N >= 1` and `numLabels` is the * number of target classes for the associated prediction. Commonly, `N = 1` and `labels` has * shape `[batchSize, numLabels]`. `[D1, ..., DN]` must match the shape of `predictionIDs`. * @param predictionIDs 1-D or higher tensor with its last dimension corresponding to the top `k` predicted * classes. For rank `n`, the first `n-1` dimensions must match the shape of `labels`. * @param labelID Optional label for which we want to compute the number of true positives. 
* @param weights Optional weights tensor with rank is either `0`, or `n-1`, where `n` is the rank of * `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be * either `1`, or the same as the corresponding `labels` dimension). * @param name Namescope to use for all created ops. * @return Tensor containing the number of true positives. */ private[metrics] def sparseTruePositives( labels: Output[Long], predictionIDs: Output[Long], labelID: Option[Output[Long]] = None, weights: Option[Output[Float]] = None, name: String = "SparseTruePositives" ): Output[Float] = { Op.nameScope(name) { val numTruePositives = labelID match { case None => Sets.setSize(Sets.setIntersection(predictionIDs, labels)).castTo[Float] case Some(selectedID) => val filteredPredictionIDs = selectID(predictionIDs, selectedID) val filteredLabels = selectID(labels, selectedID) Sets.setSize(Sets.setIntersection(filteredPredictionIDs, filteredLabels)).castTo[Float] } weights match { case None => numTruePositives case Some(w) => Op.createWith(controlDependencies = Set(Metric.weightsAssertBroadcastable(numTruePositives, w))) { Math.multiply(numTruePositives, w) } } } } /** Calculates streaming true positives for the recall@k and the precision@k metrics. * * If `labelID` is specified, the constructed op calculates binary true positives for `labelID` only. * If `labelID` is not specified, then it calculates metrics for `k` predicted vs `n` labels. * * @param labels Tensor with shape `[D1, ... DN, numLabels]`, where `N >= 1` and `numLabels` is the * number of target classes for the associated prediction. Commonly, `N = 1` and `labels` has * shape `[batchSize, numLabels]`. `[D1, ..., DN]` must match the shape of `predictionIDs`. * @param predictionIDs 1-D or higher tensor with its last dimension corresponding to the top `k` predicted * classes. For rank `n`, the first `n-1` dimensions must match the shape of `labels`. * @param labelID Optional label for which we want to compute the number of true positives. * @param weights Optional weights tensor with rank is either `0`, or `n-1`, where `n` is the rank of * `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be * either `1`, or the same as the corresponding `labels` dimension). * @param name Namescope to use for all created ops. * @return Streaming metric instance for computing a tensor containing the number of true positives. */ private[metrics] def streamingSparseTruePositives( labels: Output[Long], predictionIDs: Output[Long], labelID: Option[Output[Long]] = None, weights: Option[Output[Float]] = None, name: String = "StreamingSparseTruePositives" ): Metric.StreamingInstance[Output[Float]] = { Op.nameScope(name) { val numTruePositives = sparseTruePositives(labels, predictionIDs, labelID, weights) val batchNumTruePositives = Math.sum(numTruePositives) val accumulator = Metric.variable[Float](s"$name/Accumulator", Shape()) val value = accumulator.value val update = accumulator.assignAdd(batchNumTruePositives) val reset = accumulator.initializer Metric.StreamingInstance(value, update, reset, Set(accumulator)) } } /** Calculates false positives for the recall@k and the precision@k metrics. * * If `labelID` is specified, the constructed op calculates binary false positives for `labelID` only. * If `labelID` is not specified, then it calculates metrics for `k` predicted vs `n` labels. * * @param labels Tensor with shape `[D1, ... 
DN, numLabels]`, where `N >= 1` and `numLabels` is the * number of target classes for the associated prediction. Commonly, `N = 1` and `labels` has * shape `[batchSize, numLabels]`. `[D1, ..., DN]` must match the shape of `predictionIDs`. * @param predictionIDs 1-D or higher tensor with its last dimension corresponding to the top `k` predicted * classes. For rank `n`, the first `n-1` dimensions must match the shape of `labels`. * @param labelID Optional label for which we want to compute the number of false positives. * @param weights Optional weights tensor with rank is either `0`, or `n-1`, where `n` is the rank of * `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be * either `1`, or the same as the corresponding `labels` dimension). * @param name Namescope to use for all created ops. * @return Tensor containing the number of false positives. */ private[metrics] def sparseFalsePositives( labels: Output[Long], predictionIDs: Output[Long], labelID: Option[Output[Long]] = None, weights: Option[Output[Float]] = None, name: String = "SparseFalsePositives" ): Output[Float] = { Op.nameScope(name) { val numFalsePositives = labelID match { case None => Sets.setSize(Sets.setDifference(predictionIDs, labels, aMinusB = true)).castTo[Float] case Some(selectedID) => val filteredPredictionIDs = selectID(predictionIDs, selectedID) val filteredLabels = selectID(labels, selectedID) Sets.setSize(Sets.setDifference(filteredPredictionIDs, filteredLabels, aMinusB = true)).castTo[Float] } weights match { case None => numFalsePositives case Some(w) => Op.createWith(controlDependencies = Set(Metric.weightsAssertBroadcastable(numFalsePositives, w))) { Math.multiply(numFalsePositives, w) } } } } /** Calculates streaming false positives for the recall@k and the precision@k metrics. * * If `labelID` is specified, the constructed op calculates binary false positives for `labelID` only. * If `labelID` is not specified, then it calculates metrics for `k` predicted vs `n` labels. * * @param labels Tensor with shape `[D1, ... DN, numLabels]`, where `N >= 1` and `numLabels` is the * number of target classes for the associated prediction. Commonly, `N = 1` and `labels` has * shape `[batchSize, numLabels]`. `[D1, ..., DN]` must match the shape of `predictionIDs`. * @param predictionIDs 1-D or higher tensor with its last dimension corresponding to the top `k` predicted * classes. For rank `n`, the first `n-1` dimensions must match the shape of `labels`. * @param labelID Optional label for which we want to compute the number of false positives. * @param weights Optional weights tensor with rank is either `0`, or `n-1`, where `n` is the rank of * `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be * either `1`, or the same as the corresponding `labels` dimension). * @param name Namescope to use for all created ops. * @return Streaming metric instance for computing a tensor containing the number of false positives. 
*/ private[metrics] def streamingSparseFalsePositives( labels: Output[Long], predictionIDs: Output[Long], labelID: Option[Output[Long]] = None, weights: Option[Output[Float]] = None, name: String = "StreamingSparseFalsePositives" ): Metric.StreamingInstance[Output[Float]] = { Op.nameScope(name) { val numFalsePositives = sparseFalsePositives(labels, predictionIDs, labelID, weights) val batchNumFalsePositives = Math.sum(numFalsePositives) val accumulator = Metric.variable[Float](s"$name/Accumulator", Shape()) val value = accumulator.value val update = accumulator.assignAdd(batchNumFalsePositives) val reset = accumulator.initializer Metric.StreamingInstance(value, update, reset, Set(accumulator)) } } /** Calculates false negatives for the recall@k and the precision@k metrics. * * If `labelID` is specified, the constructed op calculates binary false negatives for `labelID` only. * If `labelID` is not specified, then it calculates metrics for `k` predicted vs `n` labels. * * @param labels Tensor with shape `[D1, ... DN, numLabels]`, where `N >= 1` and `numLabels` is the * number of target classes for the associated prediction. Commonly, `N = 1` and `labels` has * shape `[batchSize, numLabels]`. `[D1, ..., DN]` must match the shape of `predictionIDs`. * @param predictionIDs 1-D or higher tensor with its last dimension corresponding to the top `k` predicted * classes. For rank `n`, the first `n-1` dimensions must match the shape of `labels`. * @param labelID Optional label for which we want to compute the number of false negatives. * @param weights Optional weights tensor with rank is either `0`, or `n-1`, where `n` is the rank of * `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be * either `1`, or the same as the corresponding `labels` dimension). * @param name Namescope to use for all created ops. * @return Tensor containing the number of false negatives. */ private[metrics] def sparseFalseNegatives( labels: Output[Long], predictionIDs: Output[Long], labelID: Option[Output[Long]] = None, weights: Option[Output[Float]] = None, name: String = "SparseFalseNegatives" ): Output[Float] = { Op.nameScope(name) { val numTruePositives = labelID match { case None => Sets.setSize(Sets.setDifference(predictionIDs, labels, aMinusB = false)).castTo[Float] case Some(selectedID) => val filteredPredictionIDs = selectID(predictionIDs, selectedID) val filteredLabels = selectID(labels, selectedID) Sets.setSize(Sets.setDifference(filteredPredictionIDs, filteredLabels, aMinusB = false)).castTo[Float] } weights match { case None => numTruePositives case Some(w) => Op.createWith(controlDependencies = Set(Metric.weightsAssertBroadcastable(numTruePositives, w))) { Math.multiply(numTruePositives, w) } } } } /** Calculates streaming false negatives for the recall@k and the precision@k metrics. * * If `labelID` is specified, the constructed op calculates binary false negatives for `labelID` only. * If `labelID` is not specified, then it calculates metrics for `k` predicted vs `n` labels. * * @param labels Tensor with shape `[D1, ... DN, numLabels]`, where `N >= 1` and `numLabels` is the * number of target classes for the associated prediction. Commonly, `N = 1` and `labels` has * shape `[batchSize, numLabels]`. `[D1, ..., DN]` must match the shape of `predictionIDs`. * @param predictionIDs 1-D or higher tensor with its last dimension corresponding to the top `k` predicted * classes. For rank `n`, the first `n-1` dimensions must match the shape of `labels`. 
* @param labelID Optional label for which we want to compute the number of false negatives. * @param weights Optional weights tensor with rank is either `0`, or `n-1`, where `n` is the rank of * `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be * either `1`, or the same as the corresponding `labels` dimension). * @param name Namescope to use for all created ops. * @return Streaming metric instance for computing a tensor containing the number of false negatives. */ private[metrics] def streamingSparseFalseNegatives( labels: Output[Long], predictionIDs: Output[Long], labelID: Option[Output[Long]] = None, weights: Option[Output[Float]] = None, name: String = "StreamingSparseFalseNegatives" ): Metric.StreamingInstance[Output[Float]] = { Op.nameScope(name) { val numFalseNegatives = sparseFalseNegatives(labels, predictionIDs, labelID, weights) val batchNumFalseNegatives = Math.sum(numFalseNegatives) val accumulator = Metric.variable[Float](s"$name/Accumulator", Shape()) val value = accumulator.value val update = accumulator.assignAdd(batchNumFalseNegatives) val reset = accumulator.initializer Metric.StreamingInstance(value, update, reset, Set(accumulator)) } } }
repo_name: eaplatanios/tensorflow_scala
path: modules/api/src/main/scala/org/platanios/tensorflow/api/ops/metrics/package.scala
language: Scala
license: apache-2.0
size: 16,961 bytes
package org.vaadin.addons.vaactor.demo

import org.vaadin.addons.vaactor._
import org.vaadin.addons.vaactor.chat.ChatComponent

import com.vaadin.flow.component.orderedlayout.VerticalLayout
import com.vaadin.flow.component.page.Push
import com.vaadin.flow.router.Route
import com.vaadin.flow.shared.communication.PushMode
import com.vaadin.flow.shared.ui.Transport
import com.vaadin.flow.theme.Theme
import com.vaadin.flow.theme.lumo.Lumo

import akka.actor.ActorRef

object SessionUI {

  class Strategy(hasSession: Vaactor.HasSession) extends ChatComponent.Strategy {

    override def login(name: String, sender: ActorRef): Unit =
      hasSession.session.tell(Session.Login(name), sender)

    override def logout(name: String, sender: ActorRef): Unit =
      hasSession.session.tell(Session.Logout, sender)

    override def send(name: String, text: String, sender: ActorRef): Unit =
      hasSession.session.tell(Session.Message(text), sender)

  }

}

/** UI for Vaactor chat with session support
  *
  * @author Otto Ringhofer
  */
@Route("session")
@Theme(value = classOf[Lumo], variant = Lumo.DARK)
@Push(value = PushMode.AUTOMATIC, transport = Transport.WEBSOCKET)
class SessionUI extends VerticalLayout with Vaactor.HasSession {

  val strategy = new demo.SessionUI.Strategy(this) // "this" supports strategy with session actor

  val chatComponent: ChatComponent =
    new ChatComponent("Vaactor chat with session support", strategy) with Vaactor.AttachSession { // AttachSession
      /** Send to session actor on attach */
      override val attachMessage: Any = Session.Attached
      /** Send to session actor on detach */
      override val detachMessage: Any = Session.Detached
    }

  add(chatComponent)

}
repo_name: otto-ringhofer/vaactor
path: demo/src/main/scala/org/vaadin/addons/vaactor/demo/SessionUI.scala
language: Scala
license: apache-2.0
size: 1,720 bytes
package coursera

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

package object extensions {

  implicit class ListExtensions[T](val source: List[T]) extends AnyVal {
    def sumBy(keySelector: T => Int): Int = ???

    private def sumBy[B](keySelector: T => B)(implicit num: Numeric[B]): B = {
      source.map(keySelector).sum
    }
  }

  def f[T](that: Future[T]): PartialFunction[Throwable, Future[T]] = {
    case _: Throwable => that
  }

  def g[T]: PartialFunction[Throwable, Failure[T]] = {
    case t: Throwable => Failure(t)
  }

  implicit class FutureExtensions[T](val future: Future[T]) extends AnyVal {
    def fallbackTo[U >: T](that: => Future[U])(implicit executor: ExecutionContext): Future[U] = {
      future.recoverWith(f(that.recoverWith(f(future))))
    }

    def withTry()(implicit executor: ExecutionContext): Future[Try[T]] = {
      future.map(Success(_)) recover g
    }
  }

  def withTry[T](future: Future[T])(implicit executor: ExecutionContext): Future[Try[T]] = {
    future.map(Success(_)) recover { case t: Throwable => Failure(t) }
  }

  def fallbackTo[U](future: Future[U], that: => Future[U])(implicit executor: ExecutionContext): Future[U] = {
    future.recoverWith { case _: Throwable => that.recoverWith { case _: Throwable => future } }
  }
}
repo_name: mitochon/hexercise
path: src/mooc/reactive/week4.codesamples/src/test/scala/coursera/Extensions.scala
language: Scala
license: mit
size: 1,358 bytes
package at.logic.gapt.grammars import at.logic.gapt.expr._ import at.logic.gapt.expr.fol.folSubTerms import at.logic.gapt.expr.fol.Utils.numeral import at.logic.gapt.expr.hol.{ atoms, lcomp, simplify, toNNF } import at.logic.gapt.provers.maxsat.{ MaxSATSolver, bestAvailableMaxSatSolver } import at.logic.gapt.utils.Logger object SipGrammar { type Production = ( FOLVar, FOLTerm ) val tau = FOLVar( "τ" ) val beta = FOLVar( "β" ) val gamma = FOLVar( "γ" ) val gammaEnd = FOLVar( "γ_end" ) val alpha = FOLVar( "α" ) val nu = FOLVar( "ν" ) def gamma_i( i: Int ) = FOLVar( s"γ_$i" ) def instantiate( prod: Production, n: Int ): Set[VTRATG.Production] = ( prod match { case ( `tau`, r ) => var instanceProductions = Set[Production]() if ( !freeVariables( r ).contains( gamma ) ) instanceProductions ++= Seq( tau -> FOLSubstitution( alpha -> numeral( n ), nu -> numeral( 0 ), beta -> gamma_i( 0 ) )( r ) ) if ( !freeVariables( r ).contains( beta ) ) instanceProductions ++= ( 0 until n ) map { i => tau -> FOLSubstitution( alpha -> numeral( n ), nu -> numeral( i ), gamma -> gamma_i( i + 1 ) )( r ) } instanceProductions case ( `gamma`, r ) => ( 0 until n ) map { i => gamma_i( i ) -> FOLSubstitution( alpha -> numeral( n ), nu -> numeral( i ), gamma -> gamma_i( i + 1 ) )( r ) } toSet case ( `gammaEnd`, r ) => Set( gamma_i( n ) -> FOLSubstitution( alpha -> numeral( n ) )( r ) ) } ).map { case ( l, r ) => List( l ) -> List( r ) } } case class SipGrammar( productions: Set[SipGrammar.Production] ) { import SipGrammar._ override def toString: String = productions.map { case ( a, t ) => s"$a -> $t" }.toSeq.sorted.mkString( sys.props( "line.separator" ) ) def instanceGrammar( n: Int ) = VTRATG( tau, List( tau ) +: ( 0 until n ).inclusive.map( gamma_i ).map( List( _ ) ), productions flatMap { p => instantiate( p, n ) } ) } object stableSipGrammar { type InstanceLanguage = ( Int, Set[FOLTerm] ) def apply( instanceLanguages: Seq[InstanceLanguage] ) = { import SipGrammar._ val allTerms = instanceLanguages.flatMap( _._2 ) val topLevelStableTerms = stableTerms( allTerms, Seq( gamma, alpha, nu ) ).filter( !_.isInstanceOf[FOLVar] ) val argumentStableTerms = stableTerms( folSubTerms( allTerms flatMap { case FOLFunction( _, as ) => as } ), Seq( gamma, alpha, nu ) ) val prods = Set.newBuilder[Production] for ( st <- topLevelStableTerms ) { val fv = freeVariables( st ) if ( !fv.contains( nu ) ) prods += tau -> FOLSubstitution( gamma -> beta )( st ) prods += tau -> st } for ( st <- argumentStableTerms ) { val fv = freeVariables( st ) prods += gamma -> st if ( !fv.contains( nu ) && !fv.contains( gamma ) ) prods += gammaEnd -> st } SipGrammar( prods.result ) } } case class SipGrammarMinimizationFormula( g: SipGrammar ) { def productionIsIncluded( p: SipGrammar.Production ) = FOLAtom( s"sp,$p" ) def coversLanguageFamily( langs: Seq[stableSipGrammar.InstanceLanguage] ) = { val cs = Seq.newBuilder[Formula] langs foreach { case ( n, lang ) => val tratMinForm = new VectGrammarMinimizationFormula( g.instanceGrammar( n ) ) { override def productionIsIncluded( p: VTRATG.Production ) = FOLAtom( s"p,$n,$p" ) override def valueOfNonTerminal( t: Expr, a: Var, rest: Expr ) = FOLAtom( s"v,$n,$t,$a=$rest" ) } val instanceCovForm = tratMinForm.coversLanguage( lang ) cs += instanceCovForm val atomsInInstForm = atoms( instanceCovForm ) ( for ( p <- g.productions; instP <- SipGrammar.instantiate( p, n ) ) yield instP -> p ).groupBy( _._1 ).values foreach { l => val tratProdInc = tratMinForm.productionIsIncluded( l.head._1 ) if ( atomsInInstForm contains 
tratProdInc ) cs += Imp( tratProdInc, Or( l map ( _._2 ) map productionIsIncluded ) ) } } And( cs.result ) } } object minimizeSipGrammar extends Logger { def apply( g: SipGrammar, langs: Seq[stableSipGrammar.InstanceLanguage], maxSATSolver: MaxSATSolver = bestAvailableMaxSatSolver ): SipGrammar = { val formula = SipGrammarMinimizationFormula( g ) val hard = formula.coversLanguageFamily( langs ) debug( s"Logical complexity of the minimization formula: ${lcomp( simplify( toNNF( hard ) ) )}" ) val atomsInHard = atoms( hard ) val soft = g.productions map formula.productionIsIncluded filter atomsInHard.contains map ( Neg( _ ) -> 1 ) maxSATSolver.solve( hard, soft ) match { case Some( interp ) => SipGrammar( g.productions filter { p => interp.interpret( formula.productionIsIncluded( p ) ) } ) case None => throw new Exception( "Grammar does not cover language." ) } } } object findMinimalSipGrammar { def apply( langs: Seq[stableSipGrammar.InstanceLanguage], maxSATSolver: MaxSATSolver = bestAvailableMaxSatSolver ) = { val polynomialSizedCoveringGrammar = stableSipGrammar( langs ) minimizeSipGrammar( polynomialSizedCoveringGrammar, langs, maxSATSolver ) } }
repo_name: gebner/gapt
path: core/src/main/scala/at/logic/gapt/grammars/induction.scala
language: Scala
license: gpl-3.0
size: 5,204 bytes
package com.dominikgruber.fpinscala.chapter03

import org.scalatest._

class Exercise25Spec extends FlatSpec with Matchers {

  "size" should "count correctly" in {
    val b = Branch(Branch(Leaf(1), Leaf(2)), Leaf(3))
    Tree.size(b) should be (5)
  }
}
repo_name: TheDom/functional-programming-in-scala
path: src/test/scala/com/dominikgruber/fpinscala/chapter03/Exercise25Spec.scala
language: Scala
license: mit
size: 255 bytes
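The spec above depends on a Tree ADT and Tree.size defined elsewhere in the repository. A minimal sketch that makes the assertion pass (an assumption, not the repository's actual exercise solution):

// Hedged sketch of the Tree ADT assumed by the spec above (Functional Programming
// in Scala, exercise 3.25): size counts both leaves and branches, so
// Branch(Branch(Leaf(1), Leaf(2)), Leaf(3)) has size 5.
sealed trait Tree[+A]
case class Leaf[A](value: A) extends Tree[A]
case class Branch[A](left: Tree[A], right: Tree[A]) extends Tree[A]

object Tree {
  def size[A](t: Tree[A]): Int = t match {
    case Leaf(_)      => 1
    case Branch(l, r) => 1 + size(l) + size(r)
  }
}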
/*                     __                                               *\
**     ________ ___   / /  ___     Scala API                            **
**    / __/ __// _ | / /  / _ |    (c) 2002-2013, LAMP/EPFL             **
**  __\ \/ /__/ __ |/ /__/ __ |    http://scala-lang.org/               **
** /____/\___/_/ |_/____/_/ | |                                         **
**                          |/                                          **
\*                                                                      */

// DO NOT EDIT, CHANGES WILL BE LOST
// This auto-generated code can be modified in "project/GenerateAnyVals.scala".
// Afterwards, running "sbt generateSources" regenerates this source file.

package scala

/** `Boolean` (equivalent to Java's `boolean` primitive type) is a
 *  subtype of [[scala.AnyVal]]. Instances of `Boolean` are not
 *  represented by an object in the underlying runtime system.
 *
 *  There is an implicit conversion from [[scala.Boolean]] => [[scala.runtime.RichBoolean]]
 *  which provides useful non-primitive operations.
 */
final abstract class Boolean private extends AnyVal {
  /** Negates a Boolean expression.
    *
    * - `!a` results in `false` if and only if `a` evaluates to `true` and
    * - `!a` results in `true` if and only if `a` evaluates to `false`.
    *
    * @return the negated expression
    */
  def unary_! : Boolean

  /** Compares two Boolean expressions and returns `true` if they evaluate to the same value.
    *
    * `a == b` returns `true` if and only if
    *  - `a` and `b` are `true` or
    *  - `a` and `b` are `false`.
    */
  def ==(x: Boolean): Boolean

  /**
    * Compares two Boolean expressions and returns `true` if they evaluate to a different value.
    *
    * `a != b` returns `true` if and only if
    *  - `a` is `true` and `b` is `false` or
    *  - `a` is `false` and `b` is `true`.
    */
  def !=(x: Boolean): Boolean

  /** Compares two Boolean expressions and returns `true` if one or both of them evaluate to true.
    *
    * `a || b` returns `true` if and only if
    *  - `a` is `true` or
    *  - `b` is `true` or
    *  - `a` and `b` are `true`.
    *
    * @note This method uses 'short-circuit' evaluation and
    *       behaves as if it was declared as `def ||(x: => Boolean): Boolean`.
    *       If `a` evaluates to `true`, `true` is returned without evaluating `b`.
    */
  def ||(x: Boolean): Boolean

  /** Compares two Boolean expressions and returns `true` if both of them evaluate to true.
    *
    * `a && b` returns `true` if and only if
    *  - `a` and `b` are `true`.
    *
    * @note This method uses 'short-circuit' evaluation and
    *       behaves as if it was declared as `def &&(x: => Boolean): Boolean`.
    *       If `a` evaluates to `false`, `false` is returned without evaluating `b`.
    */
  def &&(x: Boolean): Boolean

  // Compiler won't build with these seemingly more accurate signatures
  // def ||(x: => Boolean): Boolean
  // def &&(x: => Boolean): Boolean

  /** Compares two Boolean expressions and returns `true` if one or both of them evaluate to true.
    *
    * `a | b` returns `true` if and only if
    *  - `a` is `true` or
    *  - `b` is `true` or
    *  - `a` and `b` are `true`.
    *
    * @note This method evaluates both `a` and `b`, even if the result is already determined after evaluating `a`.
    */
  def |(x: Boolean): Boolean

  /** Compares two Boolean expressions and returns `true` if both of them evaluate to true.
    *
    * `a & b` returns `true` if and only if
    *  - `a` and `b` are `true`.
    *
    * @note This method evaluates both `a` and `b`, even if the result is already determined after evaluating `a`.
    */
  def &(x: Boolean): Boolean

  /** Compares two Boolean expressions and returns `true` if they evaluate to a different value.
    *
    * `a ^ b` returns `true` if and only if
    *  - `a` is `true` and `b` is `false` or
    *  - `a` is `false` and `b` is `true`.
    */
  def ^(x: Boolean): Boolean

  // Provide a more specific return type for Scaladoc
  override def getClass(): Class[Boolean] = ???
}

object Boolean extends AnyValCompanion {
  /** Transform a value type into a boxed reference type.
    *
    * Runtime implementation determined by `scala.runtime.BoxesRunTime.boxToBoolean`. See [[https://github.com/scala/scala src/library/scala/runtime/BoxesRunTime.java]].
    *
    * @param  x   the Boolean to be boxed
    * @return     a java.lang.Boolean offering `x` as its underlying value.
    */
  def box(x: Boolean): java.lang.Boolean = ???

  /** Transform a boxed type into a value type. Note that this
    * method is not typesafe: it accepts any Object, but will throw
    * an exception if the argument is not a java.lang.Boolean.
    *
    * Runtime implementation determined by `scala.runtime.BoxesRunTime.unboxToBoolean`. See [[https://github.com/scala/scala src/library/scala/runtime/BoxesRunTime.java]].
    *
    * @param  x   the java.lang.Boolean to be unboxed.
    * @throws     ClassCastException  if the argument is not a java.lang.Boolean
    * @return     the Boolean resulting from calling booleanValue() on `x`
    */
  def unbox(x: java.lang.Object): Boolean = ???

  /** The String representation of the scala.Boolean companion object. */
  override def toString = "object scala.Boolean"
}
repo_name: felixmulder/scala
path: src/library/scala/Boolean.scala
language: Scala
license: bsd-3-clause
size: 5,343 bytes
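The Scaladoc above distinguishes the short-circuiting && and || from the strict & and |. A small, self-contained sketch of that difference (the demo object is an illustration, not part of Boolean.scala):

// Illustrative only: && short-circuits, & evaluates both operands.
object ShortCircuitDemo extends App {
  def probe(): Boolean = { println("probe evaluated"); true }

  val a = false
  println(a && probe()) // prints just "false"; probe() is never evaluated
  println(a & probe())  // prints "probe evaluated" then "false"
}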
import com.bizo.mighty.csv.CSVReader import java.net.URLEncoder import org.apache.jena.riot.RDFFormat import org.apache.jena.riot.RDFDataMgr import java.io.FileOutputStream import org.apache.jena.rdf.model.ResourceFactory import org.apache.jena.rdf.model.Resource import org.apache.jena.rdf.model.ModelFactory import org.apache.jena.rdf.model.Model import org.apache.jena.vocabulary.RDF import org.apache.jena.vocabulary.OWL import org.apache.jena.vocabulary.DC import org.apache.jena.vocabulary.DC_11 import org.apache.jena.vocabulary.RDFS import org.apache.jena.sparql.vocabulary.FOAF import com.github.nscala_time.time.Imports._ import org.joda.time.format.ISODateTimeFormat import org.apache.jena.shared.PrefixMapping import org.apache.jena.datatypes.xsd.XSDDatatype import org.apache.jena.vocabulary.DCTerms import scala.collection.mutable.HashMap import scala.collection.mutable.ArrayBuffer import scala.collection.mutable.Buffer import scala.util.control.Breaks._ import org.apache.jena.rdf.model.Property import com.bizo.mighty.csv.CSVReaderSettings object SCHOENBERGCSV2RDF extends Anything2RDF { val sns = "http://ldf.fi/schoenberg/schema#" val ns = "http://ldf.fi/schoenberg/" object EventType extends Enumeration { type EventType = Value val Sale,Gift,NoSale,Possession = Value } val Auction = EC("Auction") Auction.addProperty(RDFS.subClassOf, CIDOC.Event) val Possession = EC("Possession") Possession.addProperty(RDFS.subClassOf, CIDOC.Event) val Gift = EC("Gift") Gift.addProperty(RDFS.subClassOf, CIDOC.Transfer_of_Custody) val Sale = EC("Sale") Sale.addProperty(RDFS.subClassOf, CIDOC.Transfer_of_Custody) val CatalogueEntry = EC("Catalogue Entry") val Catalogue = EC("Catalogue") val Currency = EC("Currency") val Manuscript = EC("Manuscript") val Work = EC("Work") val catalogueEntry = EOP("catalogueEntry") val price = EDP("price") val currency = EOP("currency") val sold = EDP("sold") val represents = EOP("represents") val language = EOP("language") val decorated_initials = EDP("decorated initials") val historiated_initials = EDP("historiated initials") val date = EOP("data") val seller = EOP("seller") val artist = EOP("artist") val scribe = EOP("scribe") val buyer = EOP("buyer") val soldOnBehalfOf = EOP("put on sale on behalf of") val folios = EDP("folios") val columns = EDP("columns") val lines = EDP("lines") val height = EDP("height") val binding = EDP("binding") val width = EDP("width") val circa = EDP("circa") //FIXME val current_location = EDP("current location") //FIXME val liturgicalUse = EOP("liturgical use") val possiblySameAs = EOP("possibly same as") val small_miniatures = EDP("small miniatures") val large_miniatures = EDP("large miniatures") val fullpage_miniatures = EDP("full-page miniatures") val unspecified_miniatures = EDP("unspecified miniatures") val provenance = EOP("provenance") val material = EOP("material") val Vellum = I(ns+"material_vellum",Map("en"->"Vellum"),CIDOC.Material) val Paper = I(ns+"material_paper",Map("en"->"Paper"),CIDOC.Material) val mmap = new HashMap[String,String] def main(args: Array[String]): Unit = { var wr : CSVReader = null var headers: Array[String] = null var h: Map[String,Int] = null wr = CSVReader("schoenberg.csv")(CSVReaderSettings.Standard.copy(escapechar='\\0')) headers = wr.next h = headers.zipWithIndex.toMap println(headers.toSeq) /* * MANUSCRIPT_ID, DUPLICATE_MS, CAT_DATE, SELLER, SELLER2, INSTITUTION, BUYER, CAT_ID, CAT_OR_LOT_NUM, PRICE, CURRENCY, SOLD, * SECONDARY_SOURCE, CURRENT_LOCATION, AUTHOR_AUTHORITY, AUTHOR_VARIANT, TITLE, LNG, MAT, 
PLACE, MANUSCRIPT_USE, MANUSCRIPT_DATE, * CIRCA, ARTIST, SCRIBE, FOLIOS, COL, LINES, HGT, WDT, MANUSCRIPT_BINDING, PROVENANCE, COMMENTS, MANUSCRIPT_LINK, MIN_FL, MIN_LG, MIN_SM, * MIN_UN, H_INIT, D_INIT, ENTRY_COMMENTS, ADDEDON, ADDEDBY, ISAPPROVED, ISDELETED, LAST_MODIFIED, LAST_MODIFIED_BY, POSSIBLE_DUPS, * POSSIBLE_DUPS, X * List(219, 219,3401,17536,189079, 19970617, Sotheby's, , , , Western manuscripts and miniatures (LN7375), 62, 71445, , , , , Guillaume de Lorris|Jean de Meun, Guillaume de Lorris|Jean de Meung, Roman de la Rose, French, V, France, , 1425, c1h+, , , 172, 2, 34, 311, 260, , Royez|Hennier|Phillipps, Thomas, Sir, Phillipps129, , , , , , , , PROV-UPDATE-PHIL-1, 24-Sep-2009, import, , , 11-Aug-2011, posch, , , X) List(220, , 19970617, Sotheby's, , , , Western manuscripts and miniatures (LN7375), 61, 27513.75, , , , , , , Statutes, Savoy, Latin, P, France, Savoy, , 1475, C2H, , , 200, 1, 40, 245, 180, , De Glos La Malme|De Guigne, , , , , , , , , , 24-Sep-2009, import, , , 14-Mar-2011, aldevine, , , X) List(221, , 19970617, Sotheby's, , , , Western manuscripts and miniatures (LN7375), 60, 30360, , , , , , Jean D'orronville, Chronicles, Savoy, French, P, France, Savoy, , 1463, C3Q, , , 313, 1, 22, 303, 213, , Duke Savoy, , , , , , , , , , 24-Sep-2009, import, , , 14-Mar-2011, aldevine, , , X) List(222, 222,6391,9885,9941,13883,13866,77928,97264, 19970617, Sotheby's, , , , Western manuscripts and miniatures (LN7375), 69, 60555, , , , , , , Breviary, Latin, V, Netherlands, Delft, Augustinian, 1460, C3Q, Master of the Fagel Missal, , 246, 2, 23, 168, 122, Late 18thc binding For Wodhull, Canoness Regular St Agnes-Delft|Askew, Anthony, 1722-1774|Wodhull|Severne|Ingram|Jacobsen|Busch|Gunther|Symonds, Exh. Brussels 1979-11, , 1, , 1, , 9, 16, , 24-Sep-2009, import, , , 27-Nov-2013, fraas, , , X) * */ var i = 1 val midP = h("MANUSCRIPT_ID") val mdupidsP = h("DUPLICATE_MS") for (w <- wr) { val mid = w(midP) if (w.length!=50) println(i,w.length,mid) w(mdupidsP).split(",").map(_.trim).filter {!_.isEmpty } foreach { x => if (x<mmap.getOrElse(mid,mid)) mmap.put(mid,x) } i+=1 } for (k <- mmap.keySet) { var v = mmap.get(k) var lv = v while (lv!=None) { lv = mmap.get(v.get) if (lv!=None) v = lv } mmap.put(k,v.get) } wr = CSVReader("schoenberg.csv")(CSVReaderSettings.Standard.copy(escapechar='\\0')) headers = wr.next h = headers.zipWithIndex.toMap println(headers.toSeq) breakable {for (w <- wr) { val mid = mmap.getOrElse(w(midP),w(midP)) val ss = w(h("SOLD")) val date = "(\\\\d\\\\d\\\\d\\\\d)(\\\\d\\\\d)(\\\\d\\\\d)".r.replaceFirstIn(w(h("CAT_DATE")),"$1-$2-$3") val recipient = if (w(h("BUYER"))!="") w(h("BUYER")) else w(h("INSTITUTION")) val etype = if (ss=="" && w(h("SELLER"))=="" && w(h("INSTITUTION"))!="") EventType.Possession else if (ss=="GIFT") EventType.Gift else if (ss=="NO" || ss=="N?" 
|| ss=="WD" || (recipient=="" && ss!="YES")) EventType.NoSale else EventType.Sale val auctionEvent = if (etype==EventType.Sale || etype==EventType.NoSale) Some(I(ns+"event_auction_"+mid+"_"+w(midP),Map("en"->("Auction of "+w(h("TITLE"))+" on "+date)),Auction)) else None val primarySeller = if (w(h("SELLER2"))!="") w(h("SELLER2")) else w(h("SELLER")) val transferEvent = if (etype==EventType.Sale || etype==EventType.Gift) Some(I(ns+"event_transfer_"+mid+"_"+w(midP),Map("en"->((if (etype==EventType.Gift) "Gift" else "Sale")+" of "+w(h("TITLE"))+" from "+primarySeller+" to "+recipient+" on "+date)),if (etype==EventType.Gift) Gift else Sale)) else None val beforePossessionEvent = if (etype!=EventType.Possession) Some(I(ns+"event_possession_"+mid+"_"+encode(primarySeller),Map("en"->("Possession by "+primarySeller+" of "+w(h("TITLE"))+" until "+date)),Possession)) else None val afterPossessionEvent = if (recipient!="") Some(I(ns+"event_possession_"+mid+"_"+encode(recipient),Map("en"->("Possession by "+recipient+" of "+w(h("TITLE"))+" after "+date)),Possession)) else None beforePossessionEvent.foreach(bpe => auctionEvent.orElse(transferEvent).foreach{ae => bpe.addProperty(CIDOC.occurs_before,ae);ae.addProperty(CIDOC.occurs_after,bpe)}); transferEvent.foreach(te => auctionEvent.foreach(ae => te.addProperty(CIDOC.occurs_during,ae))) auctionEvent.orElse(transferEvent).foreach(ae => afterPossessionEvent.foreach{ape => ae.addProperty(CIDOC.occurs_before,ape);ape.addProperty(CIDOC.occurs_after,ae)}); val c = I(ns+"catalogue_"+encode(w(h("CAT_ID"))),Map("en"->w(h("CAT_ID"))),Catalogue) val ce = I(ns+"catalogueEntry_"+mid+"_"+w(midP),Map("en"->(w(h("CAT_OR_LOT_NUM"))+": "+w(h("TITLE")))),CatalogueEntry) val manuscript = I(ns+"manuscript_"+mid,Map("en"->w(h("TITLE"))),Manuscript) if (mid!=w(midP)) manuscript.addProperty(OWL.sameAs,m.createResource(ns+"manuscript_"+w(midP))) val authorVariants = w(h("AUTHOR_VARIANT")).split("\\\\|").map(_.trim) val authorNames = { val t = w(h("AUTHOR_AUTHORITY")).split("\\\\|").map(_.trim) if (t.length!=0) t else authorVariants } val works = { val workNames = w(h("TITLE")).split("\\\\|").map(_.trim).filter { !_.isEmpty } if (workNames.length==authorNames.length) for (i <- 0 until workNames.length;workName = workNames(i);authorName = authorNames(i)) yield I(ns+"work_"+encode(authorName)+"_"+encode(workName),workName,Work) else workNames.map(s => I(ns+"work_"+mid+"_"+encode(s),s,Work)).toSeq } works.foreach(manuscript.addProperty(CIDOC.carries,_)) for (ch <- h;s = w(ch._2)) if (s!="") ch._1 match { case "CAT_ID" => auctionEvent.orElse(transferEvent).orElse(afterPossessionEvent).orElse(beforePossessionEvent).getOrElse(throw new IllegalArgumentException("BAD: "+headers.zip(w).toMap)).addProperty(catalogueEntry,ce) case "CAT_OR_LOT_NUM" => ce.addProperty(DCTerms.isPartOf,c) case "CAT_DATE" => val (p,e) = try { (CIDOC.has_timeSpan,Left(makeTimeSpan(s.substring(0,4)+"-"+s.substring(4,6)+'-'+s.substring(6),makeDateTime(s.substring(0,4), s.substring(4,6), s.substring(6))))) } catch { case ex: Exception => (DCTerms.date,Right(s)) } e match { case Left(v) => auctionEvent.foreach(_.addProperty(p,v));transferEvent.foreach(_.addProperty(p,v)) case Right(v) => auctionEvent.foreach(_.addProperty(p,v));transferEvent.foreach(_.addProperty(p,v)) } case "MANUSCRIPT_DATE" => s.split("[\\\\|,]").filter(!_.isEmpty).foreach(s => try { manuscript.addProperty(CIDOC.has_timeSpan,makeTimeSpan(s,makeDateTime(s,"",""))) } catch { case e : Exception => println("Bad date for 
manuscript",e);manuscript.addProperty(DCTerms.date,s) }) case "MANUSCRIPT_ID" => beforePossessionEvent.foreach(_.addProperty(CIDOC.transferred_title_of,manuscript)) afterPossessionEvent.foreach(_.addProperty(CIDOC.transferred_title_of,manuscript)) ce.addProperty(represents,manuscript) case "DUPLICATE_MS" => s.split(",").map(_.trim).filter { x => !x.isEmpty && x!=mid }.foreach { x => manuscript.addProperty(OWL.sameAs,m.createResource(ns+"manuscript_"+x)) } case "SELLER" => beforePossessionEvent.foreach(_.addProperty(CIDOC.transferred_title_to,I(ns+"actor_"+encode(s),s,CIDOC.Actor))) transferEvent.foreach(_.addProperty(seller,I(ns+"actor_"+encode(s),s,CIDOC.Actor))) case "SELLER2" => transferEvent.foreach(_.addProperty(CIDOC.transferred_title_from,I(ns+"actor_"+encode(s),s,CIDOC.Actor))) case "BUYER" => afterPossessionEvent.foreach(_.addProperty(CIDOC.transferred_title_to,I(ns+"actor_"+encode(s),s,CIDOC.Actor))) transferEvent.foreach(_.addProperty(CIDOC.transferred_title_to,I(ns+"actor_"+encode(s),s,CIDOC.Actor))) case "INSTITUTION" => afterPossessionEvent.foreach(_.addProperty(CIDOC.transferred_title_to,I(ns+"actor_"+encode(s),s,CIDOC.Legal_Body))) transferEvent.foreach(_.addProperty(CIDOC.transferred_title_to,I(ns+"actor_"+encode(s),s,CIDOC.Legal_Body))) case "ARTIST" => s.split("\\\\|").map(_.trim).filter {!_.isEmpty }.foreach { x => manuscript.addProperty(artist,I(ns+"actor_"+encode(x),x,CIDOC.Actor)) } case "SCRIBE" => s.split("\\\\|").map(_.trim).filter {!_.isEmpty }.foreach { x => manuscript.addProperty(scribe,I(ns+"actor_"+encode(x),x,CIDOC.Actor)) } case "AUTHOR_AUTHORITY" => if (works.length==authorNames.length) for (i <- 0 until works.length;work = works(i);a = authorNames(i);if !a.isEmpty) work.addProperty(DCTerms.creator,I(ns+"actor_"+encode(a),a,CIDOC.Person)) else authorNames.foreach { a => manuscript.addProperty(DCTerms.creator,I(ns+"actor_"+encode(a),a,CIDOC.Person)) } case "AUTHOR_VARIANT" => if (authorNames.length==authorVariants.length) for (i <- 0 until authorNames.length;a = authorNames(i);av = authorVariants(i);if a!=av && !av.isEmpty) I(ns+"actor_"+encode(a),a,CIDOC.Person).addProperty(SKOS.altLabel,av) else println("lost variants: "+authorVariants.toSeq+" vs "+authorNames.toSeq) case "COMMENTS" => auctionEvent.orElse(transferEvent).orElse(afterPossessionEvent).orElse(beforePossessionEvent).getOrElse(throw new IllegalArgumentException("BAD: "+headers.zip(w).toMap)).addProperty(RDFS.comment,s) case "SOLD" => s match { case "NO" | "N?" 
| "GIFT" => case "YES" => auctionEvent.foreach(_.addProperty(sold,"true",XSDDatatype.XSDboolean)) case "WD" => auctionEvent.foreach(_.addProperty(sold,"withdrawn")) case s => auctionEvent.foreach(_.addProperty(sold,s)); println("Unknown sold: "+s) } case "PROVENANCE" => var list = m.createResource() beforePossessionEvent.orElse(afterPossessionEvent).foreach{e => var provs = s.split("\\\\|").map(_.trim).filter(!_.isEmpty) if (provs.last==primarySeller) provs=provs.dropRight(1) if (!provs.isEmpty) { val pe = provs.foldLeft(null.asInstanceOf[Resource]) { case (r,s) => val e = I(ns+"event_possession_"+mid+"_"+encode(s),Map("en"->("Possession by "+s+" of "+w(h("TITLE")))),Possession) e.addProperty(CIDOC.transferred_title_of,manuscript) e.addProperty(CIDOC.transferred_title_to,I(ns+"actor_"+encode(s),s,CIDOC.Actor)) if (r!=null) { r.addProperty(CIDOC.occurs_before,e) e.addProperty(CIDOC.occurs_after,r) } e } pe.addProperty(CIDOC.occurs_before,e) e.addProperty(CIDOC.occurs_after,pe) } } case "MAT" => s.split("[\\\\|,]").map(_.trim).filter(!_.isEmpty).foreach { case "V" | "Vellum" => manuscript.addProperty(material,Vellum) case "P" | "Paper" => manuscript.addProperty(material,Paper) case "PV" | "VP" => manuscript.addProperty(material,Vellum);manuscript.addProperty(material,Paper) case "" | "|" | "," => case s => manuscript.addProperty(material,I(ns+"material_"+encode(s),Map("en"->s),CIDOC.Material)) } case "MANUSCRIPT_BINDING" => manuscript.addProperty(binding,s) case "LNG" => val lngs = s.split("[\\\\|,]").map(_.trim).filter(!_.isEmpty) if (works.length==lngs.length) for (i <- 0 until works.length;work = works(i);l = lngs(i)) work.addProperty(DCTerms.language,I(ns+"language_"+encode(l),Map("en"->l),CIDOC.Language)) else lngs.foreach { l => manuscript.addProperty(DCTerms.language,I(ns+"language_"+encode(l),Map("en"->l),CIDOC.Language)) } case "FOLIOS" => manuscript.addProperty(folios,s,XSDDatatype.XSDinteger) case "COL" => manuscript.addProperty(columns,s,XSDDatatype.XSDinteger) case "LINES" => manuscript.addProperty(lines,s,XSDDatatype.XSDinteger) case "HGT" => manuscript.addProperty(height,s,XSDDatatype.XSDinteger) case "WDT" => manuscript.addProperty(width,s,XSDDatatype.XSDinteger) case "H_INIT" => manuscript.addProperty(historiated_initials,s,XSDDatatype.XSDinteger) case "D_INIT" => manuscript.addProperty(decorated_initials, s,XSDDatatype.XSDinteger) case "MIN_FL" => manuscript.addProperty(fullpage_miniatures,s,XSDDatatype.XSDinteger) case "MIN_SM" => manuscript.addProperty(small_miniatures,s,XSDDatatype.XSDinteger) case "MIN_LG" => manuscript.addProperty(large_miniatures,s,XSDDatatype.XSDinteger) case "MIN_UN" => manuscript.addProperty(unspecified_miniatures,s,XSDDatatype.XSDinteger) case "PRICE" => ce.addProperty(price,s,XSDDatatype.XSDdecimal) case "CURRENCY" => if (s.matches("[A-Z]+")) ce.addProperty(currency,I(ns+"currency_"+encode(s),s,Currency)) else ce.addProperty(currency,s) case "ENTRY_COMMENTS" => auctionEvent.orElse(transferEvent).orElse(afterPossessionEvent).orElse(beforePossessionEvent).getOrElse(throw new IllegalArgumentException("BAD: "+headers.zip(w).toMap)).addProperty(RDFS.comment,s,"en") case "SECONDARY_SOURCE" => auctionEvent.orElse(transferEvent).orElse(afterPossessionEvent).orElse(beforePossessionEvent).getOrElse(throw new IllegalArgumentException("BAD: "+headers.zip(w).toMap)).addProperty(SKOS.scopeNote,s,"en") case "TITLE" | "X" => case "MANUSCRIPT_LINK" => manuscript.addProperty(FOAF.page,ResourceFactory.createResource(s)) case "PLACE" => 
manuscript.addProperty(CIDOC.took_place_at,I(ns+"location_"+encode(s),Map("en"->s),CIDOC.Place)) case "MANUSCRIPT_USE" => s.split("\\\\|").map(_.trim).filter {!_.isEmpty }.foreach { s => manuscript.addProperty(liturgicalUse,I(ns+"location_"+encode(s),Map("en"->s),CIDOC.Place)) } case "POSSIBLE_DUPS" => s.split(",").map(_.trim).filter {!_.isEmpty }.foreach { x => manuscript.addProperty(possiblySameAs,m.createResource(ns+"manuscript_"+x)) } case "CIRCA" => manuscript.addProperty(circa,s) //FIXME case "CURRENT_LOCATION" => manuscript.addProperty(current_location,s) //FIXME case "ISAPPROVED" | "ADDEDON" | "ADDEDBY" | "LAST_MODIFIED" | "LAST_MODIFIED_BY" => auctionEvent.orElse(transferEvent).orElse(afterPossessionEvent).orElse(beforePossessionEvent).getOrElse(throw new IllegalArgumentException("BAD: "+headers.zip(w).toMap)).addProperty(EDP(ch._1),s); case p => println(p+":"+s); auctionEvent.orElse(transferEvent).orElse(afterPossessionEvent).orElse(beforePossessionEvent).getOrElse(throw new IllegalArgumentException("BAD: "+headers.zip(w).toMap)).addProperty(EDP(p),s); } i+=1 }} //MANUSCRIPT_ID, DUPLICATE_MS, CAT_DATE, SELLER, SELLER2, INSTITUTION, BUYER, CAT_ID, CAT_OR_LOT_NUM, PRICE, CURRENCY, SOLD, SECONDARY_SOURCE, CURRENT_LOCATION, AUTHOR_AUTHORITY, AUTHOR_VARIANT, TITLE, LNG, MAT, PLACE, MANUSCRIPT_USE, MANUSCRIPT_DATE, CIRCA, ARTIST, SCRIBE, FOLIOS, COL, LINES, HGT, WDT, MANUSCRIPT_BINDING, PROVENANCE 1, PROVENANCE 2, PROVENANCE 3, PROVENANCE 4, PROVENANCE 5, PROVENANCE 6, PROVENANCE 7, PROVENANCE 8, PROVENANCE 9, PROVENANCE 10, PROVENANCE 11, PROVENANCE 12, PROVENANCE 13, PROVENANCE 14, PHILLIPPS_NUM, COMMENTS 1, COMMENTS 2, COMMENTS 3, COMMENTS 4, COMMENTS 5, COMMENTS 6, COMMENTS 7, COMMENTS 8, COMMENTS 9, COMMENTS 10, COMMENTS 11, COMMENTS 12, COMMENTS 13, COMMENTS 14, COMMENTS 15, COMMENTS 16, COMMENTS 17, COMMENTS 18, COMMENTS 19, MANUSCRIPT_LINK m.setNsPrefixes(PrefixMapping.Standard) m.setNsPrefix("org",ORG.ns) m.setNsPrefix("dcterms",DCTerms.NS) m.setNsPrefix("crm",CIDOC.ns) m.setNsPrefix("skos",SKOS.ns) m.setNsPrefix("s",ns) m.setNsPrefix("ss",sns) RDFDataMgr.write(new FileOutputStream("schoenberg.ttl"), m, RDFFormat.TTL) } }
jiemakel/anything2rdf
src/main/scala/schoenbergcsv2rdf.scala
Scala
mit
19,726
//package scray.querying.source // //import scray.querying.description.Row //import scray.querying.queries.DomainQuery //import scray.querying.description.TableIdentifier //import scray.querying.description.Column //import scala.reflect.ClassTag //import scray.querying.description.ColumnOrdering //import scray.querying.description.QueryRange //import scray.querying.description.internal.Domain //import scray.querying.description.ColumnGrouping //import com.typesafe.scalalogging.slf4j.LazyLogging // ///** // * a hash join source for indexes that use ranges and sets of references // */ //abstract class AbstractRangeSetHashJoinSource[Q <: DomainQuery, M, R /* <: Product */, V]( // indexsource: LazySource[Q], // lookupSource: KeyedSource[R, V], // lookupSourceTable: TableIdentifier, // lookupkeymapper: Option[M => R] = None, // maxLimit: Option[Long] = None)(implicit tag: ClassTag[M]) // extends AbstractHashJoinSource[Q, M, R, V](indexsource, lookupSource, lookupSourceTable, lookupkeymapper) // with LazyLogging { // // /** // * the name of the column in lookupsource that will be the primary // * key that is used as reference indexed // */ // @inline protected def getReferenceLookupSourceColumn: Column // // /** // * the column in the index that contains a set of references into lookupsource // */ // @inline protected def getReferencesIndexColumn: Column // // /** // * the column in the idnex that contains the value of the index data // */ // @inline protected def getValueIndexColumn: Column // // /** // * a primary column in the index used as a criteria shrinking // * the number of possible results // */ // @inline protected def getPrefixIndexColumn: Column // // // /** // * creates a domain query that matches the provided domains and trys to reflect // * the original query options // */ // @inline protected def createDomainQuery[T](query: Q, domains: List[Domain[_]])(implicit ord: Ordering[T]): Q = { // val resultColumns = Set(getPrefixIndexColumn, // getValueIndexColumn, getReferencesIndexColumn) // val range = query.getQueryRange.map { qrange => // val skipLines = qrange.skip.getOrElse(0L) // QueryRange(None, qrange.limit.map(_ + skipLines).orElse(maxLimit.map(_ + skipLines)), None) // } // DomainQuery(query.getQueryID, query.getQueryspace, query.querySpaceVersion, resultColumns, getPrefixIndexColumn.table, // domains, Some(ColumnGrouping(getValueIndexColumn)), // Some(ColumnOrdering[T](getValueIndexColumn, // query.getOrdering.filter(_.descending).isDefined)), range).asInstanceOf[Q] // } // // /** // * we return an array of references we want to look up // */ // override protected def getJoinablesFromIndexSource(index: Row): Array[M] = { // index.getColumnValue[M](getReferencesIndexColumn) match { // case Some(refs) => refs match { // case travs: TraversableOnce[M] => travs.asInstanceOf[TraversableOnce[M]].toArray // case travs: M => Array[M](travs) // } // case None => Array[M]() // } // } // // override protected def isOrderedAccordingToOrignalOrdering(transformedQuery: Q, ordering: ColumnOrdering[_]): Boolean = // ordering.column == getReferenceLookupSourceColumn // // /** // * since this is a true index only, we only return the referred columns // */ // override def getColumns: Set[Column] = lookupSource.getColumns //}
scray/scray
scray-querying/modules/scray-querying/src/main/scala/scray/querying/source/AbstractRangeSetHashJoinSource.scala
Scala
apache-2.0
3,466
/*
 * Copyright 2015 Foundational Development
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package pro.foundev.calculations

import org.apache.spark.rdd.RDD
import org.apache.spark.SparkContext._

import pro.foundev.dto.{IpLog, SessionReport}

class LogCalculatorImpl extends LogCalculator {

  /**
   * Returns the state with the most clicks and the average clicks per user.
   * @param table the IP log entries to aggregate
   * @return the session report
   */
  override def sessionReport(table: RDD[IpLog]): SessionReport = {
    val biggestState: String = table
      .map(log => (log.originState, log.urls.size))
      .reduceByKey((t1, t2) => t1 + t2)
      .reduce((t1, t2) => {
        if (t1._2 > t2._2) {
          t1
        } else {
          t2
        }
      })._1
    val averageClicksBySession: Double = table.map(log => log.urls.size).mean()
    new SessionReport(biggestState, averageClicksBySession)
  }
}
rssvihla/datastax_work
spark_commons/benchmarks/low_latency_spark/src/main/scala/pro/foundev/calculations/LogCalculatorImpl.scala
Scala
apache-2.0
1,403
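A minimal usage sketch for the LogCalculatorImpl above, assuming a local SparkContext and assuming that IpLog can be built from the originState and urls fields the calculator reads; the IpLog constructor shape shown here is hypothetical since the DTO itself is not part of this row:

import org.apache.spark.{SparkConf, SparkContext}
import pro.foundev.calculations.LogCalculatorImpl
import pro.foundev.dto.IpLog

object SessionReportExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("session-report").setMaster("local[*]"))
    // Hypothetical IpLog(originState, urls) shape; adjust to the real DTO.
    val logs = sc.parallelize(Seq(
      IpLog("CA", Seq("/home", "/cart")),
      IpLog("TX", Seq("/home"))
    ))
    // State with the most clicks plus average clicks per session.
    val report = new LogCalculatorImpl().sessionReport(logs)
    println(report)
    sc.stop()
  }
}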
import Dependencies._ import Merging._ import Testing._ import Version._ import sbt.Keys.{scalacOptions, _} import sbt._ import sbtassembly.AssemblyPlugin.autoImport._ import org.scalafmt.sbt.ScalafmtPlugin.autoImport.scalafmtOnCompile object Settings { lazy val artifactory = "https://artifactory.broadinstitute.org/artifactory/" lazy val commonResolvers = List( "artifactory-releases" at artifactory + "libs-release", "artifactory-snapshots" at artifactory + "libs-snapshot" ) //coreDefaultSettings + defaultConfigs = the now deprecated defaultSettings lazy val commonBuildSettings = Defaults.coreDefaultSettings ++ Defaults.defaultConfigs ++ Seq( javaOptions += "-Xmx2G", javacOptions ++= Seq("--release", "11"), scalacOptions in (Compile, console) --= Seq("-Ywarn-unused:imports", "-Xfatal-warnings"), scalacOptions in Test -= "-Ywarn-dead-code" // due to https://github.com/mockito/mockito-scala#notes ) // recommended scalac options by https://tpolecat.github.io/2017/04/25/scalac-flags.html lazy val commonCompilerSettings = Seq( "-target:jvm-1.11", "-deprecation", // Emit warning and location for usages of deprecated APIs. "-encoding", "utf-8", // Specify character encoding used by source files. "-feature", // Emit warning and location for usages of features that should be imported explicitly. "-language:existentials", // Existential types (besides wildcard types) can be written and inferred "-unchecked", // Enable additional warnings where generated code depends on assumptions. "-Xcheckinit", // Wrap field accessors to throw an exception on uninitialized access. "-Xfatal-warnings", // Fail the compilation if there are any warnings. "-Xlint:adapted-args", // Warn if an argument list is modified to match the receiver. "-Xlint:constant", // Evaluation of a constant arithmetic expression results in an error. "-Xlint:delayedinit-select", // Selecting member of DelayedInit. "-Xlint:doc-detached", // A Scaladoc comment appears to be detached from its element. "-Xlint:missing-interpolator", // A string literal appears to be missing an interpolator id. "-Xlint:option-implicit", // Option.apply used implicit view. "-Xlint:package-object-classes", // Class or object defined in package object. "-Xlint:poly-implicit-overload", // Parameterized overloaded implicit methods are not visible as view bounds. "-Xlint:private-shadow", // A private field (or class parameter) shadows a superclass field. "-Xlint:stars-align", // Pattern sequence wildcard must align with sequence component. "-Xlint:type-parameter-shadow", // A local type parameter shadows a type already in scope. "-Ywarn-dead-code", // Warn when dead code is identified. "-Ywarn-extra-implicit", // Warn when more than one implicit parameter section is defined. "-Ywarn-numeric-widen", // Warn when numerics are widened. "-Ywarn-unused:implicits", // Warn if an implicit parameter is unused. "-Ywarn-unused:imports", // Warn if an import selector is not referenced. 
"-language:postfixOps", "-Ymacro-annotations" ) //sbt assembly settings lazy val commonAssemblySettings = Seq( assemblyMergeStrategy in assembly := customMergeStrategy((assemblyMergeStrategy in assembly).value), test in assembly := {} ) //common settings for all sbt subprojects lazy val commonSettings = commonBuildSettings ++ commonAssemblySettings ++ commonTestSettings ++ List( organization := "org.broadinstitute.dsde.workbench", scalaVersion := "2.13.5", resolvers ++= commonResolvers, scalacOptions ++= commonCompilerSettings ) val scalafmtSettings = List(scalafmtOnCompile := true) //the full list of settings for the root project that's ultimately the one we build into a fat JAR and run //coreDefaultSettings (inside commonSettings) sets the project name, which we want to override, so ordering is important. //thus commonSettings needs to be added first. lazy val rootSettings = commonSettings ++ List( name := "sam", libraryDependencies ++= rootDependencies ) ++ commonAssemblySettings ++ rootVersionSettings ++ scalafmtSettings }
broadinstitute/sam
project/Settings.scala
Scala
bsd-3-clause
4,211
package org.ai4fm.proofprocess.core.store

import org.ai4fm.proofprocess.ProofEntry

import org.eclipse.core.runtime.CoreException

/**
 * @author Andrius Velykis
 */
trait IProofEntryTracker {

  @throws(classOf[CoreException])
  def initTrackLatestEntry(f: ProofEntry => Unit)

  def dispose()
}
andriusvelykis/proofprocess
org.ai4fm.proofprocess.core/src/org/ai4fm/proofprocess/core/store/IProofEntryTracker.scala
Scala
epl-1.0
302
/*
# Copyright 2016 Georges Lipka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package com.glipka.easyReactJS.react.jsx

import scala.scalajs.js
import scala.scalajs.js._
import org.scalajs.dom.html
import com.glipka.easyReactJS.react._

import js.{ UndefOr, Any, Function => JFn }
import js.annotation.{ JSBracketAccess, JSName }
import js.{ Any => jAny }
import org.scalajs.dom._

// https://github.com/DefinitelyTyped/DefinitelyTyped/blob/master/react/react.d.ts
@js.native
abstract class ElementAttributesProperty extends js.Any {
  var props: Any = js.native
}
glipka/Easy-React-With-ScalaJS
src/main/scala/com/glipka/easyReactJS/react/jsx/ElementAttributesProperty.scala
Scala
apache-2.0
1,077
package com.twitter.finagle.exp.mysql

import com.twitter.finagle.exp.mysql._
import com.twitter.finagle.{Service, ServiceFactory}
import com.twitter.util.{Await, Closable, Future, Time}
import org.junit.runner.RunWith
import org.mockito.Matchers.any
import org.mockito.Mockito.{times, verify, when}
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar

@RunWith(classOf[JUnitRunner])
class PrepareCacheTest extends FunSuite with MockitoSugar {

  test("cache prepare requests") {
    val dispatcher = mock[Service[Request, Result]]
    val stmtId = 2
    when(dispatcher(any[Request])).thenReturn(Future.value(
      PrepareOK(stmtId, 1, 1, 0)))

    val svc = new PrepareCache(dispatcher, 11)
    val r0 = PrepareRequest("SELECT 0")
    svc(r0)
    svc(r0)
    verify(dispatcher, times(1)).apply(r0)

    for (i <- 1 to 10) svc(PrepareRequest("SELECT %d".format(i)))

    svc(PrepareRequest("SELECT 5"))
    verify(dispatcher, times(1)).apply(PrepareRequest("SELECT 5"))

    // dispatch current eldest.
    // we should maintain access order.
    svc(r0)
    verify(dispatcher, times(1)).apply(r0)

    // fill cache and evict eldest.
    svc(PrepareRequest("SELECT 11"))
    verify(dispatcher, times(1)).apply(CloseRequest(stmtId))

    // evicted element is not in cache.
    svc(PrepareRequest("SELECT 1"))
    verify(dispatcher, times(2)).apply(PrepareRequest("SELECT 1"))
  }
}
jpederzolli/finagle
finagle-mysql/src/test/scala/com/twitter/finagle/mysql/unit/PreparedStatementTest.scala
Scala
apache-2.0
1,439
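PrepareCacheTest above exercises an access-ordered LRU cache of eleven prepared statements that issues a CloseRequest for the statement it evicts. A rough sketch of that caching idea, built on java.util.LinkedHashMap in access order; this is only an illustration, not Finagle's actual PrepareCache:

import java.util.{LinkedHashMap => JLinkedHashMap, Map => JMap}

// Access-ordered LRU map: the third constructor argument turns on access order,
// and removeEldestEntry both decides eviction and lets us release the evicted value.
class LruCache[K, V](maxSize: Int, onEvict: V => Unit) {
  private val underlying = new JLinkedHashMap[K, V](16, 0.75f, true) {
    override def removeEldestEntry(eldest: JMap.Entry[K, V]): Boolean = {
      val evict = size() > maxSize
      if (evict) onEvict(eldest.getValue)
      evict
    }
  }

  // Returns the cached value for k, creating and caching it on a miss.
  def getOrElseUpdate(k: K, v: => V): V =
    Option(underlying.get(k)).getOrElse {
      val created = v
      underlying.put(k, created)
      created
    }
}

// Example: keep at most 11 entries, printing the value that falls out.
// val cache = new LruCache[String, Int](11, evicted => println(s"closing $evicted"))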
import sbt._

class TLFProject(info: ProjectInfo) extends DefaultProject(info) {
  val newReleaseToolsRepository =
    "Scala Tools Repository" at "http://nexus.scala-tools.org/content/repositories/snapshots/"
  val scalatest = "org.scalatest" % "scalatest" % "1.2-for-scala-2.8.0.RC6-SNAPSHOT"

  override def managedStyle = ManagedStyle.Maven
  lazy val publishTo = Resolver.file("Publish", new java.io.File("../benb.github.com/maven/"))
}
benb/tlf
project-old/build/src/Tlf.scala
Scala
mit
444
package solarsystemscalemodel.mithril //A Mithril implementation of scalatags (based on the scalatags DOM implemention) trait Tags extends scalatags.generic.Tags[MithrilBuilder, MithrilElement, MithrilNode] { // Root Element val html = "html".tag[MithrilElement] // Document Metadata val head = "head".tag[MithrilElement] val base = "base".voidTag[MithrilElement] val link = "link".voidTag[MithrilElement] val meta = "meta".voidTag[MithrilElement] // Scripting val script = "script".tag[MithrilElement] // Sections val body = "body".tag[MithrilElement] val h1 = "h1".tag[MithrilElement] val h2 = "h2".tag[MithrilElement] val h3 = "h3".tag[MithrilElement] val h4 = "h4".tag[MithrilElement] val h5 = "h5".tag[MithrilElement] val h6 = "h6".tag[MithrilElement] val header = "header".tag[MithrilElement] val footer = "footer".tag[MithrilElement] // Grouping content val p = "p".tag[MithrilElement] val hr = "hr".voidTag[MithrilElement] val pre = "pre".tag[MithrilElement] val blockquote = "blockquote".tag[MithrilElement] val ol = "ol".tag[MithrilElement] val ul = "ul".tag[MithrilElement] val li = "li".tag[MithrilElement] val dl = "dl".tag[MithrilElement] val dt = "dt".tag[MithrilElement] val dd = "dd".tag[MithrilElement] val figure = "figure".tag[MithrilElement] val figcaption = "figcaption".tag[MithrilElement] val div = "div".tag[MithrilElement] // Text-level semantics val a = "a".tag[MithrilElement] val em = "em".tag[MithrilElement] val strong = "strong".tag[MithrilElement] val small = "small".tag[MithrilElement] val s = "s".tag[MithrilElement] val cite = "cite".tag[MithrilElement] val code = "code".tag[MithrilElement] val sub = "sub".tag[MithrilElement] val sup = "sup".tag[MithrilElement] val i = "i".tag[MithrilElement] val b = "b".tag[MithrilElement] val u = "u".tag[MithrilElement] val span = "span".tag[MithrilElement] val br = "br".voidTag[MithrilElement] val wbr = "wbr".voidTag[MithrilElement] // Edits val ins = "ins".tag[MithrilElement] val del = "del".tag[MithrilElement] // Embedded content val img = "img".voidTag[MithrilElement] val iframe = "iframe".tag[MithrilElement] val embed = "embed".voidTag[MithrilElement] val `object` = "object".tag[MithrilElement] val param = "param".voidTag[MithrilElement] val video = "video".tag[MithrilElement] val audio = "audio".tag[MithrilElement] val source = "source".voidTag[MithrilElement] val track = "track".voidTag[MithrilElement] val canvas = "canvas".tag[MithrilElement] val map = "map".tag[MithrilElement] val area = "area".voidTag[MithrilElement] // Tabular data val table = "table".tag[MithrilElement] val caption = "caption".tag[MithrilElement] val colgroup = "colgroup".tag[MithrilElement] val col = "col".voidTag[MithrilElement] val tbody = "tbody".tag[MithrilElement] val thead = "thead".tag[MithrilElement] val tfoot = "tfoot".tag[MithrilElement] val tr = "tr".tag[MithrilElement] val td = "td".tag[MithrilElement] val th = "th".tag[MithrilElement] // Forms val form = "form".tag[MithrilElement] val fieldset = "fieldset".tag[MithrilElement] val legend = "legend".tag[MithrilElement] val label = "label".tag[MithrilElement] val input = "input".voidTag[MithrilElement] val button = "button".tag[MithrilElement] val select = "select".tag[MithrilElement] val datalist = "datalist".tag[MithrilElement] val optgroup = "optgroup".tag[MithrilElement] val option = "option".tag[MithrilElement] val textarea = "textarea".tag[MithrilElement] }
thomasrynne/SolarSystemScaleModel
base/src/main/scala/solarsystemscalemodel/mithril/Tags.scala
Scala
mit
3,589
/**
 * Copyright (C) 2016-2017 Lightbend Inc. <https://www.lightbend.com>
 */
package lagom

case class CliOption[T](private val value: T) {
  def get: T = value
}

object CliOption {

  def apply[T](path: String, default: T)(implicit ev: CliOptionParser[T]): CliOption[T] =
    ev.parse(path, default)

  implicit class BooleanCliOption(cliOption: CliOption[Boolean]) {
    def ifTrue[A](a: => A): Option[A] = if (cliOption.get) Some(a) else None
  }

  trait CliOptionParser[T] {
    def parse(path: String, default: T): CliOption[T]
  }

  object CliOptionParser {
    implicit object BooleanCliOptionParser extends CliOptionParser[Boolean] {
      def parse(path: String, default: Boolean) =
        CliOption(sys.props.getOrElse(path, default.toString).toBoolean)
    }
  }
}
edouardKaiser/lagom
project/CliOptions.scala
Scala
apache-2.0
776
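A short usage sketch for the CliOption helper above, assuming it is on the classpath; the system property name and the message returned under ifTrue are illustrative only:

import lagom.CliOption

object CliOptionExample extends App {
  // Reads -Dcassandra.enabled=true/false from system properties, defaulting to false.
  val cassandraEnabled: CliOption[Boolean] = CliOption("cassandra.enabled", default = false)

  // ifTrue runs the block and returns Some only when the parsed flag is true.
  val startupMessage: Option[String] =
    cassandraEnabled.ifTrue("starting embedded Cassandra")

  println(startupMessage.getOrElse("embedded Cassandra disabled"))
}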
package odelay.netty

class Netty3TimerSpec extends odelay.testing.TimerSpec {
  def newTimer = NettyTimer.newTimer
  def timerName = "Netty3Timer"
}
softprops/odelay
odelay-netty3/src/test/scala/NettyTimerSpec.scala
Scala
mit
150
package com.nhlreplay.parser.playbyplay

trait Trimmer {
  def preserve(text: String): String = text

  def removeCounts(text: String): String = text.replaceAll("""\(\d+\)""", "")

  def removeDots(text: String): String = text.replaceAll("""\.""", "")

  def toInt(text: String): Int = text.toInt

  def trimWhitespace(text: String): String = """\s+""".r.replaceAllIn(text, " ").trim

  def trimPenaltyReason(reason: String): String = trimWhitespace(reason).toLowerCase

  def trimShotType(shotType: String): String = {
    shotType.toLowerCase match {
      case "backhand" => "shoots on his backhand"
      case "deflected" => "shoots and it's deflected"
      case "slap" => "takes a slapshot"
      case "snap" => "takes a snap shot"
      case "tip-in" => "tries to tip it in"
      case "wrap-around" => "tries a wrap-around"
      case "wrist" => "takes a wrist shot"
      case x => x
    }
  }

  def trimTarget(target: String): String = {
    target.toLowerCase match {
      case "goalpost" => "hits the post"
      case "hit crossbar" => "hits the crossbar"
      case "over net" => "over the net"
      case "wide of net" => "wide"
      case x => x
    }
  }
}
peruukki/NHLReplay
app/com/nhlreplay/parser/playbyplay/Trimmer.scala
Scala
mit
1,171
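A small usage sketch for the Trimmer trait above, mixing it into an object and running a few of its helpers; the sample play-by-play strings are made up:

import com.nhlreplay.parser.playbyplay.Trimmer

object TrimmerExample extends Trimmer {
  def main(args: Array[String]): Unit = {
    println(removeCounts("Hooking(2)"))               // "Hooking"
    println(trimWhitespace("  too   many   spaces ")) // "too many spaces"
    println(trimShotType("Wrist"))                    // "takes a wrist shot"
    println(trimTarget("Wide of Net"))                // "wide"
  }
}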
package alexsmirnov.pbconsole

import alexsmirnov.scalafx.ObservableImplicits

package object print extends ObservableImplicits {
}
alexsmirnov/printrbot-g2-console
src/main/scala/alexsmirnov/pbconsole/print/package.scala
Scala
bsd-3-clause
133
/*
 * Copyright 2013 Toshiyuki Takahashi
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
package com.arcusys.valamis.persistence.common.joda

import java.sql._

import com.arcusys.valamis.persistence.common.joda.converter.JodaDateTimeSqlTimestampConverter

import scala.slick.driver.{JdbcDriver, MySQLDriver}
import org.joda.time._
import slick.profile.RelationalProfile

class JodaDateTimeMapper(val driver: JdbcDriver) {
  import driver._

  val typeMapper = new DriverJdbcType[DateTime] with JodaDateTimeSqlTimestampConverter {
    def zero = new DateTime(0L)
    def sqlType = java.sql.Types.TIMESTAMP

    override def sqlTypeName(size: Option[RelationalProfile.ColumnOption.Length]) = driver match {
      case driver: MySQLDriver => "DATETIME"
      case _ => columnTypes.timestampJdbcType.sqlTypeName(size)
    }

    override def setValue(v: DateTime, p: PreparedStatement, idx: Int): Unit =
      p.setTimestamp(idx, toSqlType(v))

    override def getValue(r: ResultSet, idx: Int): DateTime =
      fromSqlType(r.getTimestamp(idx))

    override def updateValue(v: DateTime, r: ResultSet, idx: Int): Unit =
      r.updateTimestamp(idx, toSqlType(v))

    override def valueToSQLLiteral(value: DateTime) =
      columnTypes.timestampJdbcType.valueToSQLLiteral(toSqlType(value))
  }
}
igor-borisov/JSCORM
valamis-slick-support/src/main/scala/com/arcusys/valamis/persistence/common/joda/JodaDateTimeMapper.scala
Scala
gpl-3.0
2,626
package nested_objects_yaml import org.scalacheck.Gen import org.scalacheck.Arbitrary import play.api.libs.json.scalacheck.JsValueGenerators import Arbitrary._ object Generators extends JsValueGenerators { def createNestedObjectsNestedGenerator = _generate(NestedObjectsNestedGenerator) def createNestedObjectsNestedNested2Nested3BottomGenerator = _generate(NestedObjectsNestedNested2Nested3BottomGenerator) def createNestedObjectsNestedNested2Nested3Generator = _generate(NestedObjectsNestedNested2Nested3Generator) def createNestedObjectsPlainGenerator = _generate(NestedObjectsPlainGenerator) def NestedObjectsNestedGenerator = Gen.option(NestedObjectsNestedOptGenerator) def NestedObjectsNestedNested2Nested3BottomGenerator = Gen.option(arbitrary[String]) def NestedObjectsNestedNested2Nested3Generator = Gen.option(NestedObjectsNestedNested2Nested3OptGenerator) def NestedObjectsPlainGenerator = Gen.option(NestedObjectsPlainOptGenerator) def createNestedObjectsNestedOptGenerator = _generate(NestedObjectsNestedOptGenerator) def createNestedObjectsNestedNested2Nested3OptGenerator = _generate(NestedObjectsNestedNested2Nested3OptGenerator) def createNestedObjectsGenerator = _generate(NestedObjectsGenerator) def createNestedObjectsPlainOptGenerator = _generate(NestedObjectsPlainOptGenerator) def createNestedObjectsNestedNested2Generator = _generate(NestedObjectsNestedNested2Generator) def NestedObjectsNestedOptGenerator = for { nested2 <- NestedObjectsNestedNested2Generator } yield NestedObjectsNestedOpt(nested2) def NestedObjectsNestedNested2Nested3OptGenerator = for { bottom <- NestedObjectsNestedNested2Nested3BottomGenerator } yield NestedObjectsNestedNested2Nested3Opt(bottom) def NestedObjectsGenerator = for { plain <- NestedObjectsPlainGenerator nested <- NestedObjectsNestedGenerator } yield NestedObjects(plain, nested) def NestedObjectsPlainOptGenerator = for { simple <- arbitrary[String] } yield NestedObjectsPlainOpt(simple) def NestedObjectsNestedNested2Generator = for { nested3 <- NestedObjectsNestedNested2Nested3Generator } yield NestedObjectsNestedNested2(nested3) def _generate[T](gen: Gen[T]) = (count: Int) => for (i <- 1 to count) yield gen.sample }
zalando/play-swagger
play-scala-generator/src/test/resources/expected_results/test_data/nested_objects_yaml.scala
Scala
mit
2,380
package org.tuubes.core.engine

/**
 * An actor is a basic isolated "entity" that reacts to the messages it receives. The only way to
 * interact with an actor is to send it an [[ActorMessage]].
 *
 * @author TheElectronWill
 */
trait Actor {
  /**
   * Sends a message to this actor.
   *
   * @param msg the message
   */
  def !(msg: ActorMessage)(implicit currentGroup: ExecutionGroup): Unit

  /**
   * Reacts to a received message.
   *
   * @param msg the received message
   */
  protected def onMessage(msg: ActorMessage)

  /**
   * Filters a message before it is sent to this actor. Returns true if and only if the message
   * can be sent, false if it must be ignored. An ignored message will never be processed by
   * the [[onMessage()]] method.
   *
   * @param msg the message that is going to be sent
   * @return true if the message can be sent, false to ignore the message
   */
  protected def filter(msg: ActorMessage): Boolean

  /**
   * Gets the ID that uniquely identifies this actor.
   *
   * @return the actor's unique id
   */
  def id: ActorId
}
mcphoton/Photon-Server
core/src/main/scala/org/tuubes/core/engine/Actor.scala
Scala
lgpl-3.0
1,059
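A minimal sketch of implementing the Actor trait above. ActorMessage, ActorId and ExecutionGroup come from the same org.tuubes.core.engine package and are only referenced, not defined, here; the inline processing strategy is illustrative rather than how Tuubes actually schedules messages:

package org.tuubes.core.engine

// Hypothetical actor for illustration: accepts every message and processes it inline
// instead of handing it off to the ExecutionGroup.
class LoggingActor(val id: ActorId) extends Actor {

  def !(msg: ActorMessage)(implicit currentGroup: ExecutionGroup): Unit =
    if (filter(msg)) onMessage(msg)

  protected def onMessage(msg: ActorMessage): Unit =
    println(s"actor $id received $msg")

  protected def filter(msg: ActorMessage): Boolean = true
}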
package com.v_standard.scalikejdbc.orm.dao import com.v_standard.scalikejdbc.orm.{DBSpecBase, PagingSeq} import com.v_standard.scalikejdbc.orm.condition.{Condition, LogicalExpressionCondition} import com.v_standard.scalikejdbc.orm.definition.{AbstractEntityDefinition, Column, EntityBase, ReferenceTable} import com.v_standard.scalikejdbc.orm.query.Asc import com.v_standard.scalikejdbc.orm.query.JoinType import com.v_standard.scalikejdbc.orm.query.JoinType._ import com.v_standard.scalikejdbc.orm.query.{AbstractQuery, Columns, DateTimeQueryColumn, IntQueryColumn, OrderBy, Query, StringQueryColumn, Where} import com.v_standard.scalikejdbc.orm.query.OrderByTypeFactory.columnBase2OrderByTypeFactory import java.sql.{Timestamp, Types} import org.joda.time.DateTime import org.scalatest.FunSpec import org.scalatest.matchers.ShouldMatchers import scala.collection.mutable import scala.collection.mutable.ListBuffer import scalikejdbc.{AutoSession, DB, DBSession, WrappedResultSet} /** * ReadDao テストスペッククラス。 */ class ReadDaoSpec extends FunSpec with ShouldMatchers with DBSpecBase { val DB_NAME: String = "read_dao" case class Tbl3(id: Option[Int], tbl1Id1: Option[Int], tbl1Id2: Option[Int], tbl1ByTbl1Id1: Option[Tbl1], tbl1ByTbl1Id2: Option[Tbl1]) extends EntityBase class Tbl3Definition extends AbstractEntityDefinition[Tbl3]("tbl3") { val id = Column[Tbl3]("id", Types.INTEGER, true, true, Some(1), _.id) val tbl1Id1 = Column[Tbl3]("tbl1_id1", Types.INTEGER, true, false, None, _.tbl1Id1) val tbl1Id2 = Column[Tbl3]("tbl1_id2", Types.INTEGER, true, false, None, _.tbl1Id2) val columns = List(id, tbl1Id1, tbl1Id2) override val foreignKeys = Map( "tbl3_tbl1_id1_fkey" -> ReferenceTable(Tbl1Def.entityName, Seq((tbl1Id1, Tbl1Def.id))), "tbl3_tbl1_id2_fkey" -> ReferenceTable(Tbl1Def.entityName, Seq((tbl1Id2, Tbl1Def.id))) ) } object Tbl3Def extends Tbl3Definition class Tbl3QueryColumns(alias: String) { val id = new IntQueryColumn(Tbl3Def.id, alias) val tbl1Id1 = new IntQueryColumn(Tbl3Def.tbl1Id1, alias) val tbl1Id2 = new IntQueryColumn(Tbl3Def.tbl1Id2, alias) } class Tbl3Query(parentQuery: Option[AbstractQuery[_, _, _, _]] = None, alias: String = "") extends AbstractQuery[Tbl3, Tbl3Definition, Tbl3QueryColumns, Tbl3Query](Tbl3Def, parentQuery, alias) { val where = new Where(this, new Tbl3QueryColumns(tableAlias)) val columns = new Columns[Tbl3Query, Tbl3, Tbl3Definition](this, Tbl3Def) val orderBy = new OrderBy[Tbl3Query, Tbl3Definition](this, tableAlias, Tbl3Def) override protected def joinList: Seq[(String, Option[(AbstractQuery[_, _, _, _], JoinType)], AbstractEntityDefinition[_])] = Seq(("tbl3_tbl1_id1_fkey", joinTbl1ByTbl1Id1, Tbl1Def), ("tbl3_tbl1_id2_fkey", joinTbl1ByTbl1Id2, Tbl1Def)) def createEntity(rs: WrappedResultSet, startIndex: Int): (Int, Option[Tbl3]) = { val (max1, indexes) = columns.queryColumnIndexes(startIndex, !parentQuery.getOrElse(this).distinct) val (max2, tbl1ByTbl1Id1) = joinTbl1ByTbl1Id1.map(j => j._1.createEntity(rs, max1)).getOrElse(max1 -> None) val (max3, tbl1ByTbl1Id2) = joinTbl1ByTbl1Id2.map(j => j._1.createEntity(rs, max2)).getOrElse(max2 -> None) if (!indexes.exists { i => !i.isEmpty && !rs.anyOpt(i.get).isEmpty }) (max3, None) else (max3, Option(new Tbl3( indexes(0).flatMap(rs.intOpt(_)), indexes(1).flatMap(rs.intOpt(_)), indexes(2).flatMap(rs.intOpt(_)), tbl1ByTbl1Id1, tbl1ByTbl1Id2 ))) } protected var joinTbl1ByTbl1Id1: Option[(Tbl1Query, JoinType)] = None def tbl1ByTbl1Id1: Tbl1Query = getQuery(joinTbl1ByTbl1Id1) private def getTbl1QueryByTbl1Id1(join: JoinType, alias: 
String): Tbl1Query = { joinTbl1ByTbl1Id1.map(v => joinQuery(joinTbl1ByTbl1Id1, join)).getOrElse { joinTbl1ByTbl1Id1 = Option((new Tbl1Query(Option(this), alias), join)) joinTbl1ByTbl1Id1.get._1 } } def innerJoinTbl1ByTbl1Id1(alias: String = ""): Tbl1Query = getTbl1QueryByTbl1Id1(JoinType.INNER, alias) def leftJoinTbl1ByTbl1Id1(alias: String = ""): Tbl1Query = getTbl1QueryByTbl1Id1(JoinType.LEFT, alias) protected var joinTbl1ByTbl1Id2: Option[(Tbl1Query, JoinType)] = None def tbl1ByTbl1Id2: Tbl1Query = getQuery(joinTbl1ByTbl1Id2) private def getTbl1QueryByTbl1Id2(join: JoinType, alias: String): Tbl1Query = { joinTbl1ByTbl1Id2.map(v => joinQuery(joinTbl1ByTbl1Id2, join)).getOrElse { joinTbl1ByTbl1Id2 = Option((new Tbl1Query(Option(this), alias), join)) joinTbl1ByTbl1Id2.get._1 } } def innerJoinTbl1ByTbl1Id2(alias: String = ""): Tbl1Query = getTbl1QueryByTbl1Id2(JoinType.INNER, alias) def leftJoinTbl1ByTbl1Id2(alias: String = ""): Tbl1Query = getTbl1QueryByTbl1Id2(JoinType.LEFT, alias) } class Tbl3Dao extends ReadDao[Tbl3, Tbl3Definition, Tbl3QueryColumns, Tbl3Query] case class Tbl2(id: Option[Int], name: Option[String]) extends EntityBase { val tbl1List: ListBuffer[Tbl1] = ListBuffer.empty[Tbl1] } class Tbl2Definition extends AbstractEntityDefinition[Tbl2]("tbl2") { val id = Column[Tbl2]("id", Types.INTEGER, true, true, Some(1), _.id) val name = Column[Tbl2]("name", Types.VARCHAR, false, false, None, _.name) val columns = List(id, name) } object Tbl2Def extends Tbl2Definition class Tbl2QueryColumns(alias: String) { val id = new IntQueryColumn(Tbl2Def.id, alias) val name = new StringQueryColumn(Tbl2Def.name, alias) } class Tbl2Query(parentQuery: Option[AbstractQuery[_, _, _, _]] = None, alias: String = "") extends AbstractQuery[Tbl2, Tbl2Definition, Tbl2QueryColumns, Tbl2Query](Tbl2Def, parentQuery, alias) with WithTbl1{ val where = new Where(this, new Tbl2QueryColumns(tableAlias)) val columns = new Columns[Tbl2Query, Tbl2, Tbl2Definition](this, Tbl2Def) val orderBy = new OrderBy[Tbl2Query, Tbl2Definition](this, tableAlias, Tbl2Def) def createEntity(rs: WrappedResultSet, startIndex: Int): (Int, Option[Tbl2]) = { val (max1, indexes) = columns.queryColumnIndexes(startIndex, !parentQuery.getOrElse(this).distinct) if (!indexes.exists { i => !i.isEmpty && !rs.anyOpt(i.get).isEmpty }) (max1, None) else (max1, Option(Tbl2( indexes(0).flatMap(rs.intOpt(_)), indexes(1).flatMap(rs.stringOpt(_)) ))) } } trait JoinTbl2 { self: AbstractQuery[_, _, _, _] => protected var joinTbl2: Option[(Tbl2Query, JoinType)] = None def tbl2: Tbl2Query = getQuery(joinTbl2) private def getTbl2Query(join: JoinType, alias: String): Tbl2Query = { joinTbl2.map(v => joinQuery(joinTbl2, join)).getOrElse { joinTbl2 = Option((new Tbl2Query(Option(this), alias), join)) joinTbl2.get._1 } } def innerJoinTbl2(alias: String = ""): Tbl2Query = getTbl2Query(JoinType.INNER, alias) def leftJoinTbl2(alias: String = ""): Tbl2Query = getTbl2Query(JoinType.LEFT, alias) } class Tbl2Dao extends ReadDao[Tbl2, Tbl2Definition, Tbl2QueryColumns, Tbl2Query] { override def select(query: Tbl2Query)(implicit session: DBSession): Seq[Tbl2] = { val lst = _select(query) selectChildren(query, lst) lst } override def selectEntity(query: Tbl2Query)(implicit session: DBSession): Option[Tbl2] = { val entity = _selectEntity(query) entity.foreach(e => selectChildren(query, Seq(e))) entity } override def selectPage(query: Tbl2Query)(implicit session: DBSession): PagingSeq[Tbl2] = { val lst = _selectPage(query) selectChildren(query, lst) lst } private def 
selectChildren(query: Tbl2Query, lst: Seq[Tbl2])(implicit session: DBSession) { _selectChildren[Int, Tbl1, Tbl1Query](query.tbl1Query, lst, _.id.get, { (q, values) => q.columns(df => Seq(df.tbl2Id)) q.where(_.tbl2Id IN values) }, new Tbl1Dao().select(_), (m, c) => m(c.tbl2Id.get).tbl1List) } } case class Tbl1(id: Option[Int], number: Option[Int], name: Option[String], ctime: Option[DateTime], tbl2Id: Option[Int], tbl2: Option[Tbl2]) extends EntityBase class Tbl1Definition extends AbstractEntityDefinition[Tbl1]("tbl1") { val id = Column[Tbl1]("id", Types.INTEGER, true, true, Some(1), _.id) val number = Column[Tbl1]("number", Types.INTEGER, false, false, None, _.number) val name = Column[Tbl1]("name", Types.VARCHAR, false, false, None, _.name) val ctime = Column[Tbl1]("ctime", Types.TIMESTAMP, false, false, None, _.ctime) val tbl2Id = Column[Tbl1]("tbl2_id", Types.INTEGER, false, false, None, _.tbl2Id) val columns = List(id, number, name, ctime, tbl2Id) override val foreignKeys = Map( "tbl1_tbl2Id_fkey" -> ReferenceTable(Tbl2Def.entityName, Seq((tbl2Id, Tbl2Def.id))) ) } object Tbl1Def extends Tbl1Definition class Tbl1QueryColumns(alias: String) { val id = new IntQueryColumn(Tbl1Def.id, alias) val number = new IntQueryColumn(Tbl1Def.number, alias) val name = new StringQueryColumn(Tbl1Def.name, alias) val ctime = new DateTimeQueryColumn(Tbl1Def.ctime, alias) val tbl2Id = new IntQueryColumn(Tbl1Def.tbl2Id, alias) } class Tbl1Query(parentQuery: Option[AbstractQuery[_, _, _, _]] = None, alias: String = "") extends AbstractQuery[Tbl1, Tbl1Definition, Tbl1QueryColumns, Tbl1Query](Tbl1Def, parentQuery, alias) with JoinTbl2 { val where = new Where(this, new Tbl1QueryColumns(tableAlias)) val columns = new Columns[Tbl1Query, Tbl1, Tbl1Definition](this, Tbl1Def) val orderBy = new OrderBy[Tbl1Query, Tbl1Definition](this, tableAlias, Tbl1Def) override protected def joinList: Seq[(String, Option[(AbstractQuery[_, _, _, _], JoinType)], AbstractEntityDefinition[_])] = Seq(("tbl1_tbl2Id_fkey", joinTbl2, Tbl2Def)) def createEntity(rs: WrappedResultSet, startIndex: Int): (Int, Option[Tbl1]) = { val (max1, indexes) = columns.queryColumnIndexes(startIndex, !parentQuery.getOrElse(this).distinct) val (max2, tbl2) = joinTbl2.map(j => j._1.createEntity(rs, max1)).getOrElse(max1 -> None) if (!indexes.exists { i => !i.isEmpty && !rs.anyOpt(i.get).isEmpty }) (max2, None) else (max2, Option(Tbl1( indexes(0).flatMap(rs.intOpt(_)), indexes(1).flatMap(rs.intOpt(_)), indexes(2).flatMap(rs.stringOpt(_)), indexes(3).flatMap(i => rs.timestampOpt(i).map(tm => new DateTime(tm.getTime))), indexes(4).flatMap(rs.intOpt(_)), tbl2 ))) } } trait WithTbl1 { private var _tbl1Query: Option[Tbl1Query] = None def tbl1Query: Option[Tbl1Query] = _tbl1Query def withTbl1(f: Tbl1Query => Unit): Tbl1Query = { _tbl1Query.getOrElse { _tbl1Query = Option(new Tbl1Query) f(_tbl1Query.get) _tbl1Query.get } } } class Tbl1Dao extends ReadDao[Tbl1, Tbl1Definition, Tbl1QueryColumns, Tbl1Query] describe("select") { describe("複数データにマッチするクエリー") { it("複数件取得される") { execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP)") execute("INSERT INTO tbl1(number, name, ctime) VALUES(15, '山田太郎', '2013-03-11 12:15:12')") execute("INSERT INTO tbl1(number, name, ctime) VALUES(16, '鈴木一郎', null)") execute("INSERT INTO tbl1(number, name, ctime) VALUES(17, '坂上二郎', '2013-03-13 12:15:12')") val dao = new Tbl1Dao val q = new Tbl1Query q.where(_.number >= 16) q.columns(df => 
Seq(df.number)) q.orderBy(_.id.asc) DB readOnly { implicit session => val res = dao.select(q) res should have size(2) res(0) should be (Tbl1(Option(2), Option(16), None, None, None, None)) res(1) should be (Tbl1(Option(3), Option(17), None, None, None, None)) } } } describe("マッチしないクエリー") { it("0 件返される") { execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP)") execute("INSERT INTO tbl1(number, name, ctime) VALUES(15, '山田太郎', '2013-03-11 12:15:12')") execute("INSERT INTO tbl1(number, name, ctime) VALUES(16, '鈴木一郎', null)") execute("INSERT INTO tbl1(number, name, ctime) VALUES(17, '坂上二郎', '2013-03-13 12:15:12')") val dao = new Tbl1Dao val q = new Tbl1Query q.where(_.number < 15) q.columns(df => Seq(df.number)) q.orderBy(_.id.asc) DB readOnly { implicit session => val res = dao.select(q) res should have size(0) } } } describe("LEFT JOIN クエリー") { it("連結情報も取得される") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl2(name) VALUES('あいうえお')") execute("INSERT INTO tbl2(name) VALUES('かきくけこ')") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', 2)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") val dao = new Tbl1Dao val q = new Tbl1Query q.leftJoinTbl2() q.where(_.name === "山田太郎") q.columns(df => Seq(df.number)) q.tbl2.columns(df => Seq(df.name)) q.orderBy(_.id.asc) DB readOnly { implicit session => val res = dao.select(q) res should have size(1) res(0) should be (Tbl1(Option(1), Option(15), None, None, None, Option(Tbl2(Option(1), Option("あいうえお"))))) } } } describe("LEFT JOIN で参照先が無い場合") { it("連結情報は None") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl2(name) VALUES('あいうえお')") execute("INSERT INTO tbl2(name) VALUES('かきくけこ')") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', 2)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") val dao = new Tbl1Dao val q = new Tbl1Query q.leftJoinTbl2() q.where(_.name === "北島三郎") q.columns(df => Seq(df.number)) q.tbl2.columns(df => Seq(df.name)) q.orderBy(_.id.asc) DB readOnly { implicit session => val res = dao.select(q) res should have size(1) res(0) should be (Tbl1(Option(4), Option(18), None, None, None, None)) } } } describe("JOIN クエリー") { describe("OR 条件") { it("連結情報も取得される") { execute("CREATE TABLE tbl2(id 
INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl2(name) VALUES('あいうえお')") execute("INSERT INTO tbl2(name) VALUES('かきくけこ')") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', 2)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") val dao = new Tbl1Dao val q = new Tbl1Query q.innerJoinTbl2() q.where.orScope { w => w(_.number === 16) w(_.number === 17) } q.columns(df => Seq(df.number)) q.tbl2.columns(df => Seq(df.name)) q.orderBy(_.id.asc) DB readOnly { implicit session => val res = dao.select(q) res should have size(2) res(0) should be (Tbl1(Option(2), Option(16), None, None, None, Option(Tbl2(Option(1), Option("あいうえお"))))) res(1) should be (Tbl1(Option(3), Option(17), None, None, None, Option(Tbl2(Option(2), Option("かきくけこ"))))) } } } } describe("JOIN で参照先が無い場合") { it("0 件") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl2(name) VALUES('あいうえお')") execute("INSERT INTO tbl2(name) VALUES('かきくけこ')") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', 2)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") val dao = new Tbl1Dao val q = new Tbl1Query q.innerJoinTbl2() q.where(_.number === 18) q.columns(df => Seq(df.number)) q.tbl2.columns(df => Seq(df.name)) q.orderBy(_.id.asc) DB readOnly { implicit session => val res = dao.select(q) res should have size(0) } } } describe("同一テーブルを参照する外部キーが複数ある場合") { it("同一テーブルを複数 JOIN した結果を返す") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("CREATE TABLE tbl3(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, tbl1_id1 INTEGER, tbl1_id2 INTEGER, CONSTRAINT tbl3_tbl1_id1 FOREIGN KEY (tbl1_id1) REFERENCES tbl1(id), CONSTRAINT tbl3_tbl1_id2 FOREIGN KEY (tbl1_id2) REFERENCES tbl1(id))") execute("INSERT INTO tbl2(name) VALUES('あいうえお')") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', 
'2013-03-13 12:15:12', 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") execute("INSERT INTO tbl3(tbl1_id1, tbl1_id2) VALUES(2, 4)") val dao = new Tbl3Dao val q = new Tbl3Query q.innerJoinTbl1ByTbl1Id1() q.leftJoinTbl1ByTbl1Id2() q.tbl1ByTbl1Id1.columns(df => Seq(df.name)) q.tbl1ByTbl1Id2.columns(df => Seq(df.name)) DB readOnly { implicit session => val res = dao.select(q) res should have size(1) res(0) should be (Tbl3(Option(1), Option(2), Option(4), Option(Tbl1(Option(2), None, Option("鈴木一郎"), None, None, None)), Option(Tbl1(Option(4), None, Option("北島三郎"), None, None, None)))) } } } describe("paging 指定の場合") { it("指定ページから指定件数取得") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl2(name) VALUES('あいうえお')") execute("INSERT INTO tbl2(name) VALUES('かきくけこ')") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', 2)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(19, '天草四郎', CURRENT_TIMESTAMP, null)") val dao = new Tbl1Dao val q = new Tbl1Query q.columns(df => Seq(df.id)) q.orderBy(_.id.asc) q.pageSize = Option(2) q.pageNo = Option(2) DB readOnly { implicit session => val res = dao.select(q) res should have size(2) res(0) should be (Tbl1(Option(3), None, None, None, None, None)) res(1) should be (Tbl1(Option(4), None, None, None, None, None)) } } } describe("WITH を指定") { it("子一覧も取得される") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl2(name) VALUES('あいうえお')") execute("INSERT INTO tbl2(name) VALUES('かきくけこ')") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', 2)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") val dao = new Tbl2Dao val q = new Tbl2Query q.withTbl1 {q => q.columns(df => Seq(df.id)) q.orderBy(_.id.desc) } q.columns(df => Seq(df.id)) q.orderBy(_.id.asc) DB readOnly { implicit session => val res = dao.select(q) res should have size(2) res(0) should be (Tbl2(Option(1), None)) res(1) should be (Tbl2(Option(2), None)) res(0).tbl1List should be (ListBuffer(Tbl1(Option(2), None, None, None, Option(1), None), Tbl1(Option(1), None, None, None, Option(1), None))) res(1).tbl1List should be (ListBuffer(Tbl1(Option(3), None, None, None, Option(2), None))) } } } describe("LIKE で部分一致") { it("指定した文字が含まれるものが取得される") { 
execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP)") execute("INSERT INTO tbl1(number, name, ctime) VALUES(15, '山田太郎', '2013-03-11 12:15:12')") execute("INSERT INTO tbl1(number, name, ctime) VALUES(16, '鈴木一郎', null)") execute("INSERT INTO tbl1(number, name, ctime) VALUES(17, '坂上二郎', '2013-03-13 12:15:12')") execute("INSERT INTO tbl1(number, name, ctime) VALUES(18, '太地喜和子', CURRENT_TIMESTAMP)") execute("INSERT INTO tbl1(number, name, ctime) VALUES(19, '菅原文太', CURRENT_TIMESTAMP)") val dao = new Tbl1Dao val q = new Tbl1Query q.where(_.name LIKE "太") q.columns(df => Seq(df.number)) q.columns(df => Seq(df.name)) q.orderBy(_.id.asc) DB readOnly { implicit session => val res = dao.select(q) res should have size(3) res(0) should be (Tbl1(Option(1), Option(15), Option("山田太郎"), None, None, None)) res(1) should be (Tbl1(Option(4), Option(18), Option("太地喜和子"), None, None, None)) res(2) should be (Tbl1(Option(5), Option(19), Option("菅原文太"), None, None, None)) } } } describe("LIKE で前方一致") { it("指定した文字から始まるものが取得される") { execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP)") execute("INSERT INTO tbl1(number, name, ctime) VALUES(15, '山田太郎', '2013-03-11 12:15:12')") execute("INSERT INTO tbl1(number, name, ctime) VALUES(16, '鈴木一郎', null)") execute("INSERT INTO tbl1(number, name, ctime) VALUES(17, '坂上二郎', '2013-03-13 12:15:12')") execute("INSERT INTO tbl1(number, name, ctime) VALUES(18, '太地喜和子', CURRENT_TIMESTAMP)") execute("INSERT INTO tbl1(number, name, ctime) VALUES(19, '菅原文太', CURRENT_TIMESTAMP)") import com.v_standard.scalikejdbc.orm.query.% val dao = new Tbl1Dao val q = new Tbl1Query q.where(_.name LIKE("太", %)) q.columns(df => Seq(df.number)) q.columns(df => Seq(df.name)) q.orderBy(_.id.asc) DB readOnly { implicit session => val res = dao.select(q) res should have size(1) res(0) should be (Tbl1(Option(4), Option(18), Option("太地喜和子"), None, None, None)) } } } describe("LIKE で後方一致") { it("指定した文字で終わるものが取得される") { execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP)") execute("INSERT INTO tbl1(number, name, ctime) VALUES(15, '山田太郎', '2013-03-11 12:15:12')") execute("INSERT INTO tbl1(number, name, ctime) VALUES(16, '鈴木一郎', null)") execute("INSERT INTO tbl1(number, name, ctime) VALUES(17, '坂上二郎', '2013-03-13 12:15:12')") execute("INSERT INTO tbl1(number, name, ctime) VALUES(18, '太地喜和子', CURRENT_TIMESTAMP)") execute("INSERT INTO tbl1(number, name, ctime) VALUES(19, '菅原文太', CURRENT_TIMESTAMP)") import com.v_standard.scalikejdbc.orm.query.% val dao = new Tbl1Dao val q = new Tbl1Query q.where(_.name LIKE(%, "太")) q.columns(df => Seq(df.number)) q.columns(df => Seq(df.name)) q.orderBy(_.id.asc) DB readOnly { implicit session => val res = dao.select(q) res should have size(1) res(0) should be (Tbl1(Option(5), Option(19), Option("菅原文太"), None, None, None)) } } } describe("エイリアス指定") { it("例外") { val dao = new Tbl1Dao val q = new Tbl1Query q.innerJoinTbl2("a2") DB readOnly { implicit session => evaluating { dao.select(q) } should produce[IllegalStateException] } } } } describe("selectEntity") { describe("存在する PK 指定の場合") { it("指定した値を取得") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as 
identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(19, '天草四郎', CURRENT_TIMESTAMP, null)") val dao = new Tbl1Dao val q = new Tbl1Query q.where(_.id === 1) q.columns(df => Seq(df.number, df.name)) DB readOnly { implicit session => val res = dao.selectEntity(q) res.get should be (Tbl1(Option(1), Option(15), Option("山田太郎"), None, None, None)) } } } describe("存在しない PK 指定の場合") { it("None を返す") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(19, '天草四郎', CURRENT_TIMESTAMP, null)") val dao = new Tbl1Dao val q = new Tbl1Query q.where(_.id === 10) q.columns(df => Seq(df.number, df.name)) DB readOnly { implicit session => val res = dao.selectEntity(q) res should be (None) } } } describe("WITH を指定") { it("子一覧も取得される") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl2(name) VALUES('あいうえお')") execute("INSERT INTO tbl2(name) VALUES('かきくけこ')") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', 2)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") val dao = new Tbl2Dao val q = new Tbl2Query q.withTbl1 {q => q.columns(df => Seq(df.id)) q.orderBy(_.id.desc) } q.columns(df => Seq(df.id)) q.where(_.id === 2) q.orderBy(_.id.asc) DB readOnly { implicit session => val res = dao.selectEntity(q) res.get should be (Tbl2(Option(2), None)) res.get.tbl1List should be (ListBuffer(Tbl1(Option(3), None, None, None, Option(2), None))) } } } describe("DISTINCT を指定") { it("重複行は削除される") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE 
tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl2(name) VALUES('あいうえお')") execute("INSERT INTO tbl2(name) VALUES('かきくけこ')") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', 2)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(19, '山田太郎', '2013-03-11 12:15:12', 1)") val dao = new Tbl1Dao val q = new Tbl1Query q.distinct = true q.innerJoinTbl2() q.columns(df => Seq(df.name)) q.tbl2.columns(df => Seq(df.name)) q.where(_.tbl2Id === 1) q.orderBy(_.name.asc) DB readOnly { implicit session => val res = dao.select(q) res should have size(2) res(0) should be (Tbl1(None, None, Option("山田太郎"), None, None, Option(Tbl2(None, Option("あいうえお"))))) res(1) should be (Tbl1(None, None, Option("鈴木一郎"), None, None, Option(Tbl2(None, Option("あいうえお"))))) } } } describe("エイリアスを指定") { it("例外") { val dao = new Tbl1Dao val q = new Tbl1Query(None, "a1") q.where(_.id === 10) q.columns(df => Seq(df.number, df.name)) DB readOnly { implicit session => evaluating { dao.selectEntity(q) } should produce[IllegalStateException] } } } } describe("selectCount") { describe("複数データにマッチするクエリー") { it("件数が取得される") { execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP)") execute("INSERT INTO tbl1(number, name, ctime) VALUES(15, '山田太郎', '2013-03-11 12:15:12')") execute("INSERT INTO tbl1(number, name, ctime) VALUES(16, '鈴木一郎', null)") execute("INSERT INTO tbl1(number, name, ctime) VALUES(17, '坂上二郎', '2013-03-13 12:15:12')") val dao = new Tbl1Dao val q = new Tbl1Query q.where(_.number >= 16) q.columns(df => Seq(df.number)) q.orderBy(_.id.asc) DB readOnly { implicit session => val res = dao.selectCount(q) res should be (2L) } } } describe("DISTINCT を指定") { it("重複行は削除される") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl2(name) VALUES('あいうえお')") execute("INSERT INTO tbl2(name) VALUES('かきくけこ')") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', 2)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(19, '山田太郎', '2013-03-11 12:15:12', 1)") val dao = new Tbl1Dao val q = new Tbl1Query q.distinct = true q.innerJoinTbl2() q.columns(df => Seq(df.name)) q.tbl2.columns(df => Seq(df.name)) q.where(_.tbl2Id === 1) q.orderBy(_.name.asc) DB readOnly { implicit session => val res = dao.selectCount(q) res should be (2L) } } } } 
describe("selectPage") { describe("指定された検索結果が 0 件の場合") { describe("1 ページ目指定") { it("0 件のリストを返す") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(19, '天草四郎', CURRENT_TIMESTAMP, null)") val dao = new Tbl1Dao val q = new Tbl1Query q.where(_.number > 50) q.orderBy(_.id.asc) q.pageSize = Option(2) q.pageNo = Option(1) DB readOnly { implicit session => val res = dao.selectPage(q) res should have size(0) res.allCount should be (0) res.pageNo should be (1) res.nextPage should be (None) } } } describe("エイリアス指定") { it("例外") { val dao = new Tbl1Dao val q = new Tbl1Query(None, "a1") q.where(_.number > 50) q.orderBy(_.id.asc) q.pageSize = Option(2) q.pageNo = Option(1) DB readOnly { implicit session => evaluating { dao.selectPage(q) } should produce[IllegalStateException] } } } describe("2 ページ目以降指定") { describe("データありの場合") { it("最終ページのリストを返す") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(19, '天草四郎', CURRENT_TIMESTAMP, null)") val dao = new Tbl1Dao val q = new Tbl1Query q.orderBy(_.id.asc) q.pageSize = Option(2) q.pageNo = Option(4) DB readOnly { implicit session => val res = dao.selectPage(q) res should have size(1) res.allCount should be (5) res.pageNo should be (3) res.nextPage should be (None) } } } describe("データなしの場合") { it("0 件のリストを返す") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', null)") execute("INSERT INTO tbl1(number, name, ctime, 
tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(19, '天草四郎', CURRENT_TIMESTAMP, null)") val dao = new Tbl1Dao val q = new Tbl1Query q.where(_.number > 50) q.orderBy(_.id.asc) q.pageSize = Option(2) q.pageNo = Option(4) DB readOnly { implicit session => val res = dao.selectPage(q) res should have size(0) res.allCount should be (0) res.pageNo should be (1) res.nextPage should be (None) } } } } } describe("指定された検索結果がページサイズと等しい場合") { describe("1 ページ目指定") { it("次ページ情報も取得される") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(19, '天草四郎', CURRENT_TIMESTAMP, null)") val dao = new Tbl1Dao val q = new Tbl1Query q.orderBy(_.id.asc) q.pageSize = Option(2) q.pageNo = Option(1) DB readOnly { implicit session => val res = dao.selectPage(q) res should have size(2) res.allCount should be (5) res.pageNo should be (1) res.nextPage should be (Option(2)) } } } describe("2 ページ目以降指定") { it("次ページ情報も取得される") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(19, '天草四郎', CURRENT_TIMESTAMP, null)") val dao = new Tbl1Dao val q = new Tbl1Query q.orderBy(_.id.asc) q.pageSize = Option(2) q.pageNo = Option(2) DB readOnly { implicit session => val res = dao.selectPage(q) res should have size(2) res.allCount should be (5) res.pageNo should be (2) res.nextPage should be (Option(3)) } } } } describe("指定された検索結果が 0 とページサイズの間の場合") { describe("1 ページ目指定") { it("次ページ情報は無し") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', 
null, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(19, '天草四郎', CURRENT_TIMESTAMP, null)") val dao = new Tbl1Dao val q = new Tbl1Query q.where(_.number === 17) q.orderBy(_.id.asc) q.pageSize = Option(2) q.pageNo = Option(1) DB readOnly { implicit session => val res = dao.selectPage(q) res should have size(1) res.allCount should be (1) res.pageNo should be (1) res.nextPage should be (None) } } } describe("2 ページ目以降指定") { it("次ページ情報は無し") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(19, '天草四郎', CURRENT_TIMESTAMP, null)") val dao = new Tbl1Dao val q = new Tbl1Query q.orderBy(_.id.asc) q.pageSize = Option(2) q.pageNo = Option(3) DB readOnly { implicit session => val res = dao.selectPage(q) res should have size(1) res.allCount should be (5) res.pageNo should be (3) res.nextPage should be (None) } } } } describe("WITH を指定") { it("子一覧も取得される") { execute("CREATE TABLE tbl2(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, name VARCHAR(32))") execute("CREATE TABLE tbl1(id INT generated always as identity (start with 1, increment by 1) PRIMARY KEY, number INT, name VARCHAR(32), ctime TIMESTAMP, tbl2_id INTEGER, CONSTRAINT fk_tbl1_tbl2_id FOREIGN KEY (tbl2_id) REFERENCES tbl2(id))") execute("INSERT INTO tbl2(name) VALUES('あいうえお')") execute("INSERT INTO tbl2(name) VALUES('かきくけこ')") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(15, '山田太郎', '2013-03-11 12:15:12', 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(16, '鈴木一郎', null, 1)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(17, '坂上二郎', '2013-03-13 12:15:12', 2)") execute("INSERT INTO tbl1(number, name, ctime, tbl2_id) VALUES(18, '北島三郎', CURRENT_TIMESTAMP, null)") val dao = new Tbl2Dao val q = new Tbl2Query q.withTbl1 {q => q.columns(df => Seq(df.id)) q.orderBy(_.id.desc) } q.columns(df => Seq(df.id)) q.pageSize = Option(1) q.pageNo = Option(2) q.orderBy(_.id.asc) DB readOnly { implicit session => val res = dao.selectPage(q) res should have size(1) res(0) should be (Tbl2(Option(2), None)) res(0).tbl1List should be (ListBuffer(Tbl1(Option(3), None, None, None, Option(2), None))) } } } } describe("selectSql") { describe("エイリアス未指定") { it("自動生成されたエイリアスの SQL とパラメータを返す") { val dao = new Tbl1Dao val q = new Tbl1Query q.leftJoinTbl2() q.where(_.name === "山田太郎") q.columns(df => Seq(df.number)) q.tbl2.columns(df => Seq(df.name)) q.orderBy(_.id.asc) DB readOnly { implicit session => val res = dao.selectSql(q) res._1 should be ("SELECT t1.id AS t1_id, t1.number AS t1_number, 
t2.id AS t2_id, t2.name AS t2_name FROM tbl1 t1 LEFT JOIN tbl2 t2 ON t1.tbl2_id = t2.id WHERE t1.name = ? ORDER BY t1.id") res._2 should be (Seq("山田太郎")) } } } describe("エイリアス指定") { it("指定されたエイリアスの SQL とパラメータを返す") { val dao = new Tbl1Dao val q = new Tbl1Query(None, "a1") q.leftJoinTbl2("a2") q.where(_.name === "山田太郎") q.columns(df => Seq(df.number)) q.tbl2.columns(df => Seq(df.name)) q.orderBy(_.id.asc) DB readOnly { implicit session => val res = dao.selectSql(q) res._1 should be ("SELECT a1.id AS a1_id, a1.number AS a1_number, a2.id AS a2_id, a2.name AS a2_name FROM tbl1 a1 LEFT JOIN tbl2 a2 ON a1.tbl2_id = a2.id WHERE a1.name = ? ORDER BY a1.id") res._2 should be (Seq("山田太郎")) } } } } }
VanishStandard/scalikejdbc-orm
src/test/scala/com/v_standard/scalikejdbc/orm/dao/ReadDaoSpec.scala
Scala
bsd-3-clause
49,395
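The ReadDaoSpec above drives a project-specific query DSL (Tbl1Query, Tbl1Dao, selectEntity/selectPage) built on top of ScalikeJDBC. For readers who have not used ScalikeJDBC itself, the following is a minimal sketch of the plain library calls such a DAO layer ultimately wraps; the in-memory H2 URL, the credentials and the `members` table are illustrative assumptions, not part of the project above.

// Minimal sketch using plain ScalikeJDBC (no ORM layer). The H2 settings and
// the `members` table are illustrative assumptions.
import scalikejdbc._

object PlainScalikeJdbcSketch extends App {
  Class.forName("org.h2.Driver")
  ConnectionPool.singleton("jdbc:h2:mem:sketch;DB_CLOSE_DELAY=-1", "user", "pass")

  // DDL and inserts run in an auto-commit session.
  DB.autoCommit { implicit session =>
    sql"create table members (id int primary key, name varchar(32))".execute.apply()
    sql"insert into members values (1, 'Alice'), (2, 'Bob')".update.apply()
  }

  // Reads use a read-only session, mirroring the `DB readOnly { implicit session => ... }`
  // blocks in the spec above.
  val name: Option[String] = DB.readOnly { implicit session =>
    sql"select name from members where id = ${1}"
      .map(rs => rs.string("name"))
      .single
      .apply()
  }

  println(name) // Some(Alice)
}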
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.expressions import org.apache.spark.sql.catalyst.analysis.UnresolvedException import org.apache.spark.sql.catalyst.errors.TreeNodeException import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.types.{BinaryType, BooleanType, NativeType} object InterpretedPredicate { def apply(expression: Expression, inputSchema: Seq[Attribute]): (Row => Boolean) = apply(BindReferences.bindReference(expression, inputSchema)) def apply(expression: Expression): (Row => Boolean) = { (r: Row) => expression.eval(r).asInstanceOf[Boolean] } } trait Predicate extends Expression { self: Product => def dataType = BooleanType type EvaluatedType = Any } trait PredicateHelper { protected def splitConjunctivePredicates(condition: Expression): Seq[Expression] = { condition match { case And(cond1, cond2) => splitConjunctivePredicates(cond1) ++ splitConjunctivePredicates(cond2) case other => other :: Nil } } protected def splitDisjunctivePredicates(condition: Expression): Seq[Expression] = { condition match { case Or(cond1, cond2) => splitDisjunctivePredicates(cond1) ++ splitDisjunctivePredicates(cond2) case other => other :: Nil } } /** * Returns true if `expr` can be evaluated using only the output of `plan`. This method * can be used to determine when is is acceptable to move expression evaluation within a query * plan. * * For example consider a join between two relations R(a, b) and S(c, d). * * `canEvaluate(EqualTo(a,b), R)` returns `true` where as `canEvaluate(EqualTo(a,c), R)` returns * `false`. */ protected def canEvaluate(expr: Expression, plan: LogicalPlan): Boolean = expr.references.subsetOf(plan.outputSet) } abstract class BinaryPredicate extends BinaryExpression with Predicate { self: Product => def nullable = left.nullable || right.nullable } case class Not(child: Expression) extends UnaryExpression with Predicate { override def foldable = child.foldable def nullable = child.nullable override def toString = s"NOT $child" override def eval(input: Row): Any = { child.eval(input) match { case null => null case b: Boolean => !b } } } /** * Evaluates to `true` if `list` contains `value`. */ case class In(value: Expression, list: Seq[Expression]) extends Predicate { def children = value +: list def nullable = true // TODO: Figure out correct nullability semantics of IN. override def toString = s"$value IN ${list.mkString("(", ",", ")")}" override def eval(input: Row): Any = { val evaluatedValue = value.eval(input) list.exists(e => e.eval(input) == evaluatedValue) } } /** * Optimized version of In clause, when all filter values of In clause are * static. 
*/ case class InSet(value: Expression, hset: Set[Any]) extends Predicate { def children = value :: Nil def nullable = true // TODO: Figure out correct nullability semantics of IN. override def toString = s"$value INSET ${hset.mkString("(", ",", ")")}" override def eval(input: Row): Any = { hset.contains(value.eval(input)) } } case class And(left: Expression, right: Expression) extends BinaryPredicate { def symbol = "&&" override def eval(input: Row): Any = { val l = left.eval(input) if (l == false) { false } else { val r = right.eval(input) if (r == false) { false } else { if (l != null && r != null) { true } else { null } } } } } case class Or(left: Expression, right: Expression) extends BinaryPredicate { def symbol = "||" override def eval(input: Row): Any = { val l = left.eval(input) if (l == true) { true } else { val r = right.eval(input) if (r == true) { true } else { if (l != null && r != null) { false } else { null } } } } } abstract class BinaryComparison extends BinaryPredicate { self: Product => } case class EqualTo(left: Expression, right: Expression) extends BinaryComparison { def symbol = "=" override def eval(input: Row): Any = { val l = left.eval(input) if (l == null) { null } else { val r = right.eval(input) if (r == null) null else if (left.dataType != BinaryType) l == r else BinaryType.ordering.compare( l.asInstanceOf[Array[Byte]], r.asInstanceOf[Array[Byte]]) == 0 } } } case class EqualNullSafe(left: Expression, right: Expression) extends BinaryComparison { def symbol = "<=>" override def nullable = false override def eval(input: Row): Any = { val l = left.eval(input) val r = right.eval(input) if (l == null && r == null) { true } else if (l == null || r == null) { false } else { l == r } } } case class LessThan(left: Expression, right: Expression) extends BinaryComparison { def symbol = "<" lazy val ordering = { if (left.dataType != right.dataType) { throw new TreeNodeException(this, s"Types do not match ${left.dataType} != ${right.dataType}") } left.dataType match { case i: NativeType => i.ordering.asInstanceOf[Ordering[Any]] case other => sys.error(s"Type $other does not support ordered operations") } } override def eval(input: Row): Any = { val evalE1 = left.eval(input) if(evalE1 == null) { null } else { val evalE2 = right.eval(input) if (evalE2 == null) { null } else { ordering.lt(evalE1, evalE2) } } } } case class LessThanOrEqual(left: Expression, right: Expression) extends BinaryComparison { def symbol = "<=" lazy val ordering = { if (left.dataType != right.dataType) { throw new TreeNodeException(this, s"Types do not match ${left.dataType} != ${right.dataType}") } left.dataType match { case i: NativeType => i.ordering.asInstanceOf[Ordering[Any]] case other => sys.error(s"Type $other does not support ordered operations") } } override def eval(input: Row): Any = { val evalE1 = left.eval(input) if(evalE1 == null) { null } else { val evalE2 = right.eval(input) if (evalE2 == null) { null } else { ordering.lteq(evalE1, evalE2) } } } } case class GreaterThan(left: Expression, right: Expression) extends BinaryComparison { def symbol = ">" lazy val ordering = { if (left.dataType != right.dataType) { throw new TreeNodeException(this, s"Types do not match ${left.dataType} != ${right.dataType}") } left.dataType match { case i: NativeType => i.ordering.asInstanceOf[Ordering[Any]] case other => sys.error(s"Type $other does not support ordered operations") } } override def eval(input: Row): Any = { val evalE1 = left.eval(input) if(evalE1 == null) { null } else { val evalE2 = 
right.eval(input) if (evalE2 == null) { null } else { ordering.gt(evalE1, evalE2) } } } } case class GreaterThanOrEqual(left: Expression, right: Expression) extends BinaryComparison { def symbol = ">=" lazy val ordering = { if (left.dataType != right.dataType) { throw new TreeNodeException(this, s"Types do not match ${left.dataType} != ${right.dataType}") } left.dataType match { case i: NativeType => i.ordering.asInstanceOf[Ordering[Any]] case other => sys.error(s"Type $other does not support ordered operations") } } override def eval(input: Row): Any = { val evalE1 = left.eval(input) if(evalE1 == null) { null } else { val evalE2 = right.eval(input) if (evalE2 == null) { null } else { ordering.gteq(evalE1, evalE2) } } } } case class If(predicate: Expression, trueValue: Expression, falseValue: Expression) extends Expression { def children = predicate :: trueValue :: falseValue :: Nil override def nullable = trueValue.nullable || falseValue.nullable override lazy val resolved = childrenResolved && trueValue.dataType == falseValue.dataType def dataType = { if (!resolved) { throw new UnresolvedException( this, s"Can not resolve due to differing types ${trueValue.dataType}, ${falseValue.dataType}") } trueValue.dataType } type EvaluatedType = Any override def eval(input: Row): Any = { if (true == predicate.eval(input)) { trueValue.eval(input) } else { falseValue.eval(input) } } override def toString = s"if ($predicate) $trueValue else $falseValue" } // scalastyle:off /** * Case statements of the form "CASE WHEN a THEN b [WHEN c THEN d]* [ELSE e] END". * Refer to this link for the corresponding semantics: * https://cwiki.apache.org/confluence/display/Hive/LanguageManual+UDF#LanguageManualUDF-ConditionalFunctions * * The other form of case statements "CASE a WHEN b THEN c [WHEN d THEN e]* [ELSE f] END" gets * translated to this form at parsing time. Namely, such a statement gets translated to * "CASE WHEN a=b THEN c [WHEN a=d THEN e]* [ELSE f] END". * * Note that `branches` are considered in consecutive pairs (cond, val), and the optional last * element is the value for the default catch-all case (if provided). Hence, `branches` consists of * at least two elements, and can have an odd or even length. */ // scalastyle:on case class CaseWhen(branches: Seq[Expression]) extends Expression { type EvaluatedType = Any def children = branches def dataType = { if (!resolved) { throw new UnresolvedException(this, "cannot resolve due to differing types in some branches") } branches(1).dataType } @transient private[this] lazy val branchesArr = branches.toArray @transient private[this] lazy val predicates = branches.sliding(2, 2).collect { case Seq(cond, _) => cond }.toSeq @transient private[this] lazy val values = branches.sliding(2, 2).collect { case Seq(_, value) => value }.toSeq @transient private[this] lazy val elseValue = if (branches.length % 2 == 0) None else Option(branches.last) override def nullable = { // If no value is nullable and no elseValue is provided, the whole statement defaults to null. values.exists(_.nullable) || (elseValue.map(_.nullable).getOrElse(true)) } override lazy val resolved = { if (!childrenResolved) { false } else { val allCondBooleans = predicates.forall(_.dataType == BooleanType) // both then and else val should be considered. val dataTypesEqual = (values ++ elseValue).map(_.dataType).distinct.size <= 1 allCondBooleans && dataTypesEqual } } /** Written in imperative fashion for performance considerations. 
*/ override def eval(input: Row): Any = { val len = branchesArr.length var i = 0 // If all branches fail and an elseVal is not provided, the whole statement // defaults to null, according to Hive's semantics. var res: Any = null while (i < len - 1) { if (branchesArr(i).eval(input) == true) { res = branchesArr(i + 1).eval(input) return res } i += 2 } if (i == len - 1) { res = branchesArr(i).eval(input) } res } override def toString = { "CASE" + branches.sliding(2, 2).map { case Seq(cond, value) => s" WHEN $cond THEN $value" case Seq(elseValue) => s" ELSE $elseValue" }.mkString } }
hengyicai/OnlineAggregationUCAS
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala
Scala
apache-2.0
12,402
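The And and Or cases in predicates.scala above implement SQL's three-valued logic: false dominates AND, true dominates OR, and null ("unknown") propagates otherwise. The standalone sketch below reproduces just those truth tables, with Option[Boolean] standing in for Catalyst's nullable Boolean; it is illustrative only and does not use Spark.

// Standalone sketch of the SQL three-valued AND/OR semantics implemented by the
// And/Or `eval` methods above; `None` plays the role of SQL NULL.
object ThreeValuedLogicSketch extends App {

  // AND: false wins over unknown; true AND unknown stays unknown.
  def and3(l: Option[Boolean], r: Option[Boolean]): Option[Boolean] = (l, r) match {
    case (Some(false), _) | (_, Some(false)) => Some(false)
    case (Some(true), Some(true))            => Some(true)
    case _                                   => None
  }

  // OR: true wins over unknown; false OR unknown stays unknown.
  def or3(l: Option[Boolean], r: Option[Boolean]): Option[Boolean] = (l, r) match {
    case (Some(true), _) | (_, Some(true)) => Some(true)
    case (Some(false), Some(false))        => Some(false)
    case _                                 => None
  }

  val values = Seq(Some(true), Some(false), None)
  for (l <- values; r <- values)
    println(s"$l AND $r = ${and3(l, r)}, $l OR $r = ${or3(l, r)}")
}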
/** * Copyright 2015 Thomson Reuters * * Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. */ package cmwell.formats import cmwell.domain.{AggregationResponse, _} import cmwell.common.file.MimeTypeIdentifier.isTextual import com.typesafe.scalalogging.LazyLogging import org.joda.time.DateTime /** * Created by gilad on 12/4/14. */ trait Formatter extends LazyLogging { def format: FormatType def mimetype: String = format.mimetype def render(formattable: Formattable): String //sharing what I can as high in the inheritance hierarchy that is possible //TODO: why `type` isn't a part of `system`? protected def system[K, V](infoton: Infoton, mkKey: String => K, mkVal: String => V, mkDateVal: DateTime => V, mkLongVal: Long => V, includeParent: Boolean = true): Seq[(K, V)] = { val indexTime = infoton.indexTime.map(it => Seq(mkKey("indexTime") -> mkLongVal(it))).getOrElse(Nil) val protocol = Seq(mkKey("protocol") -> mkVal(infoton.protocol.getOrElse(cmwell.common.Settings.defaultProtocol))) val seq = Seq( // mkKey("type") -> mkVal(infoton.kind), //TODO: add type to system instead of outer level mkKey("uuid") -> mkVal(infoton.uuid), mkKey("lastModified") -> mkDateVal(infoton.lastModified), mkKey("path") -> mkVal(infoton.path), mkKey("dataCenter") -> mkVal(infoton.dc) ) ++ indexTime ++ protocol ++ Seq(mkKey("parent") -> mkVal(infoton.parent)) if (includeParent) seq else seq.init } protected def thinResult[K, V](result: SearchThinResult, mkKey: String => K, mkVal: String => V, mkLongVal: Long => V, mkFloatVal: Float => V): Seq[(K, V)] = { Seq( mkKey("path") -> mkVal(result.path), mkKey("uuid") -> mkVal(result.uuid), mkKey("lastModified") -> mkVal(result.lastModified), mkKey("indexTime") -> mkLongVal(result.indexTime) ) ++ result.score.map { score => mkKey("score") -> mkFloatVal(score) } } protected def fileContent[K, V](fileContent: FileContent, mkKey: String => K, mkVal: String => V, mkLongVal: Long => V, mkByteArrayVal: Array[Byte] => V): Seq[(K, V)] = fileContent match { case c @ FileContent(data, mime, dl, _) => { val xs = Seq(mkKey("mimeType") -> mkVal(mime), mkKey("length") -> mkLongVal(data.fold(dl)(_.length))) data match { case Some(data) if isTextual(mime) => xs :+ (mkKey("data") -> mkVal(c.asString)) case Some(data) => xs :+ (mkKey("base64-data") -> mkByteArrayVal(data)) case None => xs } } } protected def pagination[K, V](pagination: PaginationInfo, mkKey: String => K, mkVal: String => V, mkRefVal: String => V): Seq[(K, V)] = Seq( Some("type" -> mkVal("PaginationInfo")), Some("first" -> mkRefVal(pagination.first)), pagination.previous.map(x => "previous" -> mkRefVal(x)), Some("self" -> mkRefVal(pagination.self)), pagination.next.map(x => "next" -> mkRefVal(x)), Some("last" -> mkRefVal(pagination.last)) ).collect { case Some((k, v)) => mkKey(k) -> v } protected def simpleResponse[K, V](simpleResponse: SimpleResponse, mkKey: String => K, mkVal: String => V, mkBoolVal: Boolean => V): Seq[(K, V)] = Seq( Some("type" -> mkVal("SimpleResponse")), Some("success" -> mkBoolVal(simpleResponse.success)), 
simpleResponse.msg.map(msg => "message" -> mkVal(msg)) ).collect { case Some((k, v)) => mkKey(k) -> v } // //attempting to reuse code at a higher level of abstraction (currently, achieved only for flat types) // protected def infoton[K,V](i: Infoton, // mkKey: String => K, // mkVal: String => V, // mkLongVal: Long => V, // mkIntVal: Int => V, // mkRefVal: String => V, // compose: Seq[(K,V)] => V): Seq[(K,V)] = { // val empty = Seq.empty[(K,V)] // (mkKey("type") -> mkVal(i.kind)) +: (i match { // case CompoundInfoton(_, _, _, _, offset, length, total) => Seq( // mkKey("offset") -> mkLongVal(offset), // mkKey("length") -> mkLongVal(length), // mkKey("total") -> mkLongVal(total) // ) // case ObjectInfoton(_, _, _) => empty // case FileInfoton(_, _, _, _) => empty // case LinkInfoton(_, _, _, linkTo, linkType) => Seq( // mkKey("linkTo") -> mkRefVal(linkTo), // mkKey("linkType") -> mkIntVal(linkType) // ) // case d: DeletedInfoton => empty // }) // } def cleanDuplicatesPreserveOrder[T](values: Seq[T]): Seq[T] = ((Seq.empty[T] -> Set.empty[T]) /: values) { case (tup @ (seq, set), v) if set(v) => tup case ((seq, set), v) => (seq :+ v) -> set }._1 } abstract class SimpleFormater extends Formatter { protected def thinResultToString(r: SearchThinResult): String protected def infotonToString(i: Infoton): String override def render(formattable: Formattable): String = formattable match { case i: Infoton => infotonToString(i) case str: SearchThinResult => thinResultToString(str) case bag: BagOfInfotons => bag.infotons.map(infotonToString).mkString("", "\\n", "\\n") case ihv: InfotonHistoryVersions => ihv.versions.map(infotonToString).mkString("", "\\n", "\\n") case rp: RetrievablePaths => rp.infotons.map(infotonToString).mkString("", "\\n", "\\n") case sr: SearchResults => sr.infotons.map(infotonToString).mkString("", "\\n", "\\n") case sr: SearchResponse => sr.results.infotons.map(infotonToString).mkString("", "\\n", "\\n") case ir: IterationResults => ir.infotons.map(_.map(infotonToString).mkString("", "\\n", "\\n")).getOrElse("") case _ => throw new NotImplementedError( s"PathFormatter.render implementation not found for ${formattable.getClass.toString}" ) } } object TsvFormatter extends SimpleFormater { override def format: FormatType = TsvType override protected def thinResultToString(r: SearchThinResult): String = s"${r.path}\\t${r.lastModified}\\t${r.uuid}\\t${r.indexTime}" + r.score.fold("")(score => s"\\t$score") override protected def infotonToString(i: Infoton): String = s"${i.path}\\t${cmwell.util.string.dateStringify(i.lastModified)}\\t${i.uuid}\\t${i.indexTime.fold("")(_.toString)}${i.fields .flatMap(_.get("$score").flatMap(_.headOption.map("\\t" + _.value.toString))) .getOrElse("")}" } object PathFormatter extends SimpleFormater { override def format: FormatType = TextType override protected def thinResultToString(r: SearchThinResult): String = r.path override protected def infotonToString(i: Infoton): String = i.path } trait TreeLikeFormatter extends Formatter { type Inner val fieldNameModifier: String => String def makeFromTuples(tuples: Seq[(String, Inner)]): Inner def makeFromValues(values: Seq[Inner]): Inner def single[T](value: T): Inner def singleFieldValue(fv: FieldValue): Inner = fv match { case FString(value: String, _, _) => single(value) case FInt(value: Int, _) => single(value) case FLong(value: Long, _) => single(value) case FBigInt(value: java.math.BigInteger, _) => single(value) case FFloat(value: Float, _) => single(value) case FDouble(value: Double, _) => single(value) 
case FBigDecimal(value: java.math.BigDecimal, _) => single(value) case FBoolean(value: Boolean, _) => single(value) case FDate(value: String, _) => single(value) case FReference(value: String, _) => single({ if (value.startsWith("cmwell://")) value.drop("cmwell:/".length) else value }) case FExternal(value: String, dataTypeURI: String, _) => single(value) case FExtra(v, _) => single(v) case FNull(_) => ??? //this is just a marker for IMP, should not index it anywhere... } def empty: Inner protected def mkString(value: Inner): String override def render(formattable: Formattable): String = mkString(formattable2Inner(formattable)) def formattable2Inner(formattable: Formattable): Inner = formattable match { case r: SearchThinResult => makeFromTuples(thinResult(r, identity, single, single, single)) case i: Infoton => infoton(i) case bag: BagOfInfotons => bagOfInfotons(bag) case ihv: InfotonHistoryVersions => infotonHistoryVersions(ihv) case rp: RetrievablePaths => retrievablePaths(rp) case pi: PaginationInfo => pagination(pi) case sr: SearchResults => searchResults(sr) case sr: SearchResponse => searchResponse(sr) case ir: IterationResults => iterationResults(ir) case sr: SimpleResponse => simpleResponse(sr) case ar: AggregationsResponse => aggregationsResponse(ar) case tar: TermsAggregationResponse => termsAggregationsResponse(tar) case sar: StatsAggregationResponse => statsAggregationsResponse(sar) case har: HistogramAggregationResponse => histogramAggregationResponse(har) case star: SignificantTermsAggregationResponse => significantTermsAggregationResponse(star) case car: CardinalityAggregationResponse => cardinalityAggregationResponse(car) case bar: BucketsAggregationResponse => bucketsAggregationResponse(bar) case ar: AggregationResponse => aggregationResponse(ar) case taf: TermAggregationFilter => termAggregationFilter(taf) case saf: StatsAggregationFilter => statsAggregationFilter(saf) case haf: HistogramAggregationFilter => histogramAggregationFilter(haf) case staf: SignificantTermsAggregationFilter => significantTermsAggregationFilter(staf) case caf: CardinalityAggregationFilter => cardinalityAggregationFilter(caf) case baf: BucketAggregationFilter => bucketAggregationFilter(baf) case af: AggregationFilter => aggregationFilter(af) case sb: SignificantTermsBucket => significantTermsBucket(sb) case b: Bucket => bucket(b) case _ => throw new NotImplementedError(s"formattable2Inner implementation not found for ${formattable.getClass.toString}") } def system(i: Infoton): Inner = makeFromTuples(system(i, identity, single, single, single)) private val memoizedBreakOutIn = scala.collection.breakOut[Set[FieldValue], Inner, Seq[Inner]] private val memoizedBreakOutOut = scala.collection.breakOut[Map[String, Set[FieldValue]], (String, Inner), Seq[(String, Inner)]] def fields(i: Infoton): Inner = i.fields match { case None => empty case Some(xs) => makeFromTuples(xs.collect { case (fieldName, set) if fieldName.head != '$' => fieldNameModifier(fieldName) -> makeFromValues(set.toSeq.map(singleFieldValue)) }(memoizedBreakOutOut)) } def extra(i: Infoton): Seq[(String, Inner)] = i.fields match { case Some(xs) if xs.exists(_._1.head == '$') => Seq("extra" -> makeFromTuples(xs.collect { case (k, vs) if k.head == '$' => { fieldNameModifier(k.tail) -> makeFromValues(vs.collect { case FExtra(v, _) => single(v) }(memoizedBreakOutIn)) } }(memoizedBreakOutOut))) case _ => Nil } def fileContent(c: FileContent): Inner = makeFromTuples(super.fileContent(c, identity, single, single, single)) def infotons(is: 
Seq[Infoton], withDeleted: Boolean = false) = makeFromValues(is.map(infoton)) def infoton(i: Infoton): Inner = { val iSystem: Inner = system(i) val iFields: Seq[(String, Inner)] = i.fields .flatMap { case m if m.forall(_._1.head == '$') => None case m => Some(m) } .fold(Seq.empty[(String, Inner)])(_ => Seq("fields" -> fields(i))) val iExtra: Seq[(String, Inner)] = extra(i) (i: @unchecked) match { case CompoundInfoton(_, _, _, _, _, children, offset, length, total, _, _) => makeFromTuples( Seq( "type" -> single(i.kind), "system" -> iSystem, "children" -> infotons(children), "offset" -> single(offset), "length" -> single(length), "total" -> single(total) ) ++ iExtra ++ iFields ) case ObjectInfoton(_, _, _, _, _, _, _) => makeFromTuples( Seq( "type" -> single(i.kind), "system" -> iSystem ) ++ iExtra ++ iFields ) case FileInfoton(_, _, _, _, _, Some(content), _, _) => makeFromTuples( Seq( "type" -> single(i.kind), "system" -> iSystem, "content" -> fileContent(content) ) ++ iExtra ++ iFields ) case FileInfoton(_, _, _, _, _, None, _, _) => makeFromTuples( Seq( "type" -> single(i.kind), "system" -> iSystem ) ++ iExtra ++ iFields ) case LinkInfoton(_, _, _, _, _, linkTo, linkType, _, _) => makeFromTuples( Seq( "type" -> single(i.kind), "system" -> iSystem, "linkTo" -> single(linkTo), "linkType" -> single(linkType) ) ++ iExtra ++ iFields ) case d: DeletedInfoton => makeFromTuples( Seq( "type" -> single(i.kind), "system" -> iSystem ) ) } } def bagOfInfotons(bag: BagOfInfotons, withDeleted: Boolean = false) = makeFromTuples( Seq( "type" -> single("BagOfInfotons"), "infotons" -> infotons(bag.infotons, withDeleted) ) ) def infotonHistoryVersions(ihv: InfotonHistoryVersions, withDeleted: Boolean = true) = makeFromTuples( Seq( "type" -> single("InfotonHistoryVersions"), "versions" -> infotons(ihv.versions, withDeleted) ) ) def retrievablePaths(rp: RetrievablePaths, withDeleted: Boolean = false) = makeFromTuples( Seq( "type" -> single("RetrievablePaths"), "infotons" -> infotons(rp.infotons, withDeleted), "irretrievablePaths" -> makeFromValues(rp.irretrievablePaths.map(single)) ) ) def pagination(pagination: PaginationInfo) = makeFromTuples(super.pagination(pagination, identity, single, single)) def searchResults(searchResults: SearchResults, withDeleted: Boolean = false) = { val searchQueryStr = if (searchResults.debugInfo.isDefined) Seq(Some("searchQueryStr" -> single(searchResults.debugInfo.get))) else Nil makeFromTuples( (Seq( Some("type" -> single("SearchResults")), searchResults.fromDate.map(d => "fromDate" -> single(d)), searchResults.toDate.map(d => "toDate" -> single(d)), Some("total" -> single(searchResults.total)), Some("offset" -> single(searchResults.offset)), Some("length" -> single(searchResults.length)), Some("infotons" -> infotons(searchResults.infotons, withDeleted)) ) ++ searchQueryStr).collect { case Some(t) => t } ) } def searchResponse(SearchResponse: SearchResponse, withDeleted: Boolean = false) = makeFromTuples( Seq( "type" -> single("SearchResponse"), "pagination" -> pagination(SearchResponse.pagination), "results" -> searchResults(SearchResponse.results) ) ) def iterationResults(iterationResults: IterationResults, withDeleted: Boolean = false) = { val s = Seq( "type" -> single("IterationResults"), "iteratorId" -> single(iterationResults.iteratorId), "totalHits" -> single(iterationResults.totalHits), "infotons" -> infotons(iterationResults.infotons.getOrElse(Seq.empty[Infoton]), withDeleted) ) makeFromTuples(iterationResults.debugInfo.fold(s)(d => s :+ ("searchQueryStr", 
single(d)))) } def simpleResponse(sr: SimpleResponse) = makeFromTuples(super.simpleResponse(sr, identity, single, single)) def aggregationsResponse(ar: AggregationsResponse) = makeFromTuples( Seq("AggregationResponse" -> makeFromValues(ar.responses.map(formattable2Inner))) ++ ar.debugInfo.map("searchQueryStr" -> single(_)) ) def termsAggregationsResponse(tar: TermsAggregationResponse) = makeFromTuples( Seq( "name" -> single(tar.name), "type" -> single(tar.`type`), "filter" -> formattable2Inner(tar.filter), "buckets" -> makeFromValues(tar.buckets.map(formattable2Inner)) ) ) def statsAggregationsResponse(sar: StatsAggregationResponse) = makeFromTuples( Seq( "name" -> single(sar.name), "type" -> single(sar.`type`), "filter" -> formattable2Inner(sar.filter), "count" -> single(sar.count), "min" -> single(sar.min), "max" -> single(sar.max), "avg" -> single(sar.avg), "sum" -> single(sar.sum) ) ) def histogramAggregationResponse(har: HistogramAggregationResponse) = makeFromTuples( Seq( "name" -> single(har.name), "type" -> single(har.`type`), "filter" -> formattable2Inner(har.filter), "buckets" -> makeFromValues(har.buckets.map(formattable2Inner)) ) ) def significantTermsAggregationResponse(star: SignificantTermsAggregationResponse) = makeFromTuples( Seq( "name" -> single(star.name), "type" -> single(star.`type`), "filter" -> formattable2Inner(star.filter), "objects" -> single(star.docCount), "buckets" -> makeFromValues(star.buckets.map(formattable2Inner)) ) ) def cardinalityAggregationResponse(car: CardinalityAggregationResponse) = makeFromTuples( Seq( "name" -> single(car.name), "type" -> single(car.`type`), "filter" -> formattable2Inner(car.filter), "count" -> single(car.count) ) ) def bucketsAggregationResponse(bar: BucketsAggregationResponse) = makeFromTuples( Seq( "name" -> single(bar.name), "type" -> single(bar.`type`), "filter" -> formattable2Inner(bar.filter), "buckets" -> makeFromValues(bar.buckets.map(formattable2Inner)) ) ) def aggregationResponse(ar: AggregationResponse) = makeFromTuples( Seq( "name" -> single(ar.name), "type" -> single(ar.`type`), "filter" -> formattable2Inner(ar.filter) ) ) def aggregationFilter(af: AggregationFilter) = makeFromTuples( Seq( "name" -> single(af.name), "type" -> single(af.`type`) ) ) def bucketAggregationFilter(baf: BucketAggregationFilter) = makeFromTuples( Seq( "name" -> single(baf.name), "type" -> single(baf.`type`), "subFilters" -> makeFromValues(baf.subFilters.map(formattable2Inner)) ) ) def statsAggregationFilter(saf: StatsAggregationFilter) = makeFromTuples( Seq( "name" -> single(saf.name), "type" -> single(saf.`type`), "field" -> single(saf.field.value) ) ) def termAggregationFilter(taf: TermAggregationFilter) = makeFromTuples( Seq( "name" -> single(taf.name), "type" -> single(taf.`type`), "field" -> single(taf.field.value), "size" -> single(taf.size), "subFilters" -> makeFromValues(taf.subFilters.map(formattable2Inner)) ) ) def histogramAggregationFilter(haf: HistogramAggregationFilter) = makeFromTuples( Seq( "name" -> single(haf.name), "type" -> single(haf.`type`), "field" -> single(haf.field.value), "interval" -> single(haf.interval), "minimum doc count" -> single(haf.minDocCount) ) ++ haf.extMin.map("extMin" -> single(_)) ++ haf.extMax.map("extMax" -> single(_)) ++ Seq("subFilters" -> makeFromValues(haf.subFilters.map(formattable2Inner))) ) def significantTermsAggregationFilter(staf: SignificantTermsAggregationFilter) = makeFromTuples( Seq( "name" -> single(staf.name), "type" -> single(staf.`type`), "field" -> single(staf.field.value), 
"minimum doc count" -> single(staf.minDocCount), "size" -> single(staf.size) ) ++ staf.backgroundTerm.map("background term" -> single(_)) ++ Seq("subFilters" -> makeFromValues(staf.subFilters.map(formattable2Inner))) ) def cardinalityAggregationFilter(caf: CardinalityAggregationFilter) = makeFromTuples( Seq("name" -> single(caf.name), "type" -> single(caf.`type`), "field" -> single(caf.field.value)) ++ caf.precisionThreshold.map("precision threshold" -> single(_)) ) def bucket(b: Bucket) = makeFromTuples( Seq("key" -> single(b.key.value), "objects" -> single(b.docCount)) ++ b.subAggregations.map("subAggregations" -> formattable2Inner(_)) ) def significantTermsBucket(b: SignificantTermsBucket) = makeFromTuples( Seq("key" -> single(b.key.value), "objects" -> single(b.docCount), "score" -> single(b.score), "bgCount" -> single(b.bgCount)) ++ b.subAggregations.map("subAggregations" -> formattable2Inner(_)) ) }
thomsonreuters/CM-Well
server/cmwell-formats/src/main/scala/cmwell/formats/Formatter.scala
Scala
apache-2.0
22,866
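cleanDuplicatesPreserveOrder in the Formatter trait above folds a (Seq, Set) pair over the input to drop duplicates while keeping first-occurrence order. Note that, as it appears above, the second case returns `set` unchanged, so the "seen" set never grows; the sketch below adds the element (`set + v`), which is presumably the intent, and uses foldLeft in place of the older `/:` operator. Illustrative only.

// Order-preserving de-duplication via a (Seq, Set) accumulator.
object DedupPreserveOrderSketch extends App {

  def cleanDuplicatesPreserveOrder[T](values: Seq[T]): Seq[T] =
    values.foldLeft(Seq.empty[T] -> Set.empty[T]) {
      case (acc @ (seen, set), v) if set(v) => acc                       // already seen: skip
      case ((seen, set), v)                 => (seen :+ v) -> (set + v)  // first occurrence: keep
    }._1

  println(cleanDuplicatesPreserveOrder(Seq(3, 1, 3, 2, 1))) // List(3, 1, 2)
}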
package org.http4s.server.middleware.authentication

import java.util.LinkedHashMap

import scala.annotation.tailrec

private[authentication] object NonceKeeper {
  sealed abstract class Reply
  case object StaleReply extends Reply
  case object OKReply extends Reply
  case object BadNCReply extends Reply
}

/**
 * A thread-safe class used to manage a database of nonces.
 *
 * @param staleTimeout Amount of time (in milliseconds) after which a nonce
 *                     is considered stale (i.e. not used for authentication
 *                     purposes anymore).
 * @param bits The number of random bits a nonce should consist of.
 */
private[authentication] class NonceKeeper(staleTimeout: Long, nonceCleanupInterval: Long, bits: Int) {
  require(bits > 0, "Please supply a positive integer for bits.")

  private val nonces = new LinkedHashMap[String, Nonce]
  private var lastCleanup = System.currentTimeMillis()

  /**
   * Removes nonces that are older than staleTimeout.
   * Note: this _MUST_ be executed inside a block synchronized on `nonces`.
   */
  private def checkStale() = {
    val d = System.currentTimeMillis()
    if (d - lastCleanup > nonceCleanupInterval) {
      lastCleanup = d
      // Because we are using a LinkedHashMap, the keys will be returned in the order they were
      // inserted. Therefore, once we reach a non-stale value, the remaining values are also not stale.
      val it = nonces.values().iterator()
      @tailrec
      def dropStale(): Unit = {
        if (it.hasNext && staleTimeout > d - it.next().created.getTime) {
          it.remove()
          dropStale()
        }
      }
      dropStale()
    }
  }

  /**
   * Get a fresh nonce in the form of a {@link String}.
   * @return A fresh nonce.
   */
  def newNonce(): String = {
    var n: Nonce = null
    nonces.synchronized {
      checkStale()
      do {
        n = Nonce.gen(bits)
      } while (nonces.get(n.data) != null)
      nonces.put(n.data, n)
    }
    n.data
  }

  /**
   * Checks if the nonce {@link data} is known and the {@link nc} value is
   * correct. If this is so, the nc value associated with the nonce is increased
   * and the appropriate status is returned.
   * @param data The nonce.
   * @param nc The nonce counter.
   * @return A reply indicating the status of (data, nc).
   */
  def receiveNonce(data: String, nc: Int): NonceKeeper.Reply =
    nonces.synchronized {
      checkStale()
      nonces.get(data) match {
        case null => NonceKeeper.StaleReply
        case n: Nonce =>
          if (nc > n.nc) {
            n.nc = n.nc + 1
            NonceKeeper.OKReply
          } else
            NonceKeeper.BadNCReply
      }
    }
}
ZizhengTai/http4s
server/src/main/scala/org/http4s/server/middleware/authentication/NonceKeeper.scala
Scala
apache-2.0
2,698
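NonceKeeper above relies on java.util.LinkedHashMap iterating in insertion order, so a cleanup sweep can remove stale entries oldest-first and stop at the first fresh one. The sketch below shows that idiom in isolation; timestamps and names are illustrative. It removes entries whose age exceeds the timeout, which is what NonceKeeper's doc comment describes (the comparison in the file itself reads the other way around, so treat this as the documented intent rather than a verbatim copy).

import java.util.LinkedHashMap
import scala.annotation.tailrec

object InsertionOrderExpirySketch extends App {
  final case class Entry(created: Long) // creation time in ms; illustrative

  val staleTimeout = 1000L // ms
  val entries = new LinkedHashMap[String, Entry]

  // Remove entries older than `staleTimeout`, oldest first, stopping at the first
  // fresh one: everything inserted after it is younger, hence also fresh.
  def dropStale(now: Long): Unit = {
    val it = entries.values().iterator()
    @tailrec def loop(): Unit =
      if (it.hasNext && now - it.next().created > staleTimeout) {
        it.remove()
        loop()
      }
    loop()
  }

  entries.put("a", Entry(created = 0L))
  entries.put("b", Entry(created = 500L))
  entries.put("c", Entry(created = 1500L))
  dropStale(now = 2000L)
  println(entries.keySet()) // [c] -- "a" and "b" aged past the timeout, "c" did not
}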
/*
 * Protein.scala
 * A protein class, which is an Element[AminoAcidSequence].
 *
 * Created By:    Brian Ruttenberg (bruttenberg@cra.com)
 * Creation Date: Oct 1, 2012
 *
 * Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
 * See http://www.cra.com or email figaro@cra.com for information.
 *
 * See http://www.github.com/p2t2/figaro for a copy of the software license.
 */

package com.cra.figaro.example.dosage

import com.cra.figaro.language._
import com.cra.figaro.library.atomic.discrete._
import com.cra.figaro.example.dosage.Conversion._

/**
 * A protein class, which is an Element[AminoAcidSequence].
 * Given an input string, the protein represents a distribution over
 * the possible AminoAcidSequences that can be generated from the string
 */
class Protein(name: Name[AminoAcidSequence], arg1: String, collection: ElementCollection)
  extends Apply1[List[AminoAcidSequence], AminoAcidSequence](name, Protein.genInjectFcn(arg1), Protein.genApplyFcn, collection)

object Protein {

  def genInjectFcn = (s: String) => genFcn(s)
  def genApplyFcn = (l: List[AminoAcidSequence]) => (AminoAcidSequence("") /: l)(_ + _)

  /* A '-' means any AA, so this is a uniform distribution over existing AA. A
   * named AA is represented as a constant */
  def genFcn(s: String): Inject[AminoAcidSequence] = {
    val elems = s.map { c =>
      c match {
        case '-' => Uniform(aaListAsSeq: _*)
        case _   => Constant(AminoAcidSequence(c.toString))
      }
    }
    Inject(elems: _*)
  }

  def apply(arg: String)(implicit name: Name[AminoAcidSequence], collection: ElementCollection) =
    new Protein(name, arg, collection)
}
bruttenberg/figaro
FigaroExamples/src/main/scala/com/cra/figaro/example/dosage/Protein.scala
Scala
bsd-3-clause
1,663
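Protein.genFcn above maps each character of the pattern to an Element: '-' becomes a Uniform over all amino acids and any other character a Constant, and Inject combines the per-position elements into a distribution over sequences. The pure-Scala sketch below shows the same cross-product idea by enumerating the sequences a pattern can produce; the four-letter alphabet is an illustrative stand-in for aaListAsSeq, and Figaro is not used.

// Enumerate the concrete sequences a pattern such as "A-C" can stand for.
object PatternExpansionSketch extends App {

  val aminoAcids = Seq('A', 'C', 'D', 'E') // illustrative subset of the real alphabet

  def choicesFor(c: Char): Seq[Char] =
    if (c == '-') aminoAcids else Seq(c)

  // Cross product of per-position choices, mirroring Inject over per-position Elements.
  def expand(pattern: String): Seq[String] =
    pattern.foldLeft(Seq("")) { (acc, c) =>
      for (prefix <- acc; ch <- choicesFor(c)) yield prefix + ch
    }

  println(expand("A-C")) // List(AAC, ACC, ADC, AEC)
}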
package mesosphere.marathon.integration.facades

import MesosFacade.{ ITResourcePortValue, ITResourceScalarValue, ITResources }

object MesosFormats {
  import MesosFacade._
  import mesosphere.marathon.api.v2.json.Formats.FormatWithDefault
  import play.api.libs.functional.syntax._
  import play.api.libs.json._

  implicit lazy val ITResourceScalarValueFormat: Format[ITResourceScalarValue] = Format(
    Reads.of[Double].map(ITResourceScalarValue(_)),
    Writes(scalarValue => JsNumber(scalarValue.value))
  )

  implicit lazy val ITResourcePortValueFormat: Format[ITResourcePortValue] = Format(
    Reads.of[String].map(ITResourcePortValue(_)),
    Writes(portValue => JsString(portValue.portString))
  )

  implicit lazy val ITResourceValueFormat: Format[ITResourceValue] = Format(
    Reads[ITResourceValue] {
      case JsNumber(value) => JsSuccess(ITResourceScalarValue(value.toDouble))
      case JsString(portsString) => JsSuccess(ITResourcePortValue(portsString))
      case _ => JsError("expected string or number")
    },
    Writes[ITResourceValue] {
      case ITResourceScalarValue(value) => JsNumber(value)
      case ITResourcePortValue(portsString) => JsString(portsString)
    }
  )

  implicit lazy val ITResourcesFormat: Format[ITResources] = Format(
    Reads.of[Map[String, ITResourceValue]].map(ITResources(_)),
    Writes[ITResources](resources => Json.toJson(resources.resources))
  )

  implicit lazy val ITAgentFormat: Format[ITAgent] = (
    (__ \ "id").format[String] ~
      (__ \ "resources").formatNullable[ITResources].withDefault(ITResources.empty) ~
      (__ \ "used_resources").formatNullable[ITResources].withDefault(ITResources.empty) ~
      (__ \ "offered_resources").formatNullable[ITResources].withDefault(ITResources.empty) ~
      (__ \ "reserved_resources").formatNullable[Map[String, ITResources]].withDefault(Map.empty) ~
      (__ \ "unreserved_resources").formatNullable[ITResources].withDefault(ITResources.empty)
    )(ITAgent.apply, unlift(ITAgent.unapply))

  implicit lazy val ITStatusFormat: Format[ITMesosState] = (
    (__ \ "version").format[String] ~
      (__ \ "git_tag").formatNullable[String] ~
      (__ \ "slaves").format[Iterable[ITAgent]]
    )(ITMesosState.apply, unlift(ITMesosState.unapply))
}
ss75710541/marathon
src/test/scala/mesosphere/marathon/integration/facades/MesosFormats.scala
Scala
apache-2.0
2,282
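MesosFormats above builds its Format instances with play-json's functional combinators; withDefault comes from marathon's own FormatWithDefault helper (imported at the top of the object), not from play-json itself. The sketch below shows the same combinator pattern using stock play-json only, on a hypothetical two-field case class.

import play.api.libs.json._
import play.api.libs.functional.syntax._

object PlayJsonCombinatorSketch extends App {
  // Hypothetical shape; only stock play-json is used here.
  final case class Agent(id: String, version: Option[String])

  implicit val agentFormat: Format[Agent] = (
    (__ \ "id").format[String] ~
      (__ \ "version").formatNullable[String]
    )(Agent.apply, unlift(Agent.unapply))

  println(Json.parse("""{"id":"agent-1"}""").validate[Agent]) // a JsSuccess with version = None
  println(Json.toJson(Agent("agent-1", Some("1.0"))))         // {"id":"agent-1","version":"1.0"}
}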
/*
 * Copyright 2015 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.atlas.core.model

import nl.jqno.equalsverifier.EqualsVerifier
import nl.jqno.equalsverifier.Warning
import org.scalatest.FunSuite

class ArrayTimeSeqSuite extends FunSuite {

  test("equals") {
    EqualsVerifier
      .forClass(classOf[ArrayTimeSeq])
      .suppress(Warning.NULL_FIELDS)
      .verify()
  }
}
jasimmk/atlas
atlas-core/src/test/scala/com/netflix/atlas/core/model/ArrayTimeSeqSuite.scala
Scala
apache-2.0
929
package nodes.images

import breeze.linalg.DenseVector

import pipelines._
import utils.{ImageMetadata, ChannelMajorArrayVectorizedImage, Image}
import workflow.Transformer

import utils.external.NativeRoutines

/**
 * This node takes an image and performs pooling on regions of the image,
 * based on Pooler and SymmetricRectifier. It doubles the number of channels.
 */
class NativePoolingSymmetricRectifier(
    stride: Int,
    poolSize: Int,
    maxVal: Double = 0.0,
    alpha: Double = 0.0,
    numChannels: Int = 3,
    xDim: Int,
    yDim: Int
  ) extends Transformer[Array[Double], Array[Double]] {

  @transient lazy val extLib = new NativeRoutines()

  def apply(image: Array[Double]) = {
    val out: Array[Double] = extLib.poolAndRectify(stride, poolSize, numChannels, xDim, yDim,
      maxVal, alpha, image)
    out
  }
}
Vaishaal/ckm
keystone_pipeline/src/main/scala/nodes/NativePoolingSymmetricRectifier.scala
Scala
apache-2.0
812
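NativePoolingSymmetricRectifier delegates the actual work to a native routine, so only its interface is visible above. As an assumption about what "symmetric rectifier, doubles the number of channels" means (mirroring KeystoneML's SymmetricRectifier node, not the native code itself), the sketch below shows the rectification half on a plain array; the spatial pooling and the channel-major layout handled by poolAndRectify are deliberately left out.

// Each input value x becomes the pair (max(maxVal, x - alpha), max(maxVal, -x - alpha)),
// which is why the channel count doubles. Assumed semantics, illustrative only.
object SymmetricRectifySketch extends App {

  def symmetricRectify(xs: Array[Double], maxVal: Double = 0.0, alpha: Double = 0.0): Array[Double] =
    xs.flatMap(x => Array(math.max(maxVal, x - alpha), math.max(maxVal, -x - alpha)))

  println(symmetricRectify(Array(1.5, -2.0, 0.25)).mkString(", "))
  // 1.5, 0.0, 0.0, 2.0, 0.25, 0.0
}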
/*
 * Copyright 2021 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package uk.gov.hmrc.ct.accounts.frs10x.boxes

import uk.gov.hmrc.ct.accounts.frs10x.retriever.Frs10xAccountsBoxRetriever
import uk.gov.hmrc.ct.accounts.{AccountsMoneyValidationFixture, AccountsPreviousPeriodValidationFixture, MockFullAccountsRetriever}

class AC15Spec extends AccountsMoneyValidationFixture[Frs10xAccountsBoxRetriever]
  with AccountsPreviousPeriodValidationFixture[Frs10xAccountsBoxRetriever]
  with MockFullAccountsRetriever {

  testAccountsMoneyValidationWithMin("AC15", 0, AC15.apply)
  testAccountsPreviousPoAValidation("AC15", AC15.apply)
}
hmrc/ct-calculations
src/test/scala/uk/gov/hmrc/ct/accounts/frs10x/boxes/AC15Spec.scala
Scala
apache-2.0
1,216
/* NSC -- new Scala compiler * Copyright 2005-2013 LAMP/EPFL * @author Martin Odersky */ package scala.tools.nsc package symtab import classfile.ClassfileParser import java.io.IOException import scala.reflect.internal.MissingRequirementError import scala.reflect.internal.util.Statistics import scala.reflect.io.{AbstractFile, NoAbstractFile} import scala.tools.nsc.util.{ClassPath, ClassRepresentation} /** This class ... * * @author Martin Odersky * @version 1.0 */ abstract class SymbolLoaders { val symbolTable: symtab.SymbolTable { def settings: Settings } val platform: backend.Platform { val symbolTable: SymbolLoaders.this.symbolTable.type } import symbolTable._ /** * Required by ClassfileParser. Check documentation in that class for details. */ def lookupMemberAtTyperPhaseIfPossible(sym: Symbol, name: Name): Symbol /** * Should forward to `Run.compileLate`. The more principled fix would be to * determine why this functionality is needed and extract it into a separate * interface. */ protected def compileLate(srcfile: AbstractFile): Unit import SymbolLoadersStats._ protected def enterIfNew(owner: Symbol, member: Symbol, completer: SymbolLoader): Symbol = { assert(owner.info.decls.lookup(member.name) == NoSymbol, owner.fullName + "." + member.name) owner.info.decls enter member member } protected def signalError(root: Symbol, ex: Throwable) { if (settings.debug) ex.printStackTrace() globalError(ex.getMessage() match { case null => "i/o error while loading " + root.name case msg => "error while loading " + root.name + ", " + msg }) } def newClass(owner: Symbol, name: String): ClassSymbol = owner.newClass(newTypeName(name)) /** Enter class with given `name` into scope of `root` * and give them `completer` as type. */ def enterClass(owner: Symbol, name: String, completer: SymbolLoader): Symbol = enterClass(owner, newClass(owner, name), completer) def enterClass(owner: Symbol, clazz: ClassSymbol, completer: SymbolLoader): Symbol = { clazz setInfo completer enterIfNew(owner, clazz, completer) } def newModule(owner: Symbol, name: String): ModuleSymbol = owner.newModule(newTermName(name)) /** Enter module with given `name` into scope of `root` * and give them `completer` as type. */ def enterModule(owner: Symbol, name: String, completer: SymbolLoader): Symbol = enterModule(owner, newModule(owner, name), completer) def enterModule(owner: Symbol, module: ModuleSymbol, completer: SymbolLoader): Symbol = { module setInfo completer module.moduleClass setInfo moduleClassLoader enterIfNew(owner, module, completer) } /** Enter package with given `name` into scope of `root` * and give them `completer` as type. */ def enterPackage(root: Symbol, name: String, completer: SymbolLoader): Symbol = { val pname = newTermName(name) val preExisting = root.info.decls lookup pname if (preExisting != NoSymbol) { // Some jars (often, obfuscated ones) include a package and // object with the same name. Rather than render them unusable, // offer a setting to resolve the conflict one way or the other. // This was motivated by the desire to use YourKit probes, which // require yjp.jar at runtime. See SI-2089. if (settings.termConflict.isDefault) throw new TypeError( s"$root contains object and package with same name: $name\\none of them needs to be removed from classpath" ) else if (settings.termConflict.value == "package") { warning( "Resolving package/object name conflict in favor of package " + preExisting.fullName + ". The object will be inaccessible." 
) root.info.decls.unlink(preExisting) } else { warning( "Resolving package/object name conflict in favor of object " + preExisting.fullName + ". The package will be inaccessible." ) return NoSymbol } } // todo: find out initialization sequence for pkg/pkg.moduleClass is different from enterModule val pkg = root.newPackage(pname) pkg.moduleClass setInfo completer pkg setInfo pkg.moduleClass.tpe root.info.decls enter pkg pkg } /** Enter class and module with given `name` into scope of `root` * and give them `completer` as type. */ def enterClassAndModule(root: Symbol, name: String, getCompleter: (ClassSymbol, ModuleSymbol) => SymbolLoader) { val clazz = newClass(root, name) val module = newModule(root, name) val completer = getCompleter(clazz, module) enterClass(root, clazz, completer) enterModule(root, module, completer) if (!clazz.isAnonymousClass) { // Diagnostic for SI-7147 def msg: String = { def symLocation(sym: Symbol) = if (sym == null) "null" else s"${clazz.fullLocationString} (from ${clazz.associatedFile})" sm"""Inconsistent class/module symbol pair for `$name` loaded from ${symLocation(root)}. |clazz = ${symLocation(clazz)}; clazz.companionModule = ${clazz.companionModule} |module = ${symLocation(module)}; module.companionClass = ${module.companionClass}""" } assert(clazz.companionModule == module, msg) assert(module.companionClass == clazz, msg) } } /** In batch mode: Enter class and module with given `name` into scope of `root` * and give them a source completer for given `src` as type. * In IDE mode: Find all toplevel definitions in `src` and enter then into scope of `root` * with source completer for given `src` as type. * (overridden in interactive.Global). */ def enterToplevelsFromSource(root: Symbol, name: String, src: AbstractFile) { enterClassAndModule(root, name, (_, _) => new SourcefileLoader(src)) } /** The package objects of scala and scala.reflect should always * be loaded in binary if classfiles are available, even if sourcefiles * are newer. Late-compiling these objects from source leads to compilation * order issues. * Note: We do a name-base comparison here because the method is called before we even * have ReflectPackage defined. */ def binaryOnly(owner: Symbol, name: String): Boolean = name == "package" && (owner.fullName == "scala" || owner.fullName == "scala.reflect") /** Initialize toplevel class and module symbols in `owner` from class path representation `classRep` */ def initializeFromClassPath(owner: Symbol, classRep: ClassRepresentation) { ((classRep.binary, classRep.source) : @unchecked) match { case (Some(bin), Some(src)) if platform.needCompile(bin, src) && !binaryOnly(owner, classRep.name) => if (settings.verbose) inform("[symloader] picked up newer source file for " + src.path) enterToplevelsFromSource(owner, classRep.name, src) case (None, Some(src)) => if (settings.verbose) inform("[symloader] no class, picked up source file for " + src.path) enterToplevelsFromSource(owner, classRep.name, src) case (Some(bin), _) => enterClassAndModule(owner, classRep.name, new ClassfileLoader(bin, _, _)) } } /** * A lazy type that completes itself by calling parameter doComplete. * Any linked modules/classes or module classes are also initialized. 
* Todo: consider factoring out behavior from TopClassCompleter/SymbolLoader into * supertrait SymLoader */ abstract class SymbolLoader extends SymLoader { /** Load source or class file for `root`, return */ protected def doComplete(root: Symbol): Unit def sourcefile: Option[AbstractFile] = None /** * Description of the resource (ClassPath, AbstractFile) * being processed by this loader */ protected def description: String private var ok = false private def setSource(sym: Symbol) { sourcefile foreach (sf => sym match { case cls: ClassSymbol => cls.associatedFile = sf case mod: ModuleSymbol => mod.moduleClass.associatedFile = sf case _ => () }) } override def complete(root: Symbol) { try { val start = java.util.concurrent.TimeUnit.NANOSECONDS.toMillis(System.nanoTime()) val currentphase = phase doComplete(root) phase = currentphase informTime("loaded " + description, start) ok = true setSource(root) setSource(root.companionSymbol) // module -> class, class -> module } catch { case ex @ (_: IOException | _: MissingRequirementError) => ok = false signalError(root, ex) } initRoot(root) if (!root.isPackageClass) initRoot(root.companionSymbol) } override def load(root: Symbol) { complete(root) } private def markAbsent(sym: Symbol): Unit = { val tpe: Type = if (ok) NoType else ErrorType if (sym != NoSymbol) sym setInfo tpe } private def initRoot(root: Symbol) { if (root.rawInfo == this) List(root, root.moduleClass) foreach markAbsent else if (root.isClass && !root.isModuleClass) root.rawInfo.load(root) } } private def phaseBeforeRefchecks: Phase = { var resPhase = phase while (resPhase.refChecked) resPhase = resPhase.prev resPhase } /** * Loads contents of a package */ class PackageLoader(packageName: String, classPath: ClassPath) extends SymbolLoader with FlagAgnosticCompleter { protected def description = { val shownPackageName = if (packageName == ClassPath.RootPackage) "<root package>" else packageName s"package loader $shownPackageName" } protected def doComplete(root: Symbol) { assert(root.isPackageClass, root) root.setInfo(new PackageClassInfoType(newScope, root)) val classPathEntries = classPath.list(packageName) if (!root.isRoot) for (entry <- classPathEntries.classesAndSources) initializeFromClassPath(root, entry) if (!root.isEmptyPackageClass) { for (pkg <- classPathEntries.packages) { val fullName = pkg.name val name = if (packageName == ClassPath.RootPackage) fullName else fullName.substring(packageName.length + 1) val packageLoader = new PackageLoader(fullName, classPath) enterPackage(root, name, packageLoader) } openPackageModule(root) } } } val completedClassfiles = scala.collection.mutable.ListBuffer[AbstractFile]() class ClassfileLoader(val classfile: AbstractFile, clazz: ClassSymbol, module: ModuleSymbol) extends SymbolLoader with FlagAssigningCompleter { private object classfileParser extends { val symbolTable: SymbolLoaders.this.symbolTable.type = SymbolLoaders.this.symbolTable } with ClassfileParser { override protected type ThisConstantPool = ConstantPool override protected def newConstantPool: ThisConstantPool = new ConstantPool override protected def lookupMemberAtTyperPhaseIfPossible(sym: Symbol, name: Name): Symbol = SymbolLoaders.this.lookupMemberAtTyperPhaseIfPossible(sym, name) /* * The type alias and the cast (where the alias is used) is needed due to problem described * in SI-7585. In this particular case, the problem is that we need to make sure that symbol * table used by symbol loaders is exactly the same as they one used by classfileParser. 
* If you look at the path-dependent types we have here everything should work out ok but * due to issue described in SI-7585 type-checker cannot tie the knot here. * */ private type SymbolLoadersRefined = SymbolLoaders { val symbolTable: classfileParser.symbolTable.type } val loaders = SymbolLoaders.this.asInstanceOf[SymbolLoadersRefined] override def classPath: ClassPath = platform.classPath } protected def description = "class file "+ classfile.toString protected def doComplete(root: Symbol) { completedClassfiles += classfile val start = if (Statistics.canEnable) Statistics.startTimer(classReadNanos) else null classfileParser.parse(classfile, clazz, module) if (root.associatedFile eq NoAbstractFile) { root match { // In fact, the ModuleSymbol forwards its setter to the module class case _: ClassSymbol | _: ModuleSymbol => debuglog("ClassfileLoader setting %s.associatedFile = %s".format(root.name, classfile)) root.associatedFile = classfile case _ => debuglog("Not setting associatedFile to %s because %s is a %s".format(classfile, root.name, root.shortSymbolClass)) } } if (Statistics.canEnable) Statistics.stopTimer(classReadNanos, start) } override def sourcefile: Option[AbstractFile] = classfileParser.srcfile } class SourcefileLoader(val srcfile: AbstractFile) extends SymbolLoader with FlagAssigningCompleter { protected def description = "source file "+ srcfile.toString override def fromSource = true override def sourcefile = Some(srcfile) protected def doComplete(root: Symbol): Unit = compileLate(srcfile) } object moduleClassLoader extends SymbolLoader with FlagAssigningCompleter { protected def description = "module class loader" protected def doComplete(root: Symbol) { root.sourceModule.initialize } } /** used from classfile parser to avoid cycles */ var parentsLevel = 0 var pendingLoadActions: List[() => Unit] = Nil } object SymbolLoadersStats { import scala.reflect.internal.TypesStats.typerNanos val classReadNanos = Statistics.newSubTimer ("time classfilereading", typerNanos) }
jvican/scala
src/compiler/scala/tools/nsc/symtab/SymbolLoaders.scala
Scala
bsd-3-clause
13,759
package model import org.bson.types.ObjectId import org.joda.time.{Interval, LocalDate, DateTime} case class Donation( id: ObjectId = new ObjectId, posted: DateTime, donation: String, location: String, collectDate: LocalDate, collectTime: Interval, weight: Double, status: DonationStatus.Value, charity: String) { }
foodcloud/bonobo
app/model/Donation.scala
Scala
apache-2.0
509
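A minimal construction sketch for the Donation model above, assuming the bson and joda-time dependencies its fields require are on the classpath. DonationStatus is defined elsewhere in the repository, so the value name Pending used below is an assumption, as are the sample field values.

import org.bson.types.ObjectId
import org.joda.time.{DateTime, Interval, LocalDate}
import model.{Donation, DonationStatus}

object DonationExample extends App {
  val now = DateTime.now()

  val donation = Donation(
    id = new ObjectId,                                 // defaulted, shown for clarity
    posted = now,
    donation = "20 loaves of bread",
    location = "Dublin",
    collectDate = LocalDate.now(),
    collectTime = new Interval(now, now.plusHours(2)), // pickup window
    weight = 5.0,
    status = DonationStatus.Pending,                   // assumed enum value
    charity = "Local food bank"
  )

  println(donation)
}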
/* Copyright 2009-2021 EPFL, Lausanne */ package stainless package ast trait TypeOps extends inox.ast.TypeOps { protected val trees: Trees import trees._ import symbols.{given, _} def unapplyAccessorResultType(id: Identifier, inType: Type): Option[Type] = lookupFunction(id) .filter(_.params.size == 1) .flatMap { fd => instantiation(fd.params.head.tpe, inType) .filter(tpMap => fd.typeArgs forall (tpMap contains _)) .map(typeOps.instantiateType(fd.returnType, _)) } def patternIsTyped(in: Type, pat: Pattern): Boolean = { require(in != Untyped) pat match { case WildcardPattern(ob) => ob.forall(vd => isSubtypeOf(in, vd.getType)) case LiteralPattern(ob, lit) => ob.forall(vd => isSubtypeOf(vd.getType, in)) && isSubtypeOf(lit.getType, in) case ADTPattern(ob, id, tps, subs) => in.getType match { case ADTType(sort, tps2) => tps.map(_.getType) == tps2 && ob.forall(vd => isSubtypeOf(vd.getType, in)) && lookupConstructor(id).exists { cons => cons.sort == sort && cons.fields.size == subs.size && lookupSort(sort).exists(sort => sort.tparams.size == tps.size) && (cons.typed(tps).fields zip subs).forall { case (vd, sub) => patternIsTyped(vd.getType, sub) } } case _ => false } case TuplePattern(ob, subs) => in match { case TupleType(tps) => tps.size == subs.size && ob.forall(vd => isSubtypeOf(vd.getType, in)) && ((tps zip subs) forall (patternIsTyped(_, _)).tupled) case _ => false } case up @ UnapplyPattern(ob, recs, id, tps, subs) => ob.forall(vd => isSubtypeOf(vd.getType, in)) && lookupFunction(id).exists(_.tparams.size == tps.size) && { val unapp = up.getFunction unapp.params.nonEmpty && ob.forall(vd => isSubtypeOf(unapp.params.last.getType, vd.getType)) && (recs zip unapp.params.init).forall { case (r, vd) => isSubtypeOf(r.getType, vd.getType) } && unapp.flags .collectFirst { case IsUnapply(isEmpty, get) => (isEmpty, get) } .exists { case (isEmpty, get) => unapplyAccessorResultType(isEmpty, unapp.returnType).exists(isSubtypeOf(_, BooleanType())) && unapplyAccessorResultType(get, unapp.returnType).exists { case TupleType(tps) => tps.size == subs.size && ((tps zip subs) forall (patternIsTyped(_, _)).tupled) case tpe if subs.size == 1 => patternIsTyped(tpe, subs.head) case UnitType() if subs.isEmpty => true case _ => false } } } } } def replaceKeepPositions(subst: Map[Variable, Expr], tpe: Type): Type = { new ConcreteStainlessSelfTreeTransformer { override def transform(expr: Expr): Expr = expr match { case v: Variable => subst.getOrElse(v, v).copiedFrom(v) case _ => super.transform(expr) } }.transform(tpe) } }
epfl-lara/stainless
core/src/main/scala/stainless/ast/TypeOps.scala
Scala
apache-2.0
3,142
package com.twitter.finagle.context import com.twitter.finagle.benchmark.StdBenchAnnotations import org.openjdk.jmh.annotations.{Benchmark, Level, Param, Scope, Setup, State} import org.openjdk.jmh.infra.Blackhole // ./sbt 'project finagle-benchmark' 'jmh:run LocalContextBenchmark -prof gc' @State(Scope.Benchmark) class LocalContextBenchmark extends StdBenchAnnotations { import com.twitter.finagle.context.Contexts.local @Param(Array("5")) var depth: Int = 0 var env: Map[Contexts.local.Key[_], Any] = _ val unusedKey = new local.Key[Any] val realKey = new local.Key[Int] @Setup(Level.Iteration) def setup(): Unit = { val pairs = (0 until depth).map { i => val k = new local.Key[Int] local.KeyValuePair(k, 0) } :+ local.KeyValuePair(realKey, 10) env = local.let(pairs) { local.env } } @Benchmark def let(): Int = doInContext(doLet) private val doLet: () => Int = () => { local.let(realKey, 15) { Int.MaxValue } } @Benchmark def get(): Boolean = doInContext(doGet) private val doGet: () => Boolean = () => { local.get(unusedKey).isEmpty && local.get(realKey).isDefined } @Benchmark def getOrElse(bh: Blackhole): Int = doInContext(doGetOrElse, bh) private val doGetOrElse: (Blackhole) => Int = (bh: Blackhole) => { bh.consume { local.getOrElse(unusedKey, () => None) } local.getOrElse(realKey, () => ???) } @Benchmark def letClear(): Int = doInContext(doLetClear) private val doLetClear: () => Int = () => { local.letClear(realKey) { Int.MaxValue } } def doInContext[T](fn: () => T): T = { local.letLocal(env)(fn()) } def doInContext[T](fn: Blackhole => T, bh: Blackhole): T = { local.letLocal(env)(fn(bh)) } }
twitter/finagle
finagle-benchmark/src/main/scala/com/twitter/finagle/context/LocalContextBenchmark.scala
Scala
apache-2.0
1,776
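A small standalone sketch of the Contexts.local API that the benchmark above exercises; it uses only the calls already shown there (new Key, let, get).

import com.twitter.finagle.context.Contexts

object LocalContextExample extends App {
  // a key for the request-local context
  val key = new Contexts.local.Key[Int]

  // the binding is only visible inside the `let` block
  val result = Contexts.local.let(key, 42) {
    Contexts.local.get(key).getOrElse(0) + 1
  }

  println(result)                  // 43
  println(Contexts.local.get(key)) // None: the binding is gone outside the block
}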
/* ____ __ ____ ____ ____,,___ ____ __ __ ____ * ( _ \\ /__\\ (_ )(_ _)( ___)/ __) ( _ \\( )( )( _ \\ Read * ) / /(__)\\ / /_ _)(_ )__) \\__ \\ )___/ )(__)( ) _ < README.txt * (_)\\_)(__)(__)(____)(____)(____)(___/ (__) (______)(____/ LICENSE.txt */ package razie.diesel.engine.exec import org.apache.commons.codec.digest.DigestUtils import razie.clog import razie.diesel.Diesel import razie.diesel.dom.RDOM._ import razie.diesel.dom.{RDOM, _} import razie.diesel.engine.DomEngineSettings.DIESEL_USER_ID import razie.diesel.engine._ import razie.diesel.engine.nodes._ import razie.diesel.expr.{AExprFunc, DieselExprException, ECtx, StaticECtx} import razie.diesel.model.DieselMsg import razie.tconf.DUsers import razie.tconf.hosting.Reactors import razie.wiki.Base64 import razie.wiki.parser.CsvParser import scala.collection.concurrent.TrieMap import scala.collection.mutable.{HashMap, ListBuffer} import scala.util.Try object EECtx { final val CTX = "ctx" } /** executor for "ctx." messages - operations on the current context */ class EECtx extends EExecutor(EECtx.CTX) { import razie.diesel.engine.exec.EECtx.CTX /** map of active contexts per transaction */ val contexts = new TrieMap[String, ECtx]() override def isMock: Boolean = true override def test(ast: DomAst, m: EMsg, cole: Option[MatchCollector] = None)(implicit ctx: ECtx) = { m.entity == CTX // todo why not && messages.exists(_.met == m.met) } override def apply(in: EMsg, destSpec: Option[EMsg])(implicit ctx: ECtx): List[Any] = { // todo I don't think i need this - the context should be a static msg context with all those anyways def parm(s: String): Option[P] = in.attrs.find(_.name == s).orElse(ctx.getp(s)) in.met match { case "regex" => { val payload = in.attrs.find(_.name == Diesel.PAYLOAD).getOrElse(ctx.getRequiredp(Diesel.PAYLOAD)) val re = ctx.getp("regex").orElse(in.attrs.headOption) if(re.isEmpty) { List(EError("Need at least a regex parameter")) } else { // for regex matches, use each capture group and set as parm in context // extract parms val groups = EContent.extractRegexParms(re.get.calculatedValue, payload.calculatedValue) groups.map(t => EVal(P(t._1, t._2))) } } case "persisted" => { contexts.get(ctx("kind") + ctx("id")).map(x => if (ctx != x) ctx.root.overwrite(x) ).getOrElse { contexts.put(ctx("kind") + ctx("id"), ctx.root) // should I save this one? 
} Nil } case "log" => { clog << "ctx.log " + ctx.toString Nil } case "info" => { in.attrs.headOption.toList.map(p => EInfo(p.name + " - click me", p.calculatedValue) ) } case "test" => { clog << "ctx.test " + ctx.toString Nil } case "clear" => { ctx.getScopeCtx.clear ctx.clear Nil } case "reset" => { ctx.getScopeCtx.clear ctx.clear Nil } case "map" => { // l can be a constant with another parm name OR the actual array val list = { val l = ctx.getRequiredp("list").calculatedP if(l.isOfType(WTypes.wt.ARRAY)) { l } else if(l.isOfType(WTypes.wt.STRING)) { ctx.getRequiredp(l.currentStringValue) } else { P("", "", WTypes.wt.UNDEFINED) //throw new IllegalArgumentException(s"Can't source input list: $ctxList") } } val ea = parm("msg").get.currentStringValue val EMsg.REGEX(e, m) = ea val x = if(list.calculatedTypedValue.contentType == WTypes.ARRAY) list.calculatedTypedValue.asArray else if(list.calculatedTypedValue.contentType == WTypes.UNDEFINED) Nil else razie.js.parse(s"{ list : ${list.calculatedValue} }").apply("list") x match { case l: collection.Seq[Any] => { val nat = in.attrs.filter(e => !Array("list", "item", "msg").contains(e.name)) val res = l.map { item: Any => val itemP = P.fromTypedValue(parm("item").get.currentStringValue, item) val args = itemP :: nat val out = AExprFunc(ea, args).applyTyped("") out.calculatedTypedValue.value } List(EVal(P.fromTypedValue(Diesel.PAYLOAD, res))) } case x@_ => { List(EError("map works on lists/arrays, found a: " + x.getClass.getName)) } } } // case "batch" => { // val b = ctx.getRequiredp("batches").calculatedP.value.get.asInt // val s = ctx.getp("start").map(_.calculatedP.value.get.asInt).getOrElse(0) // // val EMsg.REGEX(e, m) = parm("msg").get.currentStringValue // val itemName = parm("item").get.currentStringValue // // // passing any other parameters that were given to foreach // val nat = in.attrs.filter(e => !Array("list", "item", "msg").contains(e.name)) // // (s .. b).map { item: Any => // // for each item in list, create message // val itemP = P.fromTypedValue(itemName, item) // EMsg(e, m, itemP :: nat) // }.toList ::: info // } case "foreach" => { var info: List[Any] = Nil // l can be a constant with another parm name OR the actual array val list = { val l = ctx.getRequiredp("list").calculatedP if (l.isOfType(WTypes.wt.ARRAY)) { l } else if (l.isOfType(WTypes.wt.STRING)) { ctx.getRequiredp(l.currentStringValue) } else { info = EWarning(s"Can't source input list - what type is it? 
${l}") :: info P("", "", WTypes.wt.UNDEFINED) //throw new IllegalArgumentException(s"Can't source input list: $ctxList") } } val EMsg.REGEX(e, m) = parm("msg").get.currentStringValue val itemName = parm("item").get.currentStringValue val kidz = try { razie.js.parse(s"{ list : ${list.currentStringValue} }").apply("list") match { case l: collection.Seq[Any] => { // passing any other parameters that were given to foreach val nat = in.attrs.filter(e => !Array("list", "item", "msg").contains(e.name)) l.map { item: Any => // for each item in list, create message val itemP = P.fromTypedValue(itemName, item) new EMsg(e, m, itemP :: nat) with KeepOnlySomeSiblings {keepCount = 5} }.toList ::: info } case x@_ => { List(EError("value to iterate on was not a list", x.getClass.getName) :: info) } } } catch { case throwable: Throwable => throw new DieselExprException( s"Caught ${throwable.toString} while evaluating ctx.foreach for list: " + list.currentStringValue) } kidz } // nice print of either input parms of default payload case "echo" => { val toPrint = if (in.attrs.nonEmpty) in.attrs else ctx.getp(Diesel.PAYLOAD).toList val res = toPrint.map { p => EInfo(p.toHtml, p.calculatedTypedValue.asNiceString) } if (res.isEmpty) List(EInfo("No arguments with values found...")) else res } case "setVal" => { // setVal takes the name in a variable val n = in.attrs.find(_.name == "name").map(_.currentStringValue) val v = in.attrs.find(_.name == "value") // at this point the val res = n.flatMap { name => if (v.exists(_.hasCurrentValue)) Some(new EVal(name, v.get.currentStringValue)) else if (v.exists(_.expr.isDefined)) Some(new EVal(v.get.expr.get.applyTyped("").copy(name = name))) else if (v.exists(_.ttype != WTypes.wt.UNDEFINED)) Some(new EVal(name, v.get.currentStringValue)) // for set (x="") else { // clear it def clear(c: ECtx) { c.remove(name) c.base.foreach(clear) } clear(ctx) Some(new EInfo("removed " + name)) } }.orElse { v.map(_.calculatedP) // just v - copy it }.toList // ctx.set goes to the enclosing scope res.collect { case ev: EVal => DomRoot.setValueInScopeContext(ctx, ev.p) } res } case "export" => { // special export to scope - gets extra parm "toExport" if (in.attrs.find(_.name == "toExport").isEmpty) throw new DieselExprException( "ctx.export requires argument *toExport*") val ex = in.attrs.find(_.name == "toExport").get val res = in.attrs.filter(_.name != "toExport").map { p => if (p.hasCurrentValue) // calculated already Some(new EVal(p)) else if (p.expr.isDefined) // calculate now Some(new EVal(p.expr.get.applyTyped("").copy(name = p.name))) else if (p.ttype != WTypes.wt.UNDEFINED) Some(new EVal(p)) // set(x="") is not undefined... 
else { // clear it ctx.getScopeCtx.remove(p.name) None } }.filter(_.isDefined).map(_.get) if (res.isEmpty) { // parm was UNDEFINED and filtered out of attrs, remove it ctx.getScopeCtx.remove(ex.currentStringValue) } // ctx.set goes to the enclosing scope res.foreach(v => // not doing this for exports - that's just scope normal parms - see specs tests, they fail this way //ctx.root.engine.map(_.setoSmartValueInContext(None, ctx.getScopeCtx, v.p)) // INSTEAD: setting normally DomRoot.setValueInScopeContext(ctx, v.p) ) res } case "set" => { // set all parms passed in - return EVals and make sure they're set in context // important to know how the scope contexts work val res = in.attrs.map { p => if (p.hasCurrentValue) // calculated already Some(new EVal(p)) else if (p.expr.isDefined) // calculate now Some(new EVal(p.expr.get.applyTyped("").copy(name = p.name))) else if (p.ttype != WTypes.wt.UNDEFINED) Some(new EVal(p)) // set(x="") is not undefined... else { // clear it ctx.getScopeCtx.remove(p.name) None } }.filter(_.isDefined).map(_.get) // ctx.set goes to the enclosing scope res.foreach(v => // if(v.p.name contains ".") { ctx.root.engine.map(_.setoSmartValueInContext(None, ctx.getScopeCtx, v.p)) // } else { // DomRoot.setValueInScopeContext(ctx, v.p) // } ) res } case "setAll" => { // input is json - set all fields as ctx vals val res = in.attrs.map(_.calculatedP).filter(_.ttype == WTypes.JSON).flatMap { p => p.calculatedTypedValue.asJson.map { t => new EVal(P.fromTypedValue(t._1, t._2)) } } // ctx.set goes to the enclosing scope res.foreach(v => DomRoot.setValueInScopeContext(ctx, v.p)) res } case "debug" => { EInfo("Local attrs:") :: cdebug(in.attrs) } // debug current context case "trace" => { EInfo("All flattened looking up:") :: ctrace(ctx) ::: EInfo("-----------debug:") :: cdebug(in.attrs) } // url safe version case "urlbase64encode" => { val res = in.attrs.filter(_.name != Diesel.RESULT).map { a => import org.apache.commons.codec.binary.Base64 def enc(s: String) = new Base64(true).encode(s.getBytes) val res = enc(a.calculatedValue) new EVal(a.name, new String(res).replaceAll("\\n", "").replaceAll("\\r", "")) } res ::: res.headOption.map(x=> x.copy(p=x.p.copy(name=Diesel.PAYLOAD))).toList } // normal base64 encoder case "base64encode" => { val res = in.attrs.filter(_.name != Diesel.RESULT).map { a => import org.apache.commons.codec.binary.Base64 def enc(s: String) = new Base64(false).encode(s.getBytes) val res = enc(a.calculatedValue) new EVal(a.name, new String(res).replaceAll("\\n", "").replaceAll("\\r", "")) } res ::: res.headOption.map(x=> x.copy(p=x.p.copy(name=Diesel.PAYLOAD))).toList } // take all args and create a json doc with them case "mkString" => { val pre = ctx.getp("pre").map(_.calculatedValue).getOrElse("") val sep = ctx.getp("separator").map(_.calculatedValue).getOrElse(",") val post = ctx.getp("post").map(_.calculatedValue).getOrElse("") // l can be a constant with another parm name OR the actual array val list = { val l = ctx.getp("list").getOrElse(ctx.getRequiredp(Diesel.PAYLOAD)).calculatedP if(l.isOfType(WTypes.wt.ARRAY)) { val arr = l.calculatedTypedValue.asArray arr } else { // info = EWarning(s"Can't source input list - what type is it? 
${l}") :: info throw new IllegalArgumentException(s"Can't source input list: $l") } } val rows = list.map { obj => PValue(obj).asString }.mkString(pre, sep, post) new EVal( RDOM.P.fromTypedValue(Diesel.PAYLOAD, rows, WTypes.wt.STRING) ) :: Nil } // take all args and create a json doc with them case "csv" | "jsonToCsv" => { val separator = ctx.getRequired("separator") val useHeaders = ctx.get("useHeaders").getOrElse("true").toBoolean // l can be a constant with another parm name OR the actual array val list = { val l = ctx.getp("list").getOrElse(ctx.getRequiredp(Diesel.PAYLOAD)).calculatedP if (l.isOfType(WTypes.wt.ARRAY)) { val arr = l.calculatedTypedValue.asArray arr } else { // info = EWarning(s"Can't source input list - what type is it? ${l}") :: info throw new IllegalArgumentException(s"Can't source input list: $l") } } // collecting field names here to avoid empty var inames = new collection.mutable.ListBuffer[String]() val objects = list.map { obj => val m = PValue(obj).asJson // val m = p.calculatedTypedValue.asJson // collect new names inames.appendAll(m.keys.filter(x => !inames.contains(x))) m }.toList val names = inames.toList // collect new names var rows = objects.map { m => names.map { n => m .get(n) .filter(P.isSimpleType) .map(x => { if (P.isSimpleNonStringType(x)) { P.asString(x) } else { "\\"" + { val s = P.asString(x) s .replaceAll("\\"", "\\"\\"") // .replaceAll(separator, "\\"" + separator + "\\"") } + "\\"" } } ) .getOrElse("") }.mkString(separator) } rows = (if (useHeaders) List(names.mkString(separator)) else Nil) ++ rows new EVal( RDOM.P.fromTypedValue(Diesel.PAYLOAD, rows, WTypes.wt.ARRAY) ) :: new EVal( RDOM.P.fromTypedValue("csvHeaders", names, WTypes.wt.ARRAY) ) :: Nil } // incoming csv parsed into json, based on header field. // if no header, fields will be "col0"..."colN" case "csvToJson" => { val separator = ctx.getRequired("separator") val hasHeaders = ctx.get("hasHeaders").getOrElse("true").toBoolean val payload = ctx.getRequired(Diesel.PAYLOAD) var headers = new Array[String](0) val result: ListBuffer[Any] = new ListBuffer[Any]() // 1. parse into lines val parser = new CsvParser() { def doit(s: String, delim: String) = { parseAll(csv(separator), payload) match { case Success(value, _) => value.filter(_.nonEmpty) case NoSuccess(msg, _) => { result.append(EError(msg, msg)) Nil } //todo ? throw new DieselExprException("Parsing error: " + msg) } } def consts(s: String) = { parseAll(csvnumConst, s) match { case Success(value, _) => value case NoSuccess(msg, _) => { s // not matched, keep it } //todo ? 
throw new DieselExprException("Parsing error: " + msg) } } } var lines: List[List[String]] = parser.doit(payload, separator) if (hasHeaders) { headers = lines.head.toArray lines = lines.drop(1) } val res = lines.map(l => { val m = new HashMap[String, Any]() l.zipWithIndex.foreach(x => { val k = if (hasHeaders) headers(x._2) else "col" + x._2 var v = x._1 if (v.trim.startsWith("\\"")) { val y = v.replaceFirst("^\\"", "").replaceFirst("\\"$", "") m.put(k, y) } else { val y = parser.consts(v) if (y != null) { m.put(k, y) } } }) m }) result.append( new EVal(RDOM.P.fromTypedValue(Diesel.PAYLOAD, res.toList, WTypes.wt.ARRAY)) ) result.toList } // take all args and create a json doc with them case "json" => { val res = in.attrs.map(a => (a.name, a.calculatedTypedValue.value)).toMap new EVal( RDOM.P.fromTypedValue(Diesel.PAYLOAD, res, WTypes.wt.JSON) ) :: Nil } case "base64decode" => { val res = in.attrs.filter(_.name != Diesel.RESULT).map { a => val res = Base64.dec(a.calculatedValue) new EVal(RDOM.P(a.name, "", WTypes.wt.BYTES, None, "", Some(PValue[Array[Byte]](res, "application/octet-stream")))) } val res2 = res ::: res.headOption.map(x => x.copy(p = x.p.copy(name = Diesel.PAYLOAD))).toList res2 } case "sha1" => { val res = in.attrs.filter(_.name != Diesel.RESULT).map { a => val md = java.security.MessageDigest.getInstance("SHA-1") val s = md.digest(a.currentStringValue.getBytes("UTF-8")).map("%02X".format(_)).mkString // val sb = DigestUtils.sha1Hex(a.dflt) new EVal(a.name + "_sha1", s) //:: new EVal(a.name+"_sha1j", sb) :: Nil } res ::: in.attrs .find(_.name == Diesel.RESULT) .map(_.calculatedValue) .orElse(Some(Diesel.PAYLOAD)) .map(p => new EVal(p, res.head.p.currentStringValue)) .toList } case "sha256" => { val res = in.attrs.filter(_.name != Diesel.RESULT).map { a => val md = java.security.MessageDigest.getInstance("SHA-256") val s = DigestUtils.sha256Hex(a.currentStringValue) new EVal(a.name + "_sha256", s) } res ::: in.attrs .find(_.name == Diesel.RESULT) .map(_.calculatedValue) .orElse(Some(Diesel.PAYLOAD)) .map(p => new EVal(p, res.head.p.currentStringValue)) .toList } case "timer" => { val d = in.attrs.find(_.name == "duration").map(_.currentStringValue.toInt).getOrElse(1000) val m = in.attrs.find(_.name == "msg").map(_.currentStringValue).getOrElse( "$msg ctx.echo (msg=\\"timer without message\\")") DieselAppContext ! DEStartTimer("x", d, Nil) new EInfo("ctx.timer - start " + d) :: Nil } case "sleep" => { /* this is not just asynchronous - but also 1. suspends the engine 2. ask the engine to send itself a continuation later DELater 3. continuation DEComplete */ val d = in.attrs.find(_.name == "duration").map(_.calculatedTypedValue.asLong.toInt).getOrElse(1000) EInfo("ctx.sleep - slept " + d) :: EEngSuspend("ctx.sleep", "", Some((e, a, l) => { DieselAppContext ! 
DELater(e.id, d, DEComplete(e.id, a.id, recurse = true, l, Nil)) })) :: Nil } case "authUser" => { val uid = // the engine got it from the session/cookie ctx.root.engine.flatMap(_.settings.userId) orElse // or some test set it ctx.get(DIESEL_USER_ID) // todo lookup the user - either in DB or wix if (uid.isDefined) new EInfo("User is auth ") :: Nil else new EVal(DieselMsg.HTTP.STATUS, "401") :: new EVal(Diesel.PAYLOAD, "Error: User not auth") :: // payload will be shown, needs reset new EError(s"ctx.authUser - User not auth") :: new EEngStop(s"User not auth") :: Nil } case "setAuthUser" => setAuthUser(ctx) case s@_ => { new EError(s"ctx.$s - unknown activity ") :: Nil } } } def setAuthUser (ctx:ECtx) = { /** run tests in the context of a user, configurable per domain */ // was this engine triggered for a user ? like in a fiddle? Use that one val root = ctx.root val uid = ctx.root.engine.flatMap(_.settings.userId) if (uid.isEmpty) { // if no auth user, use the default - same default used for xapikey auth val uid = root.settings.realm .map(Reactors.impl.getProperties) .flatMap(_.get("diesel.xapikeyUserEmail")) .flatMap(DUsers.impl.findUserByEmailDec) .map(_.id) .getOrElse( "4fdb5d410cf247dd26c2a784" // an inactive account: Harry ) // put straight in context - bypass trace nodes visible to users... ctx.put(P(DIESEL_USER_ID, uid)) new EInfo("User is now auth ") :: Nil } else new EInfo("User was already auth ") :: Nil } // debug current context def cdebug(in: List[P])(implicit ctx:ECtx) : List[EInfo] = { in.map { p => new EInfo(s"${p.name} = ${p.currentStringValue} expr=(${p.expr}) cv= ${p.calculatedValue}") } ::: (new EInfo(s"Ctx.listAttrs: ${ctx.getClass.getName}") :: ctx.listAttrs.map { p => Try { new EInfo(s"${p.name} = ${p.currentStringValue} expr=(${p.expr}) cv= ${p.calculatedValue}") }.recover { case ex => new EInfo(s"${p.name} = ${p.currentStringValue} expr=(${p.expr}) cv= EXCEPTION: $ex") }.get }) } // trace all contexts looking up def ctrace(c:ECtx)(implicit ctx:ECtx) : List[Any] = { EInfo("--------") :: (c match { case c@_ => { EInfo(c.toString) :: Nil } }) :: c.base.toList.flatMap(ctrace) } override def toString = "$executor::ctx " override val messages: List[EMsg] = EMsg(CTX, "persisted") :: EMsg(CTX, "log") :: EMsg(CTX, "echo") :: EMsg(CTX, "test") :: EMsg(CTX, "storySync") :: // processed by the story teller EMsg(CTX, "storyAsync") :: // processed by the story teller EMsg(CTX, "clear") :: EMsg(CTX, "reset") :: EMsg(CTX, "timer") :: EMsg(CTX, "sleep") :: EMsg(CTX, "set") :: EMsg(CTX, "setVal") :: EMsg(CTX, "setAll") :: EMsg(CTX, "sha1") :: EMsg(CTX, "sha256") :: EMsg(CTX, "foreach") :: EMsg(CTX, "trace") :: EMsg(CTX, "debug") :: EMsg(CTX, "authUser") :: EMsg(CTX, "setAuthUser") :: EMsg(CTX, "json") :: EMsg(CTX, "csv") :: EMsg(CTX, "mkString") :: EMsg(CTX, "export") :: Nil }
razie/diesel-rx
diesel/src/main/scala/razie/diesel/engine/exec/EECtx.scala
Scala
apache-2.0
24,422
/** * Copyright (c) 2014 Marco Sarti <marco.sarti at gmail.com> * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * */ package com.elogiclab.guardbee.core.authz import org.specs2.mutable.Specification import com.elogiclab.guardbee.core.Authentication import org.joda.time.DateTime import play.api.test.WithApplication import play.api.test.FakeApplication /** * @author Marco Sarti * */ object AuthzSpec extends Specification { object auth extends Authentication("username", "provider", None) { override val granted_roles = Seq("ROLE_USER", "ROLE_GUEST") } val expiredAuth = Authentication("username", "provider", None, DateTime.now.minusDays(10)) def TRUE(aut: Authentication) = true def FALSE(aut: Authentication) = false "authz" should { "pass if authentication is valid" in new WithApplication(app = FakeApplication(additionalPlugins = Seq("com.elogiclab.guardbee.core.GuardbeeServicePlugin"))) { IsAuthenticated(auth) must beTrue } "fail if authentication is expired" in new WithApplication(app = FakeApplication(additionalPlugins = Seq("com.elogiclab.guardbee.core.GuardbeeServicePlugin"))) { IsAuthenticated(expiredAuth) must beFalse } "AllOf must pass if all elements return true" in { AllOf(TRUE, TRUE)(auth) must beTrue } "AllOf must fail if one or more element return false" in { AllOf(TRUE, FALSE, TRUE)(auth) must beFalse AllOf(FALSE, TRUE, FALSE)(auth) must beFalse AllOf()(auth) must beFalse } "AtLeastOneOf must pass if one or more element return true" in { AtLeastOneOf(TRUE, FALSE, TRUE)(auth) must beTrue AtLeastOneOf(FALSE, TRUE, FALSE)(auth) must beTrue } "AtLeastOneOf must fail if no element return true" in { AtLeastOneOf(FALSE, FALSE, FALSE)(auth) must beFalse AtLeastOneOf()(auth) must beFalse } "MajorityOf must pass if the majority of element return true" in new WithApplication(app = FakeApplication(additionalPlugins = Seq("com.elogiclab.guardbee.core.GuardbeeServicePlugin"))) { MajorityOf(TRUE, FALSE, TRUE)(auth) must beTrue MajorityOf(FALSE, TRUE, TRUE, TRUE)(auth) must beTrue } "MajorityOf must fail if the majority of element does not return true" in new WithApplication(app = FakeApplication(additionalPlugins = Seq("com.elogiclab.guardbee.core.GuardbeeServicePlugin"))) { MajorityOf(FALSE, FALSE, FALSE)(auth) must beFalse MajorityOf(FALSE, FALSE, TRUE)(auth) must beFalse MajorityOf(FALSE, FALSE, TRUE, TRUE)(auth) must beFalse MajorityOf(FALSE, FALSE, FALSE, TRUE)(auth) must beFalse MajorityOf()(auth) must beFalse } "HasRole must pass if role is ROLE_USER" in new WithApplication(app = 
FakeApplication(additionalPlugins = Seq("com.elogiclab.guardbee.core.GuardbeeServicePlugin"))) { HasRole("ROLE_USER")(auth) must beTrue } "HasRole must fail if role is ROLE_ADMIN" in new WithApplication(app = FakeApplication(additionalPlugins = Seq("com.elogiclab.guardbee.core.GuardbeeServicePlugin"))) { HasRole("ROLE_ADMIN")(auth) must beFalse } } }
elogiclab/guardbee
modules/core/test/com/elogiclab/guardbee/core/authz/AuthzSpec.scala
Scala
mit
4,154
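A sketch of composing the combinators exercised in the spec above into one reusable rule. It assumes the combinators are importable from the authz package (as the spec's unqualified references suggest), that a running application with GuardbeeServicePlugin backs IsAuthenticated as in the spec, and that the role names are examples only.

import com.elogiclab.guardbee.core.Authentication
import com.elogiclab.guardbee.core.authz._

object PolicyExample {
  // grant access to authenticated users holding either of two roles
  val canView: Authentication => Boolean = auth =>
    IsAuthenticated(auth) &&
      AtLeastOneOf(
        (a: Authentication) => HasRole("ROLE_USER")(a),
        (a: Authentication) => HasRole("ROLE_ADMIN")(a)
      )(auth)
}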
package models.generator.android import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers class AndroidJavaUtilTest extends AnyFlatSpec with Matchers with AndroidJavaUtil{ "toParameterName" should "convert array" in { toParamName("[some_param]", false) should be("SomeParam") toParamName("[some_param]", true) should be("someParam") } it should "convert object" in { toParamName("some_param", false) should be("SomeParam") toParamName("some_param", true) should be("someParam") toParamName("String", false) should be("String") toParamName("String", true) should be("string") } "makeNameSpace" should "respect reserved words" in { makeNameSpace("com.gilt.public.api") should be("com.gilt.public_.api") makeNameSpace("com.gilt.other.api") should be("com.gilt.other.api") } "isParameterArray" should "detect arrays" in { isParameterArray("[abc]") should be(true) isParameterArray("abc") should be(false) isParameterArray("[string]") should be(true) isParameterArray("string") should be(false) isParameterArray("not[string]") should be(false) isParameterArray("[string]not") should be(false) } "getArrayType" should "return array type" in { getArrayType("[abc]") should be("abc") getArrayType("[[abc]]") should be("[abc]") getArrayType("[long]") should be("long") getArrayType("not[long]") should be("not[long]") getArrayType("not[long]") should be("not[long]") getArrayType("[long]not") should be("[long]not") } "isParameterMap" should "detect maps" in { isParameterMap("map[abc]") should be(true) isParameterMap("map[string]") should be(true) isParameterMap("notmap[abc]") should be(false) isParameterMap("mapnot[abc]") should be(false) isParameterMap("[abc]map") should be(false) isParameterMap("[abc]notmap") should be(false) } "getMapType" should "return map type" in { getMapType("map[abc]") should be("abc") getMapType("map[map[abc]]") should be("map[abc]") getMapType("map[long]") should be("long") getMapType("notmap[long]") should be("notmap[long]") getMapType("map[long]not") should be("map[long]not") getMapType("map[map[[long]]]") should be("map[[long]]") getMapType("map[map[map[[long]]]]") should be("map[map[[long]]]") } "dataTypeFromField" should "produce simple types" in { dataTypeFromField("boolean", "com.apidoc.example").toString should be ("java.lang.Boolean") dataTypeFromField("long", "com.apidoc.example").toString should be ("java.lang.Long") dataTypeFromField("uuid", "com.apidoc.example").toString should be ("java.util.UUID") dataTypeFromField("date-iso8601", "com.apidoc.example").toString should be ("org.joda.time.DateTime") dataTypeFromField("date-time-iso8601", "com.apidoc.example").toString should be ("org.joda.time.DateTime") } "isModelNameWithPackage" should "return correctly" in { isModelNameWithPackage("abc") should be(false) isModelNameWithPackage("io.apibuilder.common.v0.models.reference") should be(true) } "capitalizeModelNameWithPackage" should "capitalize last word" in { capitalizeModelNameWithPackage("io.apibuilder.common.v0.models.reference") should be("io.apibuilder.common.v0.models.Reference") } it should "handle arrays" in { dataTypeFromField("[long]", "com.apidoc.example").toString should be ("java.lang.Long[]") dataTypeFromField("[string]", "com.apidoc.example").toString should be ("java.lang.String[]") dataTypeFromField("[CustomType]", "com.apidoc.example").toString should be ("com.apidoc.example.CustomType[]") } it should "handle maps" in { dataTypeFromField("map[long]", "com.apidoc.example").toString should be ("java.util.Map<java.lang.String, 
java.lang.Long>") dataTypeFromField("map[date-time-iso8601]", "com.apidoc.example").toString should be ("java.util.Map<java.lang.String, org.joda.time.DateTime>") dataTypeFromField("map[string]", "com.apidoc.example").toString should be ("java.util.Map<java.lang.String, java.lang.String>") dataTypeFromField("map[CustomType]", "com.apidoc.example").toString should be ("java.util.Map<java.lang.String, com.apidoc.example.CustomType>") } it should "handle map and array combinations" in { dataTypeFromField("map[map[CustomType]]", "com.apidoc.example").toString should be ("java.util.Map<java.lang.String, java.util.Map<java.lang.String, com.apidoc.example.CustomType>>") dataTypeFromField("map[map[[CustomType]]]", "com.apidoc.example").toString should be ("java.util.Map<java.lang.String, java.util.Map<java.lang.String, com.apidoc.example.CustomType[]>>") } "replaceEnumsPrefixWithModels" should "replace enums prefix correctly" in { replaceEnumsPrefixWithModels("com.gilt.commons.enums.v0.enums.Store") should be("com.gilt.commons.enums.v0.models.Store") replaceEnumsPrefixWithModels("com.gilt.commons.enums.v0.models.Store") should be("com.gilt.commons.enums.v0.models.Store") replaceEnumsPrefixWithModels("com.gilt.commons.enums.v0.other.Store") should be("com.gilt.commons.enums.v0.other.Store") replaceEnumsPrefixWithModels("com.gilt.commons.enums.enums.enums.enums") should be("com.gilt.commons.enums.enums.models.enums") replaceEnumsPrefixWithModels("com.gilt.ClassName") should be("com.gilt.ClassName") replaceEnumsPrefixWithModels("com.enums.ClassName") should be("com.models.ClassName") replaceEnumsPrefixWithModels("com.ClassName") should be("com.ClassName") replaceEnumsPrefixWithModels("enums.ClassName") should be("models.ClassName") } }
mbryzek/apidoc-generator
android-generator/src/test/scala/models/generator/android/AndroidJavaUtilTest.scala
Scala
mit
5,612
package java.time.chrono import java.time.temporal._ import java.{util => ju} trait ChronoLocalDate extends Temporal with TemporalAdjuster with Comparable[ChronoLocalDate] { import ChronoField._ def getChronology(): Chronology def getEra(): Era = getChronology().eraOf(get(ERA)) def isLeapYear(): Boolean = getChronology().isLeapYear(get(YEAR)) def lengthOfMonth(): Int def lengthOfYear(): Int def isSupported(field: TemporalField): Boolean = field match { case _: ChronoField => field.isDateBased case null => false case _ => field.isSupportedBy(this) } def isSupported(unit: TemporalUnit): Boolean = unit match { case _: ChronoUnit => unit.isDateBased case null => false case _ => unit.isSupportedBy(this) } override def `with`(adjuster: TemporalAdjuster): ChronoLocalDate = adjuster.adjustInto(this).asInstanceOf[ChronoLocalDate] def `with`(field: TemporalField, value: Long): ChronoLocalDate = field match { case _: ChronoField => throw new UnsupportedTemporalTypeException(s"Unsupported field: $field") case _ => field.adjustInto(this, value) } override def plus(amount: TemporalAmount): ChronoLocalDate = amount.addTo(this).asInstanceOf[ChronoLocalDate] def plus(amount: Long, unit: TemporalUnit): ChronoLocalDate = unit match { case _: ChronoUnit => throw new UnsupportedTemporalTypeException(s"Unsupported unit: $unit") case _ => unit.addTo(this, amount) } override def minus(amount: TemporalAmount): ChronoLocalDate = amount.subtractFrom(this).asInstanceOf[ChronoLocalDate] override def minus(amount: Long, unit: TemporalUnit): ChronoLocalDate = if (amount != Long.MinValue) plus(-amount, unit) else plus(Long.MaxValue, unit).plus(1, unit) // Not implemented // def query[R](query: TemporalQuery[R]): R def adjustInto(temporal: Temporal): Temporal = temporal.`with`(EPOCH_DAY, toEpochDay) def until(end: Temporal, unit: TemporalUnit): Long def until(end: ChronoLocalDate): ChronoPeriod // Not implemented // def format(formatter: java.time.format.DateFormatter): String // TODO // def atTime(localTime: LocalTime): ChronoLocalDateTime[_] def toEpochDay(): Long = getLong(EPOCH_DAY) def compareTo(other: ChronoLocalDate): Int = { val r = toEpochDay.compareTo(other.toEpochDay) if (r == 0) getChronology().compareTo(other.getChronology) else r } def isAfter(other: ChronoLocalDate): Boolean = toEpochDay > other.toEpochDay def isBefore(other: ChronoLocalDate): Boolean = toEpochDay < other.toEpochDay def isEqual(other: ChronoLocalDate): Boolean = other.toEpochDay == toEpochDay override def equals(other: Any): Boolean = other match { case other: ChronoLocalDate => isEqual(other) && getChronology == other.getChronology case _ => false } override def hashCode: Int = super.hashCode } object ChronoLocalDate { private val tlo = new ju.Comparator[ChronoLocalDate] { def compare(date1: ChronoLocalDate, date2: ChronoLocalDate): Int = date1.toEpochDay.compareTo(date2.toEpochDay) } def timeLineOrder(): ju.Comparator[ChronoLocalDate] = tlo def from(temporal: TemporalAccessor): ChronoLocalDate = temporal match { case temporal: ChronoLocalDate => temporal case _ => // TODO: Get correct chronology (needs TemporalQuery) IsoChronology.INSTANCE.date(temporal) } }
sjrd/scala-js-java-time
src/main/scala/java/time/chrono/ChronoLocalDate.scala
Scala
bsd-3-clause
3,476
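The default methods declared in the trait above, seen through java.time.LocalDate, which implements ChronoLocalDate; everything used here is standard java.time API.

import java.time.LocalDate
import java.time.temporal.ChronoUnit

object ChronoLocalDateExample extends App {
  val a = LocalDate.of(2016, 2, 29)
  val b = LocalDate.of(2016, 3, 1)

  println(a.isLeapYear)                // true (delegates to the chronology)
  println(a.isBefore(b))               // true (epoch-day comparison)
  println(a.until(b, ChronoUnit.DAYS)) // 1
  println(a.toEpochDay)                // days since 1970-01-01
}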
/* * Copyright 2017 PayPal * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.squbs.pattern.orchestration import akka.actor._ import akka.testkit.{ImplicitSender, TestKit} import org.scalatest.{FunSpecLike, Matchers} import org.squbs.testkit.SlowTest import org.squbs.testkit.stress._ import org.squbs.testkit.Timeouts._ import scala.concurrent.duration._ import scala.concurrent.{Await, Future} import scala.language.postfixOps class OrchestratorStressTest extends TestKit(ActorSystem("OrchestrationStressTest")) with ImplicitSender with FunSpecLike with Matchers { val ir = 500 val warmUp = 2 minutes val steady = 3 minutes it(s"Should orchestrate synchronously at $ir submissions/sec, comparing CPU load to the DSL", SlowTest) { val startTime = System.nanoTime() val loadActor = system.actorOf(Props[LoadActor]) val statsActor = system.actorOf(Props[CPUStatsActor]) loadActor ! StartLoad(startTime, ir, warmUp, steady){ system.actorOf(Props[SimpleForComprehensionActor]) ! OrchestrationRequest("SyncLoadTest") } statsActor ! StartStats(startTime, warmUp, steady, 5 seconds) var sumFinishTime = 0l var sumFinishCount = 0l for (i <- 0 to 1) { fishForMessage(warmUp + steady + awaitMax) { case LoadStats(tps) => println(s"Achieved $tps TPS") println(s"Avg time to finish: ${sumFinishTime / (1000000d * sumFinishCount)} ms") tps should be > (ir * 0.95) // Within 5% of IR true case CPUStats(avg, sDev) => println(s"CPULoad $avg; Standard Deviation $sDev") true case FinishedOrchestration(_, _, time) => val currentTime = System.nanoTime() - startTime if (currentTime > warmUp.toNanos && currentTime <= (warmUp + steady).toNanos) { sumFinishTime += time sumFinishCount += 1 } false case _ => false } } loadActor ! PoisonPill statsActor ! PoisonPill } it(s"Should orchestrate asynchronously at $ir submissions/sec", SlowTest) { val startTime = System.nanoTime() val loadActor = system.actorOf(Props[LoadActor]) val statsActor = system.actorOf(Props[CPUStatsActor]) loadActor ! StartLoad(startTime, ir, warmUp, steady){ system.actorOf(Props[TestOrchestrator]) ! OrchestrationRequest("LoadTest") } statsActor ! 
StartStats(startTime, warmUp, steady, 5 seconds) var sumSubmitTime = 0l var sumSubmitCount = 0l var sumFinishTime = 0l var sumFinishCount = 0l for (i <- 0 to 1) { fishForMessage(warmUp + steady + awaitMax) { case LoadStats(tps) => println(s"Achieved $tps TPS") println(s"Avg submit time: ${sumSubmitTime / (1000000d * sumSubmitCount)} ms") println(s"Avg time to finish: ${sumFinishTime / (1000000d * sumFinishCount)} ms") tps should be > (ir * 0.95) // Within 5% of IR true case CPUStats(avg, sDev) => println(s"CPULoad $avg; Standard Deviation $sDev") true case SubmittedOrchestration(_, time) => val currentTime = System.nanoTime() - startTime if (currentTime > warmUp.toNanos && currentTime <= steady.toNanos) { sumSubmitTime += time sumSubmitCount += 1 } false case FinishedOrchestration(_, _, time) => val currentTime = System.nanoTime() - startTime if (currentTime > warmUp.toNanos && currentTime <= (warmUp + steady).toNanos) { sumFinishTime += time sumFinishCount += 1 } false } } loadActor ! PoisonPill statsActor ! PoisonPill } it(s"Should orchestrate asynchronously using ask at $ir submissions/sec", SlowTest) { val startTime = System.nanoTime() val loadActor = system.actorOf(Props[LoadActor]) val statsActor = system.actorOf(Props[CPUStatsActor]) loadActor ! StartLoad(startTime, ir, warmUp, steady){ system.actorOf(Props[TestAskOrchestrator]) ! OrchestrationRequest("LoadTest") } statsActor ! StartStats(startTime, warmUp, steady, 5 seconds) var sumSubmitTime = 0l var sumSubmitCount = 0l var sumFinishTime = 0l var sumFinishCount = 0l for (i <- 0 to 1) { fishForMessage(warmUp + steady + awaitMax) { case LoadStats(tps) => println(s"Achieved $tps TPS") println(s"Avg submit time: ${sumSubmitTime / (1000000d * sumSubmitCount)} ms") println(s"Avg time to finish: ${sumFinishTime / (1000000d * sumFinishCount)} ms") tps should be > (ir * 0.95) // Within 5% of IR true case CPUStats(avg, sDev) => println(s"CPULoad $avg; Standard Deviation $sDev") true case SubmittedOrchestration(_, time) => val currentTime = System.nanoTime() - startTime if (currentTime > warmUp.toNanos && currentTime <= steady.toNanos) { sumSubmitTime += time sumSubmitCount += 1 } false case finishedF: Future[FinishedOrchestration] => val finished = Await.result(finishedF, warmUp + steady + (20 seconds)) val currentTime = System.nanoTime() - startTime if (currentTime > warmUp.toNanos && currentTime <= (warmUp + steady).toNanos) { sumFinishTime += finished.timeNs sumFinishCount += 1 } false case _ => false } } loadActor ! PoisonPill statsActor ! PoisonPill } } class SimpleForComprehensionActor extends Actor with Orchestrator with RequestFunctions { // Expecting the initial request expectOnce { case OrchestrationRequest(request) => orchestrate(sender(), request) } /** * The "orchestrate" function saves the original requester/sender and processes the orchestration. 
* @param requester The original sender * @param request The request message */ def orchestrate(requester: ActorRef, request: String): Unit = { import Requests._ val delay = 10 milliseconds val startTime = System.nanoTime() for { v0 <- loadResponse(delay) v1 <- loadResponse1(delay)(v0) v2 <- loadResponse2(delay)(v0, v1) v3 <- loadResponse3(delay)(v0, v1, v2) v4 <- loadResponse4(delay)(v0, v1, v2, v3) v5 <- loadResponse5(delay)(v0, v1, v2, v3, v4) v6 <- loadResponse6(delay)(v0, v1, v2, v3, v4, v5) v7 <- loadResponse7(delay)(v0, v1, v2, v3, v4, v5, v6) v8 <- loadResponse8(delay)(v0, v1, v2, v3, v4, v5, v6, v7) v9 <- loadResponse9(delay)(v0, v1, v2, v3, v4, v5, v6, v7, v8) v10 <- loadResponse10(delay)(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9) v11 <- loadResponse11(delay)(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10) v12 <- loadResponse12(delay)(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11) v13 <- loadResponse13(delay)(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12) v14 <- loadResponse14(delay)(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13) v15 <- loadResponse15(delay)(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) v16 <- loadResponse16(delay)(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15) v17 <- loadResponse17(delay)(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) v18 <- loadResponse18(delay)(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17) v19 <- loadResponse19(delay)(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18) v20 <- loadResponse20(delay)(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19) v21 <- loadResponse21(delay)(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20) v22 <- loadResponse22(delay)(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21) } { requester ! FinishedOrchestration(v22, request, System.nanoTime() - startTime) context stop self } } }
SarathChandran/squbs
squbs-pattern/src/test/scala/org/squbs/pattern/orchestration/OrchestratorStressTest.scala
Scala
apache-2.0
8,686
package com.themillhousegroup.sses import play.api.mvc._ import sun.misc.BASE64Decoder import scala.concurrent.Future object BasicAuthProtected { private lazy val unauthResult = Results.Unauthorized.withHeaders(("WWW-Authenticate", "Basic realm=\\"myRealm\\"")) private lazy val challenge = Future.successful(Some(unauthResult)) //need the space at the end private val basicPrefix = "basic " private val authnHeaderNames = Set("authorization", "Authorization") def withAuthnHeader(request:Request[_]):Option[String] = { val maybeFoundHeaderName = request.headers.keys.intersect(authnHeaderNames).headOption maybeFoundHeaderName.flatMap { authnHeaderName => request.headers.get(authnHeaderName) } } def decodeBasicAuth(auth: String): Option[(String, String)] = { if ((auth.length < basicPrefix.length) || (!auth.toLowerCase.startsWith(basicPrefix))) { None } else { extractEncodedAuthString(auth.drop(basicPrefix.length)) } } private def extractEncodedAuthString(basicAuthSt:String): Option[(String, String)] = { //BASE64Decoder is not thread safe, don't make it a field of this object val decoder = new BASE64Decoder() val decodedAuthSt = new String(decoder.decodeBuffer(basicAuthSt), "UTF-8").trim val usernamePassword = decodedAuthSt.split(":") usernamePassword.length match { case many if (many >= 2) => Some(usernamePassword(0), usernamePassword.splitAt(1)._2.mkString) case 1 if (decodedAuthSt.indexOf(":") > 0) => Some(usernamePassword.head, "") case 1 if (decodedAuthSt.indexOf(":") == 0) => Some("", usernamePassword.head) case _ => None } } } class BasicAuthProtected[R <: Request[_]]( credentialMatcher: (String, String) => Boolean) extends ActionBuilder[Request] with ActionFilter[Request] { import BasicAuthProtected._ protected def filter[A](request: Request[A]): Future[Option[Result]] = { withAuthnHeader(request).fold[Future[Option[Result]]] { challenge } { basicAuth => decodeBasicAuth(basicAuth).fold[Future[Option[Result]]] { challenge } { case (user, pass) => if (credentialMatcher(user, pass)) { Future.successful[Option[Result]](None) } else { challenge } } } } } object UsernameProtected { def apply[A, R[A] <: Request[A]](requiredUsername: String):ActionBuilder[Request] = { def matcher(username:String, password:String) = username == requiredUsername new BasicAuthProtected[R[A]](matcher) } } object PasswordProtected { def apply[A, R[A] <: Request[A]](requiredPassword: String):ActionBuilder[Request] = { def matcher(username:String, password:String) = password == requiredPassword new BasicAuthProtected[R[A]](matcher) } } object UsernamePasswordProtected { def apply[A, R[A] <: Request[A]](requiredUsername: String, requiredPassword: String):ActionBuilder[Request] = { def matcher(username:String, password:String) = { username == requiredUsername && password == requiredPassword } new BasicAuthProtected[R[A]](matcher) } }
themillhousegroup/sses
src/main/scala/com/themillhousegroup/sses/BasicAuthProtected.scala
Scala
mit
3,128
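A sketch of wiring the builders above into Play actions in the older Controller/ActionBuilder style the library targets; the controller name, routes, and credentials are illustrative only, and the type arguments are spelled out explicitly.

import play.api.mvc._
import com.themillhousegroup.sses.{PasswordProtected, UsernamePasswordProtected}

object AdminController extends Controller {

  // challenges with WWW-Authenticate until the expected username/password pair is supplied
  def secret: Action[AnyContent] =
    UsernamePasswordProtected[AnyContent, Request]("admin", "s3cret") {
      Ok("admin area")
    }

  // only the password is checked; any username is accepted
  def reports: Action[AnyContent] =
    PasswordProtected[AnyContent, Request]("letmein") { request: Request[AnyContent] =>
      Ok(s"requested by ${request.remoteAddress}")
    }
}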
/* * Copyright 2016 The BigDL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.bigdl.dllib.nn.ops import com.intel.analytics.bigdl.dllib.tensor.Tensor import com.intel.analytics.bigdl.dllib.utils.T import com.intel.analytics.bigdl.dllib.utils.serializer.ModuleSerializationTest import org.scalatest.{FlatSpec, Matchers} class IndicatorColSpec extends FlatSpec with Matchers { "IndicatorColSpec Operation with isCount=true" should "work correctly" in { val input = Tensor.sparse( Array(Array(0, 1, 1, 2, 2, 3, 3, 3), Array(0, 0, 3, 0, 1, 0, 1, 2)), Array(3, 1, 2, 0, 3, 1, 2, 2), Array(4, 4) ) val expectedOutput = Tensor[Double]( T(T(0, 0, 0, 1), T(0, 1, 1, 0), T(1, 0, 0, 1), T(0, 1, 2, 0))) val output = IndicatorCol[Double]( feaLen = 4, isCount = true ).forward(input) output should be(expectedOutput) } "IndicatorColSpec Operation with isCount=false" should "work correctly" in { val input = Tensor.sparse( Array(Array(0, 1, 1, 2, 2, 3, 3, 3), Array(0, 0, 3, 0, 1, 0, 1, 2)), Array(3, 1, 2, 0, 3, 1, 2, 2), Array(4, 4) ) val expectedOutput = Tensor[Float]( T(T(0, 0, 0, 1), T(0, 1, 1, 0), T(1, 0, 0, 1), T(0, 1, 1, 0))) val output = IndicatorCol[Float]( feaLen = 4, isCount = false ).forward(input) output should be(expectedOutput) } } class IndicatorColSerialTest extends ModuleSerializationTest { override def test(): Unit = { val indicatorCol = IndicatorCol[Float]( feaLen = 4, isCount = true ).setName("indicatorCol") val input = Tensor.sparse( Array(Array(0, 1, 1, 2, 2, 3, 3, 3), Array(0, 0, 3, 0, 1, 0, 1, 2)), Array(3, 1, 2, 0, 3, 1, 2, 2), Array(4, 4) ) runSerializationTest(indicatorCol, input) } }
intel-analytics/BigDL
scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/ops/IndicatorColSpec.scala
Scala
apache-2.0
2,419
/* * Copyright 2015-2016 David R. Bild * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.github.nscala_money.money import scala.math.BigDecimal.RoundingMode.RoundingMode private[money] trait Conversions extends Universal { implicit def convertRoundingMode(roundingMode: RoundingMode): java.math.RoundingMode = java.math.RoundingMode.valueOf(roundingMode.id) }
nscala-money/nscala-money
core/src/main/scala/com/github/nscala_money/money/Conversions.scala
Scala
apache-2.0
894
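The conversion performed by the implicit above, spelled out as a plain method (the trait itself is private to the money package): scala.math.BigDecimal.RoundingMode ids line up with java.math.RoundingMode's int codes, which is exactly what the library's valueOf(id) call relies on.

object RoundingModeConversionExample extends App {
  import scala.math.BigDecimal.RoundingMode

  // same body as the implicit in Conversions above
  def toJava(mode: RoundingMode.RoundingMode): java.math.RoundingMode =
    java.math.RoundingMode.valueOf(mode.id)

  println(toJava(RoundingMode.HALF_UP)) // HALF_UP
  println(toJava(RoundingMode.CEILING)) // CEILING
}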
import java.io.File import testgen.TestSuiteBuilder.{toString, _} import testgen._ object ConnectTestGenerator { def main(args: Array[String]): Unit = { val file = new File("src/main/resources/connect.json") def toString(expected: CanonicalDataParser.Expected): String = { expected match { case Right("") => "None" case Right("X") => s"""Some(Color.Black)""" case Right("O") => s"""Some(Color.White)""" case _ => throw new IllegalArgumentException } } def sutArgsFromInput(parseResult: CanonicalDataParser.ParseResult, argNames: String*): String = argNames map (name => toArgString(parseResult("input").asInstanceOf[Map[String, Any]](name))) mkString(", ") def toArgString(any: Any): String = { any match { case list: List[_] => val vals = list.map(s => TestSuiteBuilder.toString(s)).mkString(", ") s"mkBoard(List($vals))" case _ => any.toString } } def fromLabeledTestFromInput(argNames: String*): ToTestCaseData = withLabeledTest { sut => labeledTest => val args = sutArgsFromInput(labeledTest.result, argNames: _*) val property = labeledTest.property val sutCall = s"""$sut($args).$property""" val expected = toString(labeledTest.expected) TestCaseData(labeledTest.description, sutCall, expected) } val code = TestSuiteBuilder.build(file, fromLabeledTestFromInput("board"), Seq(), Seq("// Filter readable board into valid input", "private def mkBoard(lines: List[String]): List[String] =", "lines.map(l => l.filter(!_.isSpaceChar))")) println(s"-------------") println(code) println(s"-------------") } }
ricemery/xscala
testgen/src/main/scala/ConnectTestGenerator.scala
Scala
mit
1,776
package BIDMat

abstract class CLSpec extends BIDMatSpec {
  override def beforeAll {
    super.beforeAll
    Mat.useOpenCL = true
    Mat.checkOpenCL(true)
  }

  override def afterAll {
    Mat.clHandle.free()
  }
}
phlip9/BIDMat
src/test/scala/BIDMat/CLSpec.scala
Scala
bsd-3-clause
221
// demonstrates selection on non-path types. Needs to be fleshed out to
// become a real test.

object Test {
  class C {
    type T
    val f: T => T = ???
  }
  var x = new C
  val y = x.f
}
densh/dotty
tests/pending/pos/depsel.scala
Scala
bsd-3-clause
196
/* * Copyright 2017 Datamountaineer. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.datamountaineer.streamreactor.connect.pulsar.source import java.util import java.util.Collections import com.datamountaineer.streamreactor.connect.config.Helpers import com.datamountaineer.streamreactor.connect.pulsar.config.{PulsarConfigConstants, PulsarSourceConfig, PulsarSourceSettings} import com.datamountaineer.streamreactor.connect.utils.JarManifest import com.typesafe.scalalogging.slf4j.StrictLogging import org.apache.kafka.common.config.ConfigDef import org.apache.kafka.connect.connector.Task import org.apache.kafka.connect.source.SourceConnector import scala.collection.JavaConverters._ import scala.collection.JavaConversions._ class PulsarSourceConnector extends SourceConnector with StrictLogging { private val configDef = PulsarSourceConfig.config private var configProps: util.Map[String, String] = _ private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation) /** * States which SinkTask class to use **/ override def taskClass(): Class[_ <: Task] = classOf[PulsarSourceTask] /** * Set the configuration for each work and determine the split * * @param maxTasks The max number of task workers be can spawn * @return a List of configuration properties per worker **/ override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] = { logger.info(s"Setting task configurations for $maxTasks workers.") // call settings here makes sure we don't have an exclusive subscription over more than one worker PulsarSourceSettings(PulsarSourceConfig(configProps), maxTasks) // distribute all kcqls to all workers and let the Pulsar subscription type handle the routing (1 to maxTasks).map(_ => configProps).toList } /** * Start the sink and set to configuration * * @param props A map of properties for the connector and worker **/ override def start(props: util.Map[String, String]): Unit = { logger.info(s"Starting Pulsar source connector.") configProps = props } override def stop(): Unit = {} override def config(): ConfigDef = configDef override def version(): String = manifest.version() }
CodeSmell/stream-reactor
kafka-connect-pulsar/src/main/scala/com/datamountaineer/streamreactor/connect/pulsar/source/PulsarSourceConnector.scala
Scala
apache-2.0
2,770
package com.ubirch.avatar.core.device import com.ubirch.avatar.core.test.model.DummyDeviceHistory import com.ubirch.avatar.core.test.util.DeviceHistoryTestUtil import com.ubirch.avatar.model.rest.device.DeviceHistory import com.ubirch.avatar.test.base.ElasticsearchSpec import com.ubirch.util.json.MyJsonProtocol import com.ubirch.util.uuid.UUIDUtil import org.joda.time.{DateTime, DateTimeZone} import java.util.UUID import scala.concurrent.Await import scala.concurrent.duration._ import scala.language.postfixOps /** * author: cvandrei * since: 2016-10-25 */ class DeviceHistoryManagerSpec extends ElasticsearchSpec with MyJsonProtocol { //Ignoring tests, as deviceHistoryIndex is not being used anymore. ignore("store()") { scenario("messageId does not exist before") { // prepare val deviceHistory = DummyDeviceHistory.data() // test val response = DeviceHistoryManager.store(deviceHistory).get Thread.sleep(1500) // verify val deviceHistoryList = Await.result(DeviceHistoryManager.history(response.deviceId), 2 seconds) deviceHistoryList.size should be(1) deviceHistoryList.head should be(response) } scenario("make sure that messageId is ignored: try to store object with same messageId twice") { // prepare val deviceHistory1 = DummyDeviceHistory.data() val storedDeviceHistory1 = DeviceHistoryManager.store(deviceHistory1).get val deviceHistory2 = DummyDeviceHistory.data( deviceId = storedDeviceHistory1.deviceId, messageId = storedDeviceHistory1.messageId ) // test val response = DeviceHistoryManager.store(deviceHistory2).get Thread.sleep(1500) // verify val deviceHistoryList = Await.result(DeviceHistoryManager.history(deviceHistory2.deviceId), 1 seconds) deviceHistoryList.size should be(2) deviceHistoryList.head should be(response) deviceHistoryList(1) should be(storedDeviceHistory1) } } //Ignoring tests, as deviceHistoryIndex is not being used anymore. 
ignore("history()") { scenario("deviceId empty") { an[IllegalArgumentException] should be thrownBy Await.result(DeviceHistoryManager.history(""), 1 seconds) } scenario("deviceId does not exist; index does not exist") { val deviceId = UUIDUtil.uuidStr Await.result(DeviceHistoryManager.history(deviceId), 1 seconds) should be('isEmpty) } scenario("deviceId does not exist; index exists") { // prepare DeviceHistoryTestUtil.storeSeries(1) val deviceId = UUIDUtil.uuidStr // test val result: Seq[DeviceHistory] = Await.result(DeviceHistoryManager.history(deviceId), 2 seconds) // verify result should be('isEmpty) } scenario("3 records exist: from = -1; size > 3") { testWithInvalidFromOrSize(from = -1, size = 4) } scenario("3 records exist: from = 0; size = -1") { testWithInvalidFromOrSize(from = 0, size = -1) } scenario("3 records exist: from = 0; size = 0") { // prepare val elementCount = 3 val from = 0 val size = 0 val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(elementCount) val deviceId: String = dataSeries.head.deviceId // test val result: Seq[DeviceHistory] = Await.result(DeviceHistoryManager.history(deviceId, from, size), 2 seconds) // verify result should be('isEmpty) } scenario("3 records exist: from = 0; size > 3") { // prepare val elementCount = 3 val from = 0 val size = elementCount + 1 val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(elementCount).reverse val deviceId: String = dataSeries.head.deviceId // test val result: Seq[DeviceHistory] = Await.result(DeviceHistoryManager.history(deviceId, from, size), 2 seconds) // verify result.size should be(3) for (i <- dataSeries.indices) { result(i) shouldEqual dataSeries(i) } } scenario("3 records exist: from = 1; size > 3") { // prepare val elementCount = 3 val from = 1 val size = elementCount + 1 val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(elementCount).reverse val deviceId: String = dataSeries.head.deviceId // test val result: Seq[DeviceHistory] = Await.result(DeviceHistoryManager.history(deviceId, from, size), 2 seconds) // verify result.size should be(2) result.head shouldEqual dataSeries(1) result(1) shouldEqual dataSeries(2) } scenario("3 records exist: from = 3; size > 3") { // prepare val elementCount = 3 val from = elementCount val size = elementCount + 1 val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(elementCount) val deviceId: String = dataSeries.head.deviceId // test val result: Seq[DeviceHistory] = Await.result(DeviceHistoryManager.history(deviceId, from, size), 2 seconds) // verify result should be('isEmpty) } } //Ignoring tests, as deviceHistoryIndex is not being used anymore. 
ignore("byDate()") { scenario("deviceId does not exist; index does not exist") { deleteIndices() val result = Await.result(DeviceHistoryManager.byDate(UUIDUtil.uuid, DateTime.now, DateTime.now), 1 seconds) result should be('isEmpty) } scenario("deviceId does not exist; index exists") { val result = Await.result(DeviceHistoryManager.byDate(UUIDUtil.uuid, DateTime.now, DateTime.now), 1 seconds) result should be('isEmpty) } scenario("all 3 records in interval") { // prepare val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(3) val deviceId: UUID = UUIDUtil.fromString(dataSeries.head.deviceId) val from = dataSeries.head.timestamp.minusSeconds(10) val to = dataSeries.last.timestamp.plusSeconds(10) // test val result = Await.result(DeviceHistoryManager.byDate(deviceId, from, to), 1 seconds) // verify result should be(dataSeries.reverse) } scenario("5000 records in interval (ensure that Elasticsearch page size is not the default)") { // prepare val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(5000) val deviceId: UUID = UUIDUtil.fromString(dataSeries.head.deviceId) val from = dataSeries.head.timestamp.minusSeconds(10) val to = dataSeries.last.timestamp.plusSeconds(10) // test val result = Await.result(DeviceHistoryManager.byDate(deviceId, from, to), 3 seconds) // verify result.size should be(dataSeries.size) } scenario("all records in interval; first at lower boundary") { // prepare val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(3) val deviceId: UUID = UUIDUtil.fromString(dataSeries.head.deviceId) val from = dataSeries.head.timestamp val to = dataSeries.last.timestamp.plusSeconds(10) // test val result = Await.result(DeviceHistoryManager.byDate(deviceId, from, to), 1 seconds) // verify result should be(dataSeries.reverse) } scenario("all records in interval; except for: first before lower boundary") { // prepare val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(3) val deviceId: UUID = UUIDUtil.fromString(dataSeries.head.deviceId) val from = dataSeries.head.timestamp.plusMillis(1) val to = dataSeries.last.timestamp.plusSeconds(10) // test val result = Await.result(DeviceHistoryManager.byDate(deviceId, from, to), 1 seconds) // verify result should be(dataSeries.tail.reverse) } scenario("all records in interval; last at upper boundary") { // prepare val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(3) val deviceId: UUID = UUIDUtil.fromString(dataSeries.head.deviceId) val from = dataSeries.head.timestamp.minusSeconds(10) val to = dataSeries.last.timestamp // test val result = Await.result(DeviceHistoryManager.byDate(deviceId, from, to), 1 seconds) // verify result should be(dataSeries.reverse) } scenario("all records in interval; except for: last after upper boundary") { // prepare val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(3) val deviceId: UUID = UUIDUtil.fromString(dataSeries.head.deviceId) val from = dataSeries.head.timestamp.minusSeconds(10) val to = dataSeries.last.timestamp.minusMillis(1) // test val result = Await.result(DeviceHistoryManager.byDate(deviceId, from, to), 1 seconds) // verify val expected = Seq(dataSeries.head, dataSeries(1)).reverse result should be(expected) } } //Ignoring tests, as deviceHistoryIndex is not being used anymore. 
ignore("before()") { scenario("deviceId does not exist; index does not exist") { deleteIndices() val result = Await.result(DeviceHistoryManager.before(UUIDUtil.uuid, DateTime.now), 1 seconds) result should be('isEmpty) } scenario("deviceId does not exist; index exists") { val result = Await.result(DeviceHistoryManager.before(UUIDUtil.uuid, DateTime.now), 1 seconds) result should be('isEmpty) } scenario("all 3 records in interval") { // prepare val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(3) val deviceId: UUID = UUIDUtil.fromString(dataSeries.head.deviceId) val before = dataSeries.last.timestamp.plusSeconds(10) // test val result = Await.result(DeviceHistoryManager.before(deviceId, before), 1 seconds) // verify result should be(dataSeries.reverse) } scenario("5000 records in interval (ensure that Elasticsearch page size is not the default)") { // prepare val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(5000) val deviceId: UUID = UUIDUtil.fromString(dataSeries.head.deviceId) val before = dataSeries.last.timestamp.plusSeconds(10) // test val result = Await.result(DeviceHistoryManager.before(deviceId, before), 3 seconds) // verify result.size should be(dataSeries.size) } scenario("all records in interval; except for: last at upper boundary") { // prepare val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(3) val deviceId: UUID = UUIDUtil.fromString(dataSeries.head.deviceId) val before = dataSeries.last.timestamp // test val result = Await.result(DeviceHistoryManager.before(deviceId, before), 1 seconds) // verify val expected = Seq(dataSeries.head, dataSeries(1)).reverse result should be(expected) } scenario("all records in interval; except for: last after upper boundary") { // prepare val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(3) val deviceId: UUID = UUIDUtil.fromString(dataSeries.head.deviceId) val before = dataSeries.last.timestamp.minusMillis(1) // test val result = Await.result(DeviceHistoryManager.before(deviceId, before), 1 seconds) // verify val expected = Seq(dataSeries.head, dataSeries(1)).reverse result should be(expected) } } //Ignoring tests, as deviceHistoryIndex is not being used anymore. 
ignore("after()") { scenario("deviceId does not exist; index does not exist") { deleteIndices() val result = Await.result(DeviceHistoryManager.after(UUIDUtil.uuid, DateTime.now), 1 seconds) result should be('isEmpty) } scenario("deviceId does not exist; index exists") { val result = Await.result(DeviceHistoryManager.after(UUIDUtil.uuid, DateTime.now), 1 seconds) result should be('isEmpty) } scenario("all 3 records in interval") { // prepare val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(3) val deviceId: UUID = UUIDUtil.fromString(dataSeries.head.deviceId) val after = dataSeries.head.timestamp.minusSeconds(10) // test val result = Await.result(DeviceHistoryManager.after(deviceId, after), 1 seconds) // verify result should be(dataSeries.reverse) } scenario("5000 records in interval (ensure that Elasticsearch page size is not the default)") { // prepare val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(5000) val deviceId: UUID = UUIDUtil.fromString(dataSeries.head.deviceId) val after = dataSeries.head.timestamp.minusSeconds(10) // test val result = Await.result(DeviceHistoryManager.after(deviceId, after), 3 seconds) // verify result.size should be(dataSeries.size) } scenario("all records in interval; first at lower boundary") { // prepare val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(3) val deviceId: UUID = UUIDUtil.fromString(dataSeries.head.deviceId) val after = dataSeries.head.timestamp.minusSeconds(10) // test val result = Await.result(DeviceHistoryManager.after(deviceId, after), 1 seconds) // verify result should be(dataSeries.reverse) } scenario("all records in interval; except for: first before lower boundary") { // prepare val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(3) val deviceId: UUID = UUIDUtil.fromString(dataSeries.head.deviceId) val after = dataSeries.head.timestamp.plusMillis(1) // test val result = Await.result(DeviceHistoryManager.after(deviceId, after), 1 seconds) // verify result should be(dataSeries.tail.reverse) } } //Ignoring tests, as deviceHistoryIndex is not being used anymore. 
ignore("byDay()") { scenario("deviceId does not exist; index does not exist") { deleteIndices() val result = Await.result(DeviceHistoryManager.byDay(UUIDUtil.uuid, DateTime.now), 1 seconds) result should be('isEmpty) } scenario("deviceId does not exist; index exists") { val result = Await.result(DeviceHistoryManager.byDay(UUIDUtil.uuid, DateTime.now), 1 seconds) result should be('isEmpty) } scenario("all 3 records in interval (on the first millisecond of the day)") { // prepare val deviceId: UUID = UUIDUtil.uuid val dayBegins = firstMillisecondOfToday() val t1 = dayBegins.plusHours(2) val t2 = dayBegins.plusHours(4).plusMinutes(10).plusSeconds(5).plusMillis(23) val t3 = dayBegins.plusHours(21).plusMinutes(42).plusSeconds(27).plusMillis(46) val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeTimeBasedSeries(deviceId, Seq(t1, t2, t3)) // test val result = Await.result(DeviceHistoryManager.byDay(deviceId, dayBegins), 1 seconds) // verify result should be(dataSeries.reverse) } scenario("all 3 records in interval (on the last millisecond of the day)") { // prepare val deviceId: UUID = UUIDUtil.uuid val dayBegins = firstMillisecondOfToday() val dayEnds = dayBegins.plusDays(1).minusMillis(1) val t1 = dayBegins.plusHours(2) val t2 = dayBegins.plusHours(4).plusMinutes(10).plusSeconds(5).plusMillis(23) val t3 = dayBegins.plusHours(21).plusMinutes(42).plusSeconds(27).plusMillis(46) val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeTimeBasedSeries(deviceId, Seq(t1, t2, t3)) // test val result = Await.result(DeviceHistoryManager.byDay(deviceId, dayEnds), 1 seconds) // verify result should be(dataSeries.reverse) } scenario("5000 records in interval (ensure that Elasticsearch page size is not the default)") { // prepare val deviceId: UUID = UUIDUtil.uuid val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries( elementCount = 5000, deviceId = deviceId.toString ) val day = dataSeries.head.timestamp // test val result = Await.result(DeviceHistoryManager.byDay(deviceId, day), 3 seconds) // verify result.size should be(dataSeries.size) } scenario("all records in interval; first at lower boundary") { // prepare val deviceId: UUID = UUIDUtil.uuid val midnight = firstMillisecondOfToday() val t1 = midnight val t2 = midnight.plusHours(4).plusMinutes(10).plusSeconds(5).plusMillis(23) val t3 = midnight.plusHours(21).plusMinutes(42).plusSeconds(27).plusMillis(46) val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeTimeBasedSeries(deviceId, Seq(t1, t2, t3)) val day = midnight.plusHours(7) // test val result = Await.result(DeviceHistoryManager.byDay(deviceId, day), 1 seconds) // verify result should be(dataSeries.reverse) } scenario("all records in interval; except for: first before lower boundary") { // prepare val deviceId: UUID = UUIDUtil.uuid val midnight = firstMillisecondOfToday() val t1 = midnight.minusMillis(1) val t2 = midnight.plusHours(4).plusMinutes(10).plusSeconds(5).plusMillis(23) val t3 = midnight.plusHours(21).plusMinutes(42).plusSeconds(27).plusMillis(46) val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeTimeBasedSeries(deviceId, Seq(t1, t2, t3)) val day = midnight.plusHours(7) // test val result = Await.result(DeviceHistoryManager.byDay(deviceId, day), 1 seconds) // verify result should be(dataSeries.tail.reverse) } scenario("all records in interval; last at upper boundary") { // prepare val deviceId: UUID = UUIDUtil.uuid val midnight = firstMillisecondOfToday() val t1 = midnight.plusHours(2) val t2 = 
midnight.plusHours(4).plusMinutes(10).plusSeconds(5).plusMillis(23) val t3 = midnight.plusDays(1).minusMillis(1) val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeTimeBasedSeries(deviceId, Seq(t1, t2, t3)) val day = midnight.plusHours(7) // test val result = Await.result(DeviceHistoryManager.byDay(deviceId, day), 1 seconds) // verify result should be(dataSeries.reverse) } scenario("all records in interval; except for: last after upper boundary") { // prepare val deviceId: UUID = UUIDUtil.uuid val midnight = firstMillisecondOfToday() val t1 = midnight.plusHours(2) val t2 = midnight.plusHours(4).plusMinutes(10).plusSeconds(5).plusMillis(23) val t3 = midnight.plusDays(1) val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeTimeBasedSeries(deviceId, Seq(t1, t2, t3)) val day = midnight.plusHours(7) // test val result = Await.result(DeviceHistoryManager.byDay(deviceId, day), 1 seconds) // verify val expected = Seq(dataSeries.head, dataSeries(1)).reverse result should be(expected) } } private def testWithInvalidFromOrSize(from: Int, size: Int, elementCount: Int = 3) = { // prepare val dataSeries: Seq[DeviceHistory] = DeviceHistoryTestUtil.storeSeries(elementCount) val deviceId: String = dataSeries.head.deviceId // test && verify an[IllegalArgumentException] should be thrownBy Await.result(DeviceHistoryManager.history(deviceId, from, size), 1 seconds) } private def firstMillisecondOfToday(): DateTime = { DateTime.now(DateTimeZone.UTC) .withHourOfDay(0) .withMinuteOfHour(0) .withSecondOfMinute(0) .withMillisOfSecond(0) } }
ubirch/ubirch-avatar-service
core/src/test/scala/com/ubirch/avatar/core/device/DeviceHistoryManagerSpec.scala
Scala
apache-2.0
19,788
class S {
  J.foo(null) // Java static methods are also nullified
}
som-snytt/dotty
tests/explicit-nulls/pos/interop-static-src/S.scala
Scala
apache-2.0
71
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.datasources.v2.csv import org.apache.spark.sql.SparkSession import org.apache.spark.sql.catalyst.StructFilters import org.apache.spark.sql.connector.read.{Scan, SupportsPushDownFilters} import org.apache.spark.sql.execution.datasources.PartitioningAwareFileIndex import org.apache.spark.sql.execution.datasources.v2.FileScanBuilder import org.apache.spark.sql.sources.Filter import org.apache.spark.sql.types.StructType import org.apache.spark.sql.util.CaseInsensitiveStringMap case class CSVScanBuilder( sparkSession: SparkSession, fileIndex: PartitioningAwareFileIndex, schema: StructType, dataSchema: StructType, options: CaseInsensitiveStringMap) extends FileScanBuilder(sparkSession, fileIndex, dataSchema) with SupportsPushDownFilters { override def build(): Scan = { CSVScan( sparkSession, fileIndex, dataSchema, readDataSchema(), readPartitionSchema(), options, pushedFilters()) } private var _pushedFilters: Array[Filter] = Array.empty override def pushFilters(filters: Array[Filter]): Array[Filter] = { if (sparkSession.sessionState.conf.csvFilterPushDown) { _pushedFilters = StructFilters.pushedFilters(filters, dataSchema) } filters } override def pushedFilters(): Array[Filter] = _pushedFilters }
maropu/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/csv/CSVScanBuilder.scala
Scala
apache-2.0
2,155
package org.eichelberger.sfc.utils

import com.typesafe.scalalogging.slf4j.LazyLogging
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner

import BitManipulations._

@RunWith(classOf[JUnitRunner])
class BitManipulationsTest extends Specification with LazyLogging {

  "static methods" should {
    "usedMask" >> {
      // single bits
      for (pos <- 0 to 62) {
        val v = 1L << pos.toLong
        val actual = usedMask(v)
        val expected = (1L << (pos + 1L)) - 1L
        println(s"[usedMask single bit] pos $pos, value $v, actual $actual, expected $expected")
        actual must equalTo(expected)
      }

      // full bit masks
      for (pos <- 0 to 62) {
        val expected = (1L << (pos.toLong + 1L)) - 1L
        val actual = usedMask(expected)
        println(s"[usedMask full bit masks] pos $pos, value $expected, actual $actual, expected $expected")
        actual must equalTo(expected)
      }

      usedMask(0) must equalTo(0)
    }

    "sharedBitPrefix" >> {
      sharedBitPrefix(2, 3) must equalTo(2)
      sharedBitPrefix(178, 161) must equalTo(160)
    }

    "common block extrema" >> {
      commonBlockMin(178, 161) must equalTo(160)
      commonBlockMax(178, 161) must equalTo(191)
    }
  }
}
cne1x/sfseize
src/test/scala/org/eichelberger/sfc/utils/BitManipulationsTest.scala
Scala
apache-2.0
1,293
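A minimal sketch of bit-twiddling definitions consistent with the expectations in BitManipulationsTest above. The repository's actual BitManipulations object may be implemented differently (for example via Long.numberOfLeadingZeros), so treat this only as a reading aid for the test cases:

object BitManipulationsSketch {
  // smallest mask of the form 2^k - 1 covering every set bit of x (0 for 0)
  def usedMask(x: Long): Long = {
    var m = x
    m |= m >>> 1
    m |= m >>> 2
    m |= m >>> 4
    m |= m >>> 8
    m |= m >>> 16
    m |= m >>> 32
    m
  }

  // common high-order bits of a and b, with the differing low-order bits zeroed
  def sharedBitPrefix(a: Long, b: Long): Long = a & ~usedMask(a ^ b)

  // extrema of the smallest aligned block containing both a and b
  def commonBlockMin(a: Long, b: Long): Long = sharedBitPrefix(a, b)
  def commonBlockMax(a: Long, b: Long): Long = sharedBitPrefix(a, b) | usedMask(a ^ b)
}

Worked through the test's values: 178 = 10110010 and 161 = 10100001 share the prefix 101, so sharedBitPrefix is 10100000 = 160 and the enclosing block spans 160..191.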
package com.gilesc
package arrow

import cats.Monad
import cats.data.Kleisli

abstract class Service[F[_]: Monad, Req, Resp] {
  def apply(): Kleisli[F, Req, Resp] = Kleisli.apply(run)

  def run(req: Req): F[Resp]

  def andThen[C](
    service: Service[F, Resp, C]
  ): Kleisli[F, Req, C] =
    this.apply().andThen(service.run _)
}

object Service {
  def apply[F[_]: Monad, Req, Resp](f: Req => F[Resp]): Service[F, Req, Resp] = {
    new Service[F, Req, Resp] {
      override def run(req: Req) = f(req)
    }
  }
}
CraigGiles/mynab
arrow/src/main/scala/com/gilesc/arrow/Service.scala
Scala
mit
518
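A small usage sketch for the Service abstraction above; the Option-based services and the demo object are illustrative and not part of the repository, but they show how Service.apply and andThen compose into a single Kleisli:

import cats.implicits._
import com.gilesc.arrow.Service
import scala.util.Try

object ServiceCompositionSketch {
  val parseInt: Service[Option, String, Int] = Service(s => Try(s.toInt).toOption)
  val double: Service[Option, Int, Int]      = Service(n => Option(n * 2))

  // andThen yields a Kleisli[Option, String, Int] that runs both steps
  val pipeline = parseInt andThen double

  def main(args: Array[String]): Unit = {
    println(pipeline.run("21"))   // Some(42)
    println(pipeline.run("nope")) // None
  }
}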
package com.cloudray.scalapress.plugin.ecommerce.tag

import org.scalatest.{FlatSpec, OneInstancePerTest}
import org.scalatest.mock.MockitoSugar
import com.cloudray.scalapress.plugin.ecommerce.tags.BasketLineVariationTag
import com.cloudray.scalapress.plugin.ecommerce.domain.BasketLine
import com.cloudray.scalapress.item.Item
import javax.servlet.http.HttpServletRequest
import com.cloudray.scalapress.plugin.variations.{Variation, Dimension, DimensionValue}
import com.cloudray.scalapress.framework.{ScalapressRequest, ScalapressContext}

/** @author Stephen Samuel */
class BasketLineVariationTagTest extends FlatSpec with MockitoSugar with OneInstancePerTest {

  val line1 = new BasketLine
  line1.obj = new Item
  line1.obj.name = "coldplay tickets riverside stadium"
  line1.obj.stock = 55

  val dv1 = new DimensionValue
  dv1.dimension = new Dimension
  dv1.dimension.id = 4
  dv1.value = "green"

  val dv2 = new DimensionValue
  dv2.dimension = new Dimension
  dv2.dimension.id = 5
  dv2.value = "latern"

  val tag = new BasketLineVariationTag()
  val req = mock[HttpServletRequest]
  val context = mock[ScalapressContext]
  val sreq = new ScalapressRequest(req, context).withLine(line1)

  "a BasketLineVariationTag" should "render the variation description" in {
    line1.variation = new Variation
    line1.variation.dimensionValues.add(dv1)
    line1.variation.dimensionValues.add(dv2)
    val render = tag.render(sreq)
    assert("green latern" === render.get)
  }

  "a BasketLineVariationTag" should "render nothing when no variation is set" in {
    val render = tag.render(sreq)
    assert(render.isEmpty)
  }
}
vidyacraghav/scalapress
src/test/scala/com/cloudray/scalapress/plugin/ecommerce/tag/BasketLineVariationTagTest.scala
Scala
apache-2.0
1,635
package service

import com.google.inject.Inject
import models.Teaser
import play.api.i18n.Lang

import scala.concurrent.{ExecutionContext, Future}

trait CarouselService {
  def carousel(implicit lang: Lang, ec: ExecutionContext): Future[Seq[Teaser]]
}

class CarouselServiceImpl @Inject()(ws: WsConsumer) extends CarouselService with PlayPort {
  override def carousel(implicit lang: Lang, ec: ExecutionContext) =
    ws.fetch[Seq[Teaser]](s"http://localhost:$port/mock/carousel")
}
splink/pagelets-seed
app/service/CarouselService.scala
Scala
apache-2.0
484
package modulo_app import core_app._ import config_app._ import errores_app._ import java.io.{File, FileInputStream, FileOutputStream} import scala.swing.TextArea import scala.util.{Try, Success, Failure} import java.util.regex.{Matcher, Pattern} trait Modulo { def startFunction : Unit /** * Funcion = getFilesFrom * Obtiene todos los archivos de un directorio dado. */ def getFilesFrom(dir: String): List[File] = { val d = new File(dir) if (d.exists && d.isDirectory){ val tipo = ConfigFile.getFileType.toUpperCase val query = if (tipo.isEmpty) "." else s".$tipo" d.listFiles.filter(_.isFile).toList.filter(_.getName.toUpperCase.contains(query)) } else List[File]() } /** * Funcion = deleteFolder * Elimina los archivos recursivos a una carpeta dada. */ def deleteFolder(destino: File) : Boolean = { def deleteAll (file: File) : Boolean = { if (file.isDirectory) file.listFiles.foreach(deleteAll) !(file.exists && !file.delete) } deleteAll(destino) } } class Ordenador (ruta : String, out : TextArea, dest : Option[String] = None) extends Modulo { var index: Int = 0 var restantes : List[File] = Nil var ordenados : List[(File, String)] = Nil var total : Int = 0 val folders : List[Folder] = ConfigFile.getFolderList val carpeta_destino : String = dest.getOrElse(ConfigFile.getFolder()) /** * Funcion = startFunction * Funcion principal que recupera los valores configurados y procesa cada archivo. */ def startFunction = { val filtros : List[FileKeyword] = ConfigFile.getFilters val tamFiltros = filtros.size var i : Int = 0 restantes = getFiles(ruta).sorted total = restantes.size for (i <- 0 to tamFiltros) { if (i == tamFiltros) // llego al ultimo, ejecuto los restantes filtrar(".", "", "INICIO") else filtrar(filtros(i).keyword, filtros(i).folder, filtros(i).match_pos) } val dest_folder = new File(ruta + "\\\\" + carpeta_destino) Try(deleteFolder(dest_folder)) match { case Failure(e) => out.append(e.getMessage) case Success(s) => if (!s) throw ErrorCrearCarpeta("No se pudo limpiar la carpeta destino") val p : Int = createFiles(dest_folder) if (p > 0) { out.append(s"($p/$total) archivos fueron organizados\\n") if (p == total) out.append("La operacion se completo satisfactoriamente\\n") else { out.append(s"Faltan ordenar ${total - p} archivos") throw ErrorTerminarProceso("No se procesaron todos los archivos") } } } } /** * Funcion = getFiles * Recupera todos los archivos de todas las carpetas involucradas para ordenarlos * Devuelve una lista */ private def getFiles(r: String): List[File] = ConfigFile.getFolderList.flatMap(folder => getFilesFrom(r + "\\\\" + folder.name)) /** * Funcion = filtrar * En base a una palabra, se fija en todos los archivos que estan en la lista 'restantes'. * Si encuentra algun archivo que matche con la palabra, se agrega a otra lista 'ordenados'. * Al final, la lista 'restantes' se reduce con los que encontro y la lista 'ordenados' crece. */ private def filtrar (word: String, folder: String, pos: String): Unit = { // Cada vez que encuentre un *** dentro del keyword, lo reemplazo con .* val regex = word.replaceAllLiterally("***", ".*") + ".*" val patron = Pattern.compile(regex) /** * Funcion = criteria * Valida si el nombre de un archivo cumple con un regex. 
*/ def criteria(x: File) : Boolean = { if (folder.isEmpty) return true val matcher = patron.matcher(x.getName) if (matcher.find) { val p = if (matcher.start == 0) "INICIO" else "MEDIO" x.getParentFile.getName == folder && p == pos } else false } var filtrados: List[File] = Nil for (f <- restantes.filter(criteria).sortBy(_.getName)){ ordenados = (f, getNewName(index, f.getParentFile.getName, f.getName)) :: ordenados index += 1 filtrados = f :: filtrados } restantes = restantes filterNot filtrados.contains } /** * Funcion = createFiles * Crea la carpeta y los archivos ordenados dentro con su secuencia asignada. * Devuelve la cantidad procesada. */ private def createFiles(dir: File) : Int = { var procesados = 0 if (ordenados.size == 0) throw ErrorCrearCarpeta("No existen archivos para ordenar") if (dir.mkdir) out.append(s"Directorio '${dir.getName}' creado.\\n") else throw ErrorCrearCarpeta("No se limpio la carpeta destino") out.append(s"Archivos creados:\\n") for ((src, name) <- ordenados.sortBy(_._2)){ val destin = new File(dir.getPath + "\\\\" + name) val fos = new FileOutputStream(destin) val fin = new FileInputStream(src) Try(fos.getChannel.transferFrom( fin.getChannel, 0, Long.MaxValue)) match { case Success(_) => procesados += 1 case Failure(_) => procesados += 0 } fos.close // IMPORTANTE CERRAR EL ARCHIVO fin.close // IMPORTANTE CERRAR EL ARCHIVO out.append(" " + name + "\\n") } procesados } /** * Funcion = getNewName * Construye el nuevo nombre del archivo, concatenando el indice con el prefijo (si es necesario) y su nombre original */ private def getNewName (i: Int, prefix: String, filename: String) : String = { if (folders.filter(_.name == prefix).map(_.show).head) getIndex(i,total) + "_" + prefix + "_" + filename else getIndex(i,total) + "_" + filename } /** * Funcion = getIndex * Construye el indice del archivo */ private def getIndex(i: Int, max: Int) = "0" * (max.toString.length - i.toString.length) + i.toString }
EricQuinterX/ordenador-de-archivos
src/main/scala/funciones.scala
Scala
apache-2.0
5,845
/*                     __                                               *\
**     ________ ___   / /  ___      __ ____  Scala.js API               **
**    / __/ __// _ | / /  / _ | __ / // __/  (c) 2013, LAMP/EPFL        **
**  __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \    http://scala-lang.org/     **
** /____/\___/_/ |_/____/_/ | |__/ /____/                               **
**                          |/____/                                     **
\*                                                                      */

package niocharset

import java.nio.charset._

/** Standard charsets.
 *  This is basically the same as [[java.nio.charset.StandardCharsets]], but
 *  it is also available when compiling with a JDK 6.
 */
object StandardCharsets {
  /** ISO-8859-1, aka latin1. */
  def ISO_8859_1: Charset = niocharset.ISO_8859_1

  /** US-ASCII. */
  def US_ASCII: Charset = niocharset.US_ASCII

  /** UTF-8. */
  def UTF_8: Charset = niocharset.UTF_8

  /** UTF-16 Big Endian without BOM. */
  def UTF_16BE: Charset = niocharset.UTF_16BE

  /** UTF-16 Little Endian without BOM. */
  def UTF_16LE: Charset = niocharset.UTF_16LE

  /** UTF-16 with an optional BOM.
   *  When encoding, Big Endian is always used.
   *  When decoding, the BOM specifies what endianness to use. If no BOM is
   *  found, it defaults to Big Endian.
   */
  def UTF_16: Charset = niocharset.UTF_16
}
cedricviaccoz/scala-native
javalib/src/main/scala/niocharset/StandardCharsets.scala
Scala
bsd-3-clause
1,396
package uk.co.turingatemyhamster
package owl2
package ast

/**
 *
 *
 * @author Matthew Pocock
 */
trait ObjectPropertyCardinalityRestrictionsModuleImpl extends owl2.ObjectPropertyCardinalityRestrictionsModule {
  importedModules : owl2.EntitiesLiteralsAnonymousIndividualsModule with owl2.IriModule {
    type UnlimitedNatural = BigInt
    type ClassExpression = ast.ClassExpression
  } =>

  override final type ObjectMaxCardinality = ast.ObjectMaxCardinality
  override final type ObjectMinCardinality = ast.ObjectMinCardinality
  override final type ObjectExactCardinality = ast.ObjectExactCardinality
}

case class ObjectMaxCardinality(objectPropertyExpression: ObjectPropertyExpression,
                                classExpression: Option[ClassExpression] = None,
                                cardinality: BigInt) extends ClassExpression

case class ObjectMinCardinality(objectPropertyExpression: ObjectPropertyExpression,
                                classExpression: Option[ClassExpression] = None,
                                cardinality: BigInt) extends ClassExpression

case class ObjectExactCardinality(objectPropertyExpression: ObjectPropertyExpression,
                                  classExpression: Option[ClassExpression] = None,
                                  cardinality: BigInt) extends ClassExpression
drdozer/owl2
core/src/main/scala/uk/co/turingatemyhamster/owl2/ast/PropertyCardinalityRestrictionsModuleAst.scala
Scala
apache-2.0
1,347
/*
 * Copyright (c) 2014 Snowplow Analytics Ltd. All rights reserved.
 *
 * This program is licensed to you under the Apache License Version 2.0,
 * and you may not use this file except in compliance with the Apache License Version 2.0.
 * You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the Apache License Version 2.0 is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
 */
package com.snowplowanalytics.snowplow.enrich
package hadoop
package outputs

// Cascading
import cascading.tuple.Fields
import cascading.tuple.TupleEntry
import cascading.tap.partition.Partition

// Scala
import scala.collection.JavaConversions._

// Scalaz
import scalaz._
import Scalaz._

// Iglu
import com.snowplowanalytics.iglu.client.SchemaKey

/**
 * Custom Partition to write out our JSONs into
 * schema-delimited paths.
 */
class ShreddedPartition(val partitionFields: Fields) extends Partition {

  def getPartitionFields(): Fields = partitionFields

  def getPathDepth(): Int = 4 // vendor/name/format/version

  def toPartition(tupleEntry: TupleEntry): String = {
    val fields = tupleEntry.asIterableOf(classOf[String]).toList
    val schemaUri = fields match {
      case key :: Nil => key
      case _ => throw new IllegalArgumentException(s"ShreddedPartition expects 1 field; got ${fields.size}")
    }

    // Round-tripping through a SchemaKey ensures we have a valid path
    SchemaKey.parse(schemaUri) match {
      case Failure(err) => throw new IllegalArgumentException(s"ShreddedPartition expects a valid Iglu-format URI as its path; got: ${err}")
      case Success(key) => key.toPath
    }
  }

  def toTuple(partition: String, tupleEntry: TupleEntry): Unit =
    throw new RuntimeException("ShreddedPartition's toTuple for reading not implemented")
}
mdavid/lessig-bigdata
lib/snowplow/3-enrich/scala-hadoop-shred/src/main/scala/com.snowplowanalytics.snowplow.enrich/hadoop/outputs/ShreddedPartition.scala
Scala
mit
2,098
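As the getPathDepth comment in ShreddedPartition indicates, the partition path carries the four SchemaKey components vendor/name/format/version; a hypothetical shredded type such as iglu:com.acme/link_click/jsonschema/1-0-0 would therefore land under com.acme/link_click/jsonschema/1-0-0, while any URI that SchemaKey.parse rejects fails fast with an IllegalArgumentException.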
import sbt._ import Keys._ object MacrosActivatorBuild extends Build { import BuildSettings._ lazy val root: Project = Project( "root", file("."), settings = buildSettings) aggregate (macros, demo) lazy val macros: Project = Project( "macros", file("macros"), settings = buildSettings ++ Seq(libraryDependencies <++= (scalaVersion)(v => Seq(("org.scala-lang" % "scala-compiler" % v), ("org.scala-lang" % "scala-reflect" % v))))) // Dependencies.scala_lang("scala-compiler", "scala-reflect"))) lazy val demo: Project = Project( "demo", file("demo"), settings = buildSettings) dependsOn (macros) } object Dependencies { def scala_lang(artifact: String): Def.Initialize[ModuleID] = (scalaVersion)("org.scala-lang" % artifact % _) } object BuildSettings { lazy val buildSettings = Defaults.defaultSettings ++ Seq( scalacOptions in Compile ++= Seq("-encoding", "UTF-8", "-target:jvm-1.7", "-deprecation", "-unchecked", "-Ywarn-dead-code"), javacOptions in Compile ++= Seq("-source", "1.6", "-target", "1.7", "-Xlint:unchecked", "-Xlint:deprecation", "-Xlint:-options"), javaOptions += "-Djava.util.logging.config.file=logging.properties", javaOptions += "-Xmx2G", //scalaVersion := "2.11.0-M5", outputStrategy := Some(StdoutOutput), fork := true, maxErrors := 1, addCompilerPlugin("org.scala-lang.plugins" % "macro-paradise" % "2.0.0-SNAPSHOT" cross CrossVersion.full), resolvers ++= Seq( Resolver.mavenLocal, Resolver.sonatypeRepo("releases"), Resolver.typesafeRepo("releases"), Resolver.typesafeRepo("snapshots"), Resolver.sonatypeRepo("snapshots") ), parallelExecution in Test := false ) } /* "org.specs2" %% "specs2" % "2.2.2" % "test" scalacOptions ++= Seq( "-unchecked", "-deprecation", "-Xlint", "-Ywarn-dead-code", "-language:_", "-target:jvm-1.7", "-encoding", "UTF-8" ) parallelExecution in Test := false */
eigengo/activator-macros
project/build.scala
Scala
apache-2.0
2,021
package beyond

import beyond.config.BeyondConfiguration
import play.api.libs.concurrent.Promise
import play.api.mvc._
import play.api.mvc.Results.InternalServerError

import scala.concurrent.ExecutionContext
import scala.concurrent.Future

object TimeoutFilter extends Filter {
  import ExecutionContext.Implicits.global

  def apply(next: (RequestHeader) => Future[Result])(request: RequestHeader): Future[Result] = {
    val timeout = BeyondConfiguration.requestTimeout
    val timeoutFuture = Promise.timeout("Timeout", timeout)
    val resultFuture = next(request)
    Future.firstCompletedOf(Seq(resultFuture, timeoutFuture)).map {
      case result: Result => result
      case errorMessage: String => InternalServerError(errorMessage)
    }
  }
}
SollmoStudio/beyond
core/app/beyond/TimeoutFilter.scala
Scala
apache-2.0
753
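The core pattern in TimeoutFilter, racing the real result against a timer and mapping the loser into an error response, can be sketched with plain scala.concurrent primitives; the Play-specific Promise.timeout and Result types are replaced by stand-ins here, so this is an illustration rather than the filter's actual dependencies:

import java.util.concurrent.{Executors, TimeUnit}
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.concurrent.duration._

object TimeoutRaceSketch {
  implicit val ec: ExecutionContext = ExecutionContext.global
  private val scheduler = Executors.newSingleThreadScheduledExecutor()

  // completes with `fallback` after `d`, standing in for Play's Promise.timeout
  def timeoutAfter[A](d: FiniteDuration, fallback: A): Future[A] = {
    val p = Promise[A]()
    scheduler.schedule(new Runnable { def run(): Unit = p.success(fallback) },
      d.toMillis, TimeUnit.MILLISECONDS)
    p.future
  }

  // whichever future finishes first decides the response
  def withTimeout(result: Future[String], d: FiniteDuration): Future[String] =
    Future.firstCompletedOf(Seq(result, timeoutAfter(d, "504 Gateway Timeout")))
}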
import scala.io._
import scala.actors._
import Actor._

object PageLoader {
  def getPageSize(url : String) = Source.fromURL(url).mkString.length
  def getPageText(url : String) = Source.fromURL(url).mkString
}

var urls = List("http://gmail.google.com",
                "https://www.linkedin.com/",
                "http://www.twitter.com")

def timeMethod(method: () => Unit) = {
  val start = System.nanoTime
  method()
  val end = System.nanoTime
  println("Method took " + (end - start)/1000000000.0 + " seconds")
}

def getPageSizeSequentially() = {
  for (url <- urls) {
    println ("Size for " + url + ":" + PageLoader.getPageSize(url))
  }
}

def getPageSizeConcurrently() = {
  val caller = self

  for (url <- urls) {
    actor { caller ! (url, PageLoader.getPageSize(url)) }
  }

  for ( i <- 1 to urls.size) {
    receive {
      case (url, size) => println ("Size for " + url + ":" + size)
    }
  }
}

println("Sequential run :")
timeMethod { getPageSizeSequentially }

println("Concurrent run :")
timeMethod { getPageSizeConcurrently }
brice/seven-adventure
Week4/day3/sizer_links.scala
Scala
gpl-3.0
1,082
package org.scalameta.invariants

import scala.compat.Platform.EOL

class InvariantFailedException(message: String) extends Exception(message)

object InvariantFailedException {
  def raise(invariant: String, failures: List[String], debuggees: Map[String, Any]): Nothing = {
    val mandatory = s"""
      |invariant failed:
      |when verifying $invariant
      |found that ${failures.head}
    """.trim.stripMargin.split('\n').mkString(EOL)
    val optionalFailures = failures.tail.headOption
      .map(_ => EOL + failures.tail.map("and also " + _).mkString(EOL))
      .getOrElse("")
    val optionalLocals =
      if (debuggees.nonEmpty)
        EOL + debuggees.toList.sortBy(_._1).map({ case (k, v) => s"where $k = $v" }).mkString(EOL)
      else ""
    throw new InvariantFailedException(mandatory + optionalFailures + optionalLocals)
  }
}
scalameta/scalameta
scalameta/common/shared/src/main/scala/org/scalameta/invariants/Exceptions.scala
Scala
bsd-3-clause
828
package akka.persistence.hbase.journal import org.apache.hadoop.conf.Configuration import org.apache.hadoop.hbase.client._ import org.apache.hadoop.hbase.util.Bytes import com.typesafe.config._ import org.apache.hadoop.hbase.{ HColumnDescriptor, HTableDescriptor } import akka.persistence.hbase.common.Const._ object HBaseJournalInit { import Bytes._ import collection.JavaConverters._ /** * Creates (or adds column family to existing table) table to be used with HBaseJournal. * * @return true if a modification was run on hbase (table created or family added) */ def createTable(config: Config, persistentConfig: String): Boolean = { val conf = getHBaseConfig(config, persistentConfig) val admin = new HBaseAdmin(conf) val journalConfig = config.getConfig(persistentConfig) val table = journalConfig.getString("table") val familyName = journalConfig.getString("family") val partitionCount = PARTITION_COUNT // Integer.parseInt(journalConfig.getString("partition.count")) try doInitTable(admin, table, familyName, partitionCount) finally admin.close() } private def doInitTable(admin: HBaseAdmin, tableName: String, familyName: String, partitionCount: Int): Boolean = { if (admin.tableExists(tableName)) { val tableDesc = admin.getTableDescriptor(toBytes(tableName)) if (tableDesc.getFamily(toBytes(familyName)) == null) { // target family does not exists, will add it. admin.addColumn(tableName, new HColumnDescriptor(familyName)) true } else { // existing table is OK, no modifications run. false } } else { val tableDesc = new HTableDescriptor(toBytes(tableName)) val familyDesc = genColumnFamily(toBytes(familyName), 1) tableDesc.addFamily(familyDesc) if (partitionCount > 1) { val splitPoints = getSplitKeys(partitionCount); admin.createTable(tableDesc, splitPoints) } else { admin.createTable(tableDesc) } true } } /** * Construct Configuration, passing in all `hbase.*` keys from the typesafe Config. */ def getHBaseConfig(config: Config, persistenceConfig: String): Configuration = { val c = new Configuration() @inline def hbaseKey(s: String) = "hbase." + s val persistenseConfig = config.getConfig(persistenceConfig) val hbaseConfig = persistenseConfig.getConfig("hbase") // TODO: does not cover all cases hbaseConfig.entrySet().asScala foreach { e => c.set(hbaseKey(e.getKey), e.getValue.unwrapped.toString) } c } private def genColumnFamily(family: Array[Byte], columnMaxVersion: Int): HColumnDescriptor = { val familyDesc: HColumnDescriptor = new HColumnDescriptor(family) .setInMemory(false) .setMaxVersions(columnMaxVersion); familyDesc } private def getSplitKeys(splitNum: Int, isPrint: Boolean = false): Array[Array[Byte]] = { val list = collection.mutable.ListBuffer.empty[Array[Byte]] for (i <- 1 until splitNum) { val keyBytes = collection.mutable.ListBuffer.empty[Byte] keyBytes ++= Bytes.toBytes(padNum(i, 2)) val zeroByte: Byte = Bytes.toBytes(0).tail(0) for (j <- 0 until 24) { keyBytes += zeroByte } val bytes = keyBytes.toArray if (isPrint) println(s" $i ${Bytes.toString(bytes)} ${renderBytes(bytes)}") list.append(bytes) } list.toArray } def padNum(l: Int, howLong: Int): String = String.valueOf(l).reverse.padTo(howLong, "0").reverse.mkString.substring(0, howLong) def renderBytes(bytes: Array[Byte]): String = { bytes.map("%02x".format(_)).mkString } }
hossx/akka-persistence-hbase
src/main/scala/akka/persistence/hbase/journal/HBaseJournalInit.scala
Scala
apache-2.0
3,653
package sky.dispatcher.example.Dispatcher

import akka.actor.{Props, ActorSystem}
import akka.routing.RoundRobinRouter
import com.typesafe.config.ConfigFactory
import sky.MsgEchoActor

/**
 * Created by szekai on 22/08/2014.
 */
object Example1 {

  def main(args: Array[String]): Unit = {
    val _system = ActorSystem.create("default-dispatcher",
      ConfigFactory.load().getConfig("MyDispatcherExample"))
    val actor = _system.actorOf(Props[MsgEchoActor].withDispatcher("defaultDispatcher").withRouter(
      RoundRobinRouter(5)))
    0 to 25 foreach {
      i => actor ! i
    }
    Thread.sleep(3000)
    _system.shutdown()
  }
}
szekai/akka-example
AkkaDispatcherExample/src/main/scala/sky/dispatcher/example/Dispatcher/Example1.scala
Scala
apache-2.0
633
/* * Copyright 2014–2018 SlamData Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package quasar.precog.common import quasar.blueeyes._, json._ import quasar.blueeyes.json.serialization.{ Extractor, Decomposer } import quasar.blueeyes.json.serialization.DefaultSerialization._ import quasar.blueeyes.json.serialization.Extractor._ import quasar.blueeyes.json.serialization.Versioned._ import scalaz._ import scalaz.std.string._ import shapeless.HNil sealed trait MetadataType object MetadataType { def toName(metadataType: MetadataType): String = metadataType match { case BooleanValueStats => "BooleanValueStats" case LongValueStats => "LongValueStats" case DoubleValueStats => "DoubleValueStats" case BigDecimalValueStats => "BigDecimalValueStats" case StringValueStats => "StringValueStats" } def fromName(name: String): Option[MetadataType] = name match { case "BooleanValueStats" => Option(BooleanValueStats) case "LongValueStats" => Option(LongValueStats) case "DoubleValueStats" => Option(DoubleValueStats) case "BigDecimalValueStats" => Option(BigDecimalValueStats) case "StringValueStats" => Option(StringValueStats) case _ => None } } sealed trait Metadata { def metadataType: MetadataType def fold[A](bf: BooleanValueStats => A, lf: LongValueStats => A, df: DoubleValueStats => A, bdf: BigDecimalValueStats => A, sf: StringValueStats => A): A def merge(that: Metadata): Option[Metadata] } object Metadata { implicit val MetadataDecomposer: Decomposer[Metadata] = new Decomposer[Metadata] { override def decompose(metadata: Metadata): JValue = { JObject(List(JField(MetadataType.toName(metadata.metadataType), metadata.fold(_.jv, _.jv, _.jv, _.jv, _.jv)))) } } implicit val MetadataExtractor: Extractor[Metadata] = new Extractor[Metadata] { override def validated(obj: JValue): Validation[Error, Metadata] = obj match { case metadata @ JObject(entries) if entries.size == 1 => { val (key, value) = entries.head MetadataType.fromName(key).map { case BooleanValueStats => value.validated[BooleanValueStats] case LongValueStats => value.validated[LongValueStats] case DoubleValueStats => value.validated[DoubleValueStats] case BigDecimalValueStats => value.validated[BigDecimalValueStats] case StringValueStats => value.validated[StringValueStats] } getOrElse { Failure(Invalid("Unknown metadata type: " + key)) } } case _ => Failure(Invalid("Invalid metadata entry: " + obj)) } } implicit val MetadataSemigroup = new Semigroup[Map[MetadataType, Metadata]] { def append(m1: Map[MetadataType, Metadata], m2: => Map[MetadataType, Metadata]) = m1.foldLeft(m2) { (acc, t) => val (mtype, meta) = t acc + (mtype -> acc.get(mtype).map(combineMetadata(_, meta)).getOrElse(meta)) } def combineMetadata(m1: Metadata, m2: Metadata) = m1.merge(m2).getOrElse(sys.error("Invalid attempt to combine incompatible metadata")) } } sealed trait MetadataStats extends Metadata { def count: Long } case class BooleanValueStats(count: Long, trueCount: Long) extends MetadataStats { def falseCount: Long = count - trueCount def probability: Double = trueCount.toDouble 
/ count def metadataType = BooleanValueStats def fold[A](bf: BooleanValueStats => A, lf: LongValueStats => A, df: DoubleValueStats => A, bdf: BigDecimalValueStats => A, sf: StringValueStats => A): A = bf(this) def merge(that: Metadata) = that match { case BooleanValueStats(count, trueCount) => Some(BooleanValueStats(this.count + count, this.trueCount + trueCount)) case _ => None } } object BooleanValueStats extends MetadataType { val schemaV1 = "count" :: "trueCount" :: HNil implicit val decomposerV1: Decomposer[BooleanValueStats] = decomposerV[BooleanValueStats](schemaV1, Some("1.0".v)) implicit val extractorV1: Extractor[BooleanValueStats] = extractorV[BooleanValueStats](schemaV1, Some("1.0".v)) } case class LongValueStats(count: Long, min: Long, max: Long) extends MetadataStats { def metadataType = LongValueStats def fold[A](bf: BooleanValueStats => A, lf: LongValueStats => A, df: DoubleValueStats => A, bdf: BigDecimalValueStats => A, sf: StringValueStats => A): A = lf(this) def merge(that: Metadata) = that match { case LongValueStats(count, min, max) => Some(LongValueStats(this.count + count, this.min.min(min), this.max.max(max))) case _ => None } } object LongValueStats extends MetadataType { val schemaV1 = "count" :: "min" :: "max" :: HNil implicit val decomposerV1: Decomposer[LongValueStats] = decomposerV[LongValueStats](schemaV1, Some("1.0".v)) implicit val extractorV1: Extractor[LongValueStats] = extractorV[LongValueStats](schemaV1, Some("1.0".v)) } case class DoubleValueStats(count: Long, min: Double, max: Double) extends MetadataStats { def metadataType = DoubleValueStats def fold[A](bf: BooleanValueStats => A, lf: LongValueStats => A, df: DoubleValueStats => A, bdf: BigDecimalValueStats => A, sf: StringValueStats => A): A = df(this) def merge(that: Metadata) = that match { case DoubleValueStats(count, min, max) => Some(DoubleValueStats(this.count + count, this.min min min, this.max max max)) case _ => None } } object DoubleValueStats extends MetadataType { val schemaV1 = "count" :: "min" :: "max" :: HNil implicit val decomposerV1: Decomposer[DoubleValueStats] = decomposerV[DoubleValueStats](schemaV1, Some("1.0".v)) implicit val extractorV1: Extractor[DoubleValueStats] = extractorV[DoubleValueStats](schemaV1, Some("1.0".v)) } case class BigDecimalValueStats(count: Long, min: BigDecimal, max: BigDecimal) extends MetadataStats { def metadataType = BigDecimalValueStats def fold[A](bf: BooleanValueStats => A, lf: LongValueStats => A, df: DoubleValueStats => A, bdf: BigDecimalValueStats => A, sf: StringValueStats => A): A = bdf(this) def merge(that: Metadata) = that match { case BigDecimalValueStats(count, min, max) => Some(BigDecimalValueStats(this.count + count, this.min min min, this.max max max)) case _ => None } } object BigDecimalValueStats extends MetadataType { val schemaV1 = "count" :: "min" :: "max" :: HNil implicit val decomposerV1: Decomposer[BigDecimalValueStats] = decomposerV[BigDecimalValueStats](schemaV1, Some("1.0".v)) implicit val extractorV1: Extractor[BigDecimalValueStats] = extractorV[BigDecimalValueStats](schemaV1, Some("1.0".v)) } case class StringValueStats(count: Long, min: String, max: String) extends MetadataStats { def metadataType = StringValueStats def fold[A](bf: BooleanValueStats => A, lf: LongValueStats => A, df: DoubleValueStats => A, bdf: BigDecimalValueStats => A, sf: StringValueStats => A): A = sf(this) def merge(that: Metadata) = that match { case StringValueStats(count, min, max) => Some(StringValueStats(this.count + count, 
scalaz.Order[String].min(this.min, min), scalaz.Order[String].max(this.max, max))) case _ => None } } object StringValueStats extends MetadataType { val schemaV1 = "count" :: "min" :: "max" :: HNil implicit val decomposerV1: Decomposer[StringValueStats] = decomposerV[StringValueStats](schemaV1, Some("1.0".v)) implicit val extractorV1: Extractor[StringValueStats] = extractorV[StringValueStats](schemaV1, Some("1.0".v)) }
jedesah/Quasar
blueeyes/src/main/scala/quasar/precog/common/Metadata.scala
Scala
apache-2.0
8,365
given {
  def foo = 1 // error
}
som-snytt/dotty
tests/neg/empty-given.scala
Scala
apache-2.0
33
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ly.stealth.mesos.mirrormaker import java.io._ import java.text.SimpleDateFormat import java.util import java.util.{Properties, Date} import org.apache.mesos.Protos import org.apache.mesos.Protos._ import scala.collection.JavaConversions._ import scala.collection.JavaConverters._ import scala.collection.mutable import scala.util.Try object Util { def parseList(s: String, entrySep: Char = ',', valueSep: Char = '=', nullValues: Boolean = true): List[(String, String)] = { def splitEscaped(s: String, sep: Char, unescape: Boolean = false): Array[String] = { val parts = new util.ArrayList[String]() var escaped = false var part = "" for (c <- s.toCharArray) { if (c == '\\\\' && !escaped) escaped = true else if (c == sep && !escaped) { parts.add(part) part = "" } else { if (escaped && !unescape) part += "\\\\" part += c escaped = false } } if (escaped) throw new IllegalArgumentException("open escaping") if (part != "") parts.add(part) parts.toArray(Array[String]()) } val result = new mutable.ListBuffer[(String, String)]() if (s == null) return result.toList for (entry <- splitEscaped(s, entrySep)) { if (entry.trim.isEmpty) throw new IllegalArgumentException(s) val pair = splitEscaped(entry, valueSep, unescape = true) val key: String = pair(0).trim val value: String = if (pair.length > 1) pair(1).trim else null if (value == null && !nullValues) throw new IllegalArgumentException(s) result += key -> value } result.toList } def parseMap(s: String, entrySep: Char = ',', valueSep: Char = '=', nullValues: Boolean = true): Map[String, String] = parseList(s, entrySep, valueSep, nullValues).toMap def formatList(list: Seq[(String, _ <: Any)], entrySep: Char = ',', valueSep: Char = '='): String = { def escape(s: String): String = { var result = "" for (c <- s.toCharArray) { if (c == entrySep || c == valueSep || c == '\\\\') result += "\\\\" result += c } result } var s = "" list.foreach { tuple => if (!s.isEmpty) s += entrySep s += escape(tuple._1) if (tuple._2 != null) s += valueSep + escape("" + tuple._2) } s } def formatMap(map: collection.Map[String, _ <: Any], entrySep: Char = ',', valueSep: Char = '='): String = formatList(map.toList, entrySep, valueSep) object Str { def dateTime(date: Date): String = { new SimpleDateFormat("yyyy-MM-dd hh:mm:ssX").format(date) } def framework(framework: FrameworkInfo): String = { var s = "" s += id(framework.getId.getValue) s += " name: " + framework.getName s += " host: " + framework.getHostname s += " failover_timeout: " + framework.getFailoverTimeout s } def master(master: MasterInfo): String = { var s = "" s += id(master.getId) s += " pid:" + master.getPid s += " host:" + master.getHostname s } def slave(slave: SlaveInfo): String = { var s = "" s += id(slave.getId.getValue) s += " host:" + 
slave.getHostname s += " port:" + slave.getPort s += " " + resources(slave.getResourcesList) s } def offer(offer: Offer): String = { var s = "" s += offer.getHostname + id(offer.getId.getValue) s += " " + resources(offer.getResourcesList) s += " " + attributes(offer.getAttributesList) s } def offers(offers: Iterable[Offer]): String = { var s = "" for (offer <- offers) s += (if (s.isEmpty) "" else "\\n") + Str.offer(offer) s } def task(task: TaskInfo): String = { var s = "" s += task.getTaskId.getValue s += " slave:" + id(task.getSlaveId.getValue) s += " " + resources(task.getResourcesList) s += " data:" + new String(task.getData.toByteArray) s } def resources(resources: util.List[Protos.Resource]): String = { var s = "" val order: util.List[String] = "cpus mem disk ports".split(" ").toList for (resource <- resources.sortBy(r => order.indexOf(r.getName))) { if (!s.isEmpty) s += " " s += resource.getName + ":" if (resource.hasScalar) s += "%.2f".format(resource.getScalar.getValue) if (resource.hasRanges) for (range <- resource.getRanges.getRangeList) s += "[" + range.getBegin + ".." + range.getEnd + "]" } s } def attributes(attributes: util.List[Protos.Attribute]): String = { var s = "" for (attr <- attributes) { if (!s.isEmpty) s += ";" s += attr.getName + ":" if (attr.hasText) s += attr.getText.getValue if (attr.hasScalar) s += "%.2f".format(attr.getScalar.getValue) } s } def taskStatus(status: TaskStatus): String = { var s = "" s += status.getTaskId.getValue s += " " + status.getState.name() s += " slave:" + id(status.getSlaveId.getValue) if (status.getState != TaskState.TASK_RUNNING) s += " reason:" + status.getReason.name() if (status.getMessage != null && status.getMessage != "") s += " message:" + status.getMessage s } def id(id: String): String = "#" + suffix(id, 5) def suffix(s: String, maxLen: Int): String = { if (s.length <= maxLen) return s s.substring(s.length - maxLen) } } def copyAndClose(in: InputStream, out: OutputStream): Unit = { val buffer = new Array[Byte](128 * 1024) var actuallyRead = 0 try { while (actuallyRead != -1) { actuallyRead = in.read(buffer) if (actuallyRead != -1) out.write(buffer, 0, actuallyRead) } } finally { try { in.close() } catch { case ignore: IOException => } try { out.close() } catch { case ignore: IOException => } } } def getScalarResources(offer: Offer, name: String): Double = { offer.getResourcesList.foldLeft(0.0) { (all, current) => if (current.getName == name) all + current.getScalar.getValue else all } } def getRangeResources(offer: Offer, name: String): List[Protos.Value.Range] = { offer.getResourcesList.foldLeft[List[Protos.Value.Range]](List()) { case (all, current) => if (current.getName == name) all ++ current.getRanges.getRangeList else all } } def loadConfigFile(cfg: File): Properties = { val props = new Properties() var is: InputStream = null try { is = new FileInputStream(cfg) props.load(is) } finally { Try(if (is != null) is.close()) } props } def mapTopProp(kvs: Map[_ <: AnyRef, _ <: AnyRef]):Properties = { val props = new Properties() props.putAll(kvs.asJava) props } def propsPrettyPrint(kvs: Map[_ <: AnyRef, _ <: AnyRef]): String = kvs.map { case (k, v) => s"$k=$v" }.mkString("\\n") /** * convert null and empty string (incl. spaces-only) to None */ def ?(str: String): Option[String] = Option(str).map(_.trim).filter(_.nonEmpty) /** * Take first non-empty element */ def resolve[A](args: Option[A]*): Option[A] = args.flatten.headOption }
stealthly/mirror-maker-mesos
src/main/scala/ly/stealth/mesos/mirrormaker/Util.scala
Scala
apache-2.0
8,173
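A minimal usage sketch for the parseList/parseMap/formatMap helpers in the Util entry above, assuming that file is on the classpath; the key/value strings here are made up for illustration only.

import ly.stealth.mesos.mirrormaker.Util

object UtilUsageSketch extends App {
  // Parse a comma-separated list of key=value pairs into an ordered list of tuples.
  val parsed = Util.parseList("cpus=0.5,mem=256,rack=us-east-1a")
  println(parsed) // List((cpus,0.5), (mem,256), (rack,us-east-1a))

  // parseMap returns the same data as a Map; entries without '=' get a null value
  // because nullValues defaults to true.
  val asMap = Util.parseMap("a=1,flag")
  println(asMap) // Map(a -> 1, flag -> null)

  // formatMap is the inverse operation; separators occurring inside keys or values
  // are escaped with a backslash so the string can be parsed back.
  val rendered = Util.formatMap(Map("path" -> "/tmp/a,b", "mode" -> "fast"))
  println(rendered)
  println(Util.parseMap(rendered)) // round-trips to the original map
}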
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.streaming.api.java import java.io.{Closeable, InputStream} import java.lang.{Boolean => JBoolean} import java.util.{List => JList, Map => JMap} import scala.annotation.varargs import scala.collection.JavaConverters._ import scala.reflect.ClassTag import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.Path import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat} import org.apache.spark.{SparkConf, SparkContext} import org.apache.spark.api.java.{JavaPairRDD, JavaRDD, JavaSparkContext} import org.apache.spark.api.java.function.{Function => JFunction, Function2 => JFunction2} import org.apache.spark.api.java.function.{Function0 => JFunction0} import org.apache.spark.deploy.SparkHadoopUtil import org.apache.spark.rdd.RDD import org.apache.spark.storage.StorageLevel import org.apache.spark.streaming._ import org.apache.spark.streaming.receiver.Receiver import org.apache.spark.streaming.scheduler.StreamingListener /** * A Java-friendly version of [[org.apache.spark.streaming.StreamingContext]] which is the main * entry point for Spark Streaming functionality. It provides methods to create * [[org.apache.spark.streaming.api.java.JavaDStream]] and * [[org.apache.spark.streaming.api.java.JavaPairDStream]] from input sources. The internal * org.apache.spark.api.java.JavaSparkContext (see core Spark documentation) can be accessed * using `context.sparkContext`. After creating and transforming DStreams, the streaming * computation can be started and stopped using `context.start()` and `context.stop()`, * respectively. `context.awaitTermination()` allows the current thread to wait for the * termination of a context by `stop()` or by an exception. */ class JavaStreamingContext(val ssc: StreamingContext) extends Closeable { /** * Create a StreamingContext. * @param master Name of the Spark Master * @param appName Name to be used when registering with the scheduler * @param batchDuration The time interval at which streaming data will be divided into batches */ def this(master: String, appName: String, batchDuration: Duration) = this(new StreamingContext(master, appName, batchDuration, null, Nil, Map())) /** * Create a StreamingContext. * @param master Name of the Spark Master * @param appName Name to be used when registering with the scheduler * @param batchDuration The time interval at which streaming data will be divided into batches * @param sparkHome The SPARK_HOME directory on the slave nodes * @param jarFile JAR file containing job code, to ship to cluster. This can be a path on the * local file system or an HDFS, HTTP, HTTPS, or FTP URL. 
*/ def this( master: String, appName: String, batchDuration: Duration, sparkHome: String, jarFile: String) = this(new StreamingContext(master, appName, batchDuration, sparkHome, Seq(jarFile), Map())) /** * Create a StreamingContext. * @param master Name of the Spark Master * @param appName Name to be used when registering with the scheduler * @param batchDuration The time interval at which streaming data will be divided into batches * @param sparkHome The SPARK_HOME directory on the slave nodes * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ def this( master: String, appName: String, batchDuration: Duration, sparkHome: String, jars: Array[String]) = this(new StreamingContext(master, appName, batchDuration, sparkHome, jars, Map())) /** * Create a StreamingContext. * @param master Name of the Spark Master * @param appName Name to be used when registering with the scheduler * @param batchDuration The time interval at which streaming data will be divided into batches * @param sparkHome The SPARK_HOME directory on the slave nodes * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes */ def this( master: String, appName: String, batchDuration: Duration, sparkHome: String, jars: Array[String], environment: JMap[String, String]) = this(new StreamingContext( master, appName, batchDuration, sparkHome, jars, environment.asScala)) /** * Create a JavaStreamingContext using an existing JavaSparkContext. * @param sparkContext The underlying JavaSparkContext to use * @param batchDuration The time interval at which streaming data will be divided into batches */ def this(sparkContext: JavaSparkContext, batchDuration: Duration) = this(new StreamingContext(sparkContext.sc, batchDuration)) /** * Create a JavaStreamingContext using a SparkConf configuration. * @param conf A Spark application configuration * @param batchDuration The time interval at which streaming data will be divided into batches */ def this(conf: SparkConf, batchDuration: Duration) = this(new StreamingContext(conf, batchDuration)) /** * Recreate a JavaStreamingContext from a checkpoint file. * @param path Path to the directory that was specified as the checkpoint directory */ def this(path: String) = this(new StreamingContext(path, SparkHadoopUtil.get.conf)) /** * Re-creates a JavaStreamingContext from a checkpoint file. * @param path Path to the directory that was specified as the checkpoint directory * */ def this(path: String, hadoopConf: Configuration) = this(new StreamingContext(path, hadoopConf)) /** The underlying SparkContext */ val sparkContext = new JavaSparkContext(ssc.sc) /** * Create an input stream from network source hostname:port. Data is received using * a TCP socket and the receive bytes is interpreted as UTF8 encoded \\n delimited * lines. * @param hostname Hostname to connect to for receiving data * @param port Port to connect to for receiving data * @param storageLevel Storage level to use for storing the received objects */ def socketTextStream( hostname: String, port: Int, storageLevel: StorageLevel ): JavaReceiverInputDStream[String] = { ssc.socketTextStream(hostname, port, storageLevel) } /** * Create an input stream from network source hostname:port. Data is received using * a TCP socket and the receive bytes is interpreted as UTF8 encoded \\n delimited * lines. 
Storage level of the data will be the default StorageLevel.MEMORY_AND_DISK_SER_2. * @param hostname Hostname to connect to for receiving data * @param port Port to connect to for receiving data */ def socketTextStream(hostname: String, port: Int): JavaReceiverInputDStream[String] = { ssc.socketTextStream(hostname, port) } /** * Create an input stream from network source hostname:port. Data is received using * a TCP socket and the receive bytes it interpreted as object using the given * converter. * @param hostname Hostname to connect to for receiving data * @param port Port to connect to for receiving data * @param converter Function to convert the byte stream to objects * @param storageLevel Storage level to use for storing the received objects * @tparam T Type of the objects received (after converting bytes to objects) */ def socketStream[T]( hostname: String, port: Int, converter: JFunction[InputStream, java.lang.Iterable[T]], storageLevel: StorageLevel) : JavaReceiverInputDStream[T] = { def fn: (InputStream) => Iterator[T] = (x: InputStream) => converter.call(x).iterator().asScala implicit val cmt: ClassTag[T] = implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[T]] ssc.socketStream(hostname, port, fn, storageLevel) } /** * Create an input stream that monitors a Hadoop-compatible filesystem * for new files and reads them as text files (using key as LongWritable, value * as Text and input format as TextInputFormat). Files must be written to the * monitored directory by "moving" them from another location within the same * file system. File names starting with . are ignored. * The text files must be encoded as UTF-8. * * @param directory HDFS directory to monitor for new file */ def textFileStream(directory: String): JavaDStream[String] = { ssc.textFileStream(directory) } /** * Create an input stream that monitors a Hadoop-compatible filesystem * for new files and reads them as flat binary files with fixed record lengths, * yielding byte arrays * * @param directory HDFS directory to monitor for new files * @param recordLength The length at which to split the records * * @note We ensure that the byte array for each record in the * resulting RDDs of the DStream has the provided record length. */ def binaryRecordsStream(directory: String, recordLength: Int): JavaDStream[Array[Byte]] = { ssc.binaryRecordsStream(directory, recordLength) } /** * Create an input stream from network source hostname:port, where data is received * as serialized blocks (serialized using the Spark's serializer) that can be directly * pushed into the block manager without deserializing them. This is the most efficient * way to receive data. * @param hostname Hostname to connect to for receiving data * @param port Port to connect to for receiving data * @param storageLevel Storage level to use for storing the received objects * @tparam T Type of the objects in the received blocks */ def rawSocketStream[T]( hostname: String, port: Int, storageLevel: StorageLevel): JavaReceiverInputDStream[T] = { implicit val cmt: ClassTag[T] = implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[T]] JavaReceiverInputDStream.fromReceiverInputDStream( ssc.rawSocketStream(hostname, port, storageLevel)) } /** * Create an input stream from network source hostname:port, where data is received * as serialized blocks (serialized using the Spark's serializer) that can be directly * pushed into the block manager without deserializing them. This is the most efficient * way to receive data. 
* @param hostname Hostname to connect to for receiving data * @param port Port to connect to for receiving data * @tparam T Type of the objects in the received blocks */ def rawSocketStream[T](hostname: String, port: Int): JavaReceiverInputDStream[T] = { implicit val cmt: ClassTag[T] = implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[T]] JavaReceiverInputDStream.fromReceiverInputDStream( ssc.rawSocketStream(hostname, port)) } /** * Create an input stream that monitors a Hadoop-compatible filesystem * for new files and reads them using the given key-value types and input format. * Files must be written to the monitored directory by "moving" them from another * location within the same file system. File names starting with . are ignored. * @param directory HDFS directory to monitor for new file * @param kClass class of key for reading HDFS file * @param vClass class of value for reading HDFS file * @param fClass class of input format for reading HDFS file * @tparam K Key type for reading HDFS file * @tparam V Value type for reading HDFS file * @tparam F Input format for reading HDFS file */ def fileStream[K, V, F <: NewInputFormat[K, V]]( directory: String, kClass: Class[K], vClass: Class[V], fClass: Class[F]): JavaPairInputDStream[K, V] = { implicit val cmk: ClassTag[K] = ClassTag(kClass) implicit val cmv: ClassTag[V] = ClassTag(vClass) implicit val cmf: ClassTag[F] = ClassTag(fClass) ssc.fileStream[K, V, F](directory) } /** * Create an input stream that monitors a Hadoop-compatible filesystem * for new files and reads them using the given key-value types and input format. * Files must be written to the monitored directory by "moving" them from another * location within the same file system. File names starting with . are ignored. * @param directory HDFS directory to monitor for new file * @param kClass class of key for reading HDFS file * @param vClass class of value for reading HDFS file * @param fClass class of input format for reading HDFS file * @param filter Function to filter paths to process * @param newFilesOnly Should process only new files and ignore existing files in the directory * @tparam K Key type for reading HDFS file * @tparam V Value type for reading HDFS file * @tparam F Input format for reading HDFS file */ def fileStream[K, V, F <: NewInputFormat[K, V]]( directory: String, kClass: Class[K], vClass: Class[V], fClass: Class[F], filter: JFunction[Path, JBoolean], newFilesOnly: Boolean): JavaPairInputDStream[K, V] = { implicit val cmk: ClassTag[K] = ClassTag(kClass) implicit val cmv: ClassTag[V] = ClassTag(vClass) implicit val cmf: ClassTag[F] = ClassTag(fClass) def fn: (Path) => Boolean = (x: Path) => filter.call(x).booleanValue() ssc.fileStream[K, V, F](directory, fn, newFilesOnly) } /** * Create an input stream that monitors a Hadoop-compatible filesystem * for new files and reads them using the given key-value types and input format. * Files must be written to the monitored directory by "moving" them from another * location within the same file system. File names starting with . are ignored. 
* @param directory HDFS directory to monitor for new file * @param kClass class of key for reading HDFS file * @param vClass class of value for reading HDFS file * @param fClass class of input format for reading HDFS file * @param filter Function to filter paths to process * @param newFilesOnly Should process only new files and ignore existing files in the directory * @param conf Hadoop configuration * @tparam K Key type for reading HDFS file * @tparam V Value type for reading HDFS file * @tparam F Input format for reading HDFS file */ def fileStream[K, V, F <: NewInputFormat[K, V]]( directory: String, kClass: Class[K], vClass: Class[V], fClass: Class[F], filter: JFunction[Path, JBoolean], newFilesOnly: Boolean, conf: Configuration): JavaPairInputDStream[K, V] = { implicit val cmk: ClassTag[K] = ClassTag(kClass) implicit val cmv: ClassTag[V] = ClassTag(vClass) implicit val cmf: ClassTag[F] = ClassTag(fClass) def fn: (Path) => Boolean = (x: Path) => filter.call(x).booleanValue() ssc.fileStream[K, V, F](directory, fn, newFilesOnly, conf) } /** * Create an input stream from a queue of RDDs. In each batch, * it will process either one or all of the RDDs returned by the queue. * * @param queue Queue of RDDs * @tparam T Type of objects in the RDD * * @note * 1. Changes to the queue after the stream is created will not be recognized. * 2. Arbitrary RDDs can be added to `queueStream`, there is no way to recover data of * those RDDs, so `queueStream` doesn't support checkpointing. */ def queueStream[T](queue: java.util.Queue[JavaRDD[T]]): JavaDStream[T] = { implicit val cm: ClassTag[T] = implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[T]] val sQueue = new scala.collection.mutable.Queue[RDD[T]] sQueue.enqueue(queue.asScala.map(_.rdd).toSeq: _*) ssc.queueStream(sQueue) } /** * Create an input stream from a queue of RDDs. In each batch, * it will process either one or all of the RDDs returned by the queue. * * @param queue Queue of RDDs * @param oneAtATime Whether only one RDD should be consumed from the queue in every interval * @tparam T Type of objects in the RDD * * @note * 1. Changes to the queue after the stream is created will not be recognized. * 2. Arbitrary RDDs can be added to `queueStream`, there is no way to recover data of * those RDDs, so `queueStream` doesn't support checkpointing. */ def queueStream[T]( queue: java.util.Queue[JavaRDD[T]], oneAtATime: Boolean ): JavaInputDStream[T] = { implicit val cm: ClassTag[T] = implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[T]] val sQueue = new scala.collection.mutable.Queue[RDD[T]] sQueue.enqueue(queue.asScala.map(_.rdd).toSeq: _*) ssc.queueStream(sQueue, oneAtATime) } /** * Create an input stream from a queue of RDDs. In each batch, * it will process either one or all of the RDDs returned by the queue. * * @note * 1. Changes to the queue after the stream is created will not be recognized. * 2. Arbitrary RDDs can be added to `queueStream`, there is no way to recover data of * those RDDs, so `queueStream` doesn't support checkpointing. 
* * @param queue Queue of RDDs * @param oneAtATime Whether only one RDD should be consumed from the queue in every interval * @param defaultRDD Default RDD is returned by the DStream when the queue is empty * @tparam T Type of objects in the RDD */ def queueStream[T]( queue: java.util.Queue[JavaRDD[T]], oneAtATime: Boolean, defaultRDD: JavaRDD[T]): JavaInputDStream[T] = { implicit val cm: ClassTag[T] = implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[T]] val sQueue = new scala.collection.mutable.Queue[RDD[T]] sQueue.enqueue(queue.asScala.map(_.rdd).toSeq: _*) ssc.queueStream(sQueue, oneAtATime, defaultRDD.rdd) } /** * Create an input stream with any arbitrary user implemented receiver. * Find more details at: http://spark.apache.org/docs/latest/streaming-custom-receivers.html * @param receiver Custom implementation of Receiver */ def receiverStream[T](receiver: Receiver[T]): JavaReceiverInputDStream[T] = { implicit val cm: ClassTag[T] = implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[T]] ssc.receiverStream(receiver) } /** * Create a unified DStream from multiple DStreams of the same type and same slide duration. */ @varargs def union[T](jdstreams: JavaDStream[T]*): JavaDStream[T] = { require(jdstreams.nonEmpty, "Union called on no streams") implicit val cm: ClassTag[T] = jdstreams.head.classTag ssc.union(jdstreams.map(_.dstream))(cm) } /** * Create a unified DStream from multiple DStreams of the same type and same slide duration. */ @varargs def union[K, V](jdstreams: JavaPairDStream[K, V]*): JavaPairDStream[K, V] = { require(jdstreams.nonEmpty, "Union called on no streams") implicit val cm: ClassTag[(K, V)] = jdstreams.head.classTag implicit val kcm: ClassTag[K] = jdstreams.head.kManifest implicit val vcm: ClassTag[V] = jdstreams.head.vManifest new JavaPairDStream[K, V](ssc.union(jdstreams.map(_.dstream))(cm))(kcm, vcm) } /** * Create a new DStream in which each RDD is generated by applying a function on RDDs of * the DStreams. The order of the JavaRDDs in the transform function parameter will be the * same as the order of corresponding DStreams in the list. * * @note For adding a JavaPairDStream in the list of JavaDStreams, convert it to a * JavaDStream using [[org.apache.spark.streaming.api.java.JavaPairDStream]].toJavaDStream(). * In the transform function, convert the JavaRDD corresponding to that JavaDStream to * a JavaPairRDD using org.apache.spark.api.java.JavaPairRDD.fromJavaRDD(). */ def transform[T]( dstreams: JList[JavaDStream[_]], transformFunc: JFunction2[JList[JavaRDD[_]], Time, JavaRDD[T]] ): JavaDStream[T] = { implicit val cmt: ClassTag[T] = implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[T]] val scalaTransformFunc = (rdds: Seq[RDD[_]], time: Time) => { val jrdds = rdds.map(JavaRDD.fromRDD(_)).asJava transformFunc.call(jrdds, time).rdd } ssc.transform(dstreams.asScala.map(_.dstream).toSeq, scalaTransformFunc) } /** * Create a new DStream in which each RDD is generated by applying a function on RDDs of * the DStreams. The order of the JavaRDDs in the transform function parameter will be the * same as the order of corresponding DStreams in the list. * * @note For adding a JavaPairDStream in the list of JavaDStreams, convert it to * a JavaDStream using [[org.apache.spark.streaming.api.java.JavaPairDStream]].toJavaDStream(). * In the transform function, convert the JavaRDD corresponding to that JavaDStream to * a JavaPairRDD using org.apache.spark.api.java.JavaPairRDD.fromJavaRDD(). 
*/ def transformToPair[K, V]( dstreams: JList[JavaDStream[_]], transformFunc: JFunction2[JList[JavaRDD[_]], Time, JavaPairRDD[K, V]] ): JavaPairDStream[K, V] = { implicit val cmk: ClassTag[K] = implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[K]] implicit val cmv: ClassTag[V] = implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[V]] val scalaTransformFunc = (rdds: Seq[RDD[_]], time: Time) => { val jrdds = rdds.map(JavaRDD.fromRDD(_)).asJava transformFunc.call(jrdds, time).rdd } ssc.transform(dstreams.asScala.map(_.dstream).toSeq, scalaTransformFunc) } /** * Sets the context to periodically checkpoint the DStream operations for master * fault-tolerance. The graph will be checkpointed every batch interval. * @param directory HDFS-compatible directory where the checkpoint data will be reliably stored */ def checkpoint(directory: String) { ssc.checkpoint(directory) } /** * Sets each DStreams in this context to remember RDDs it generated in the last given duration. * DStreams remember RDDs only for a limited duration of duration and releases them for garbage * collection. This method allows the developer to specify how long to remember the RDDs ( * if the developer wishes to query old data outside the DStream computation). * @param duration Minimum duration that each DStream should remember its RDDs */ def remember(duration: Duration) { ssc.remember(duration) } /** * Add a [[org.apache.spark.streaming.scheduler.StreamingListener]] object for * receiving system events related to streaming. */ def addStreamingListener(streamingListener: StreamingListener) { ssc.addStreamingListener(streamingListener) } /** * :: DeveloperApi :: * * Return the current state of the context. The context can be in three possible states - * <ul> * <li> * StreamingContextState.INITIALIZED - The context has been created, but not been started yet. * Input DStreams, transformations and output operations can be created on the context. * </li> * <li> * StreamingContextState.ACTIVE - The context has been started, and been not stopped. * Input DStreams, transformations and output operations cannot be created on the context. * </li> * <li> * StreamingContextState.STOPPED - The context has been stopped and cannot be used any more. * </li> * </ul> */ def getState(): StreamingContextState = { ssc.getState() } /** * Start the execution of the streams. */ def start(): Unit = { ssc.start() } /** * Wait for the execution to stop. Any exceptions that occurs during the execution * will be thrown in this thread. */ @throws[InterruptedException] def awaitTermination(): Unit = { ssc.awaitTermination() } /** * Wait for the execution to stop. Any exceptions that occurs during the execution * will be thrown in this thread. * * @param timeout time to wait in milliseconds * @return `true` if it's stopped; or throw the reported error during the execution; or `false` * if the waiting time elapsed before returning from the method. */ @throws[InterruptedException] def awaitTerminationOrTimeout(timeout: Long): Boolean = { ssc.awaitTerminationOrTimeout(timeout) } /** * Stop the execution of the streams. Will stop the associated JavaSparkContext as well. */ def stop(): Unit = { ssc.stop() } /** * Stop the execution of the streams. * @param stopSparkContext Stop the associated SparkContext or not */ def stop(stopSparkContext: Boolean): Unit = ssc.stop(stopSparkContext) /** * Stop the execution of the streams. 
* @param stopSparkContext Stop the associated SparkContext or not * @param stopGracefully Stop gracefully by waiting for the processing of all * received data to be completed */ def stop(stopSparkContext: Boolean, stopGracefully: Boolean): Unit = { ssc.stop(stopSparkContext, stopGracefully) } override def close(): Unit = stop() } /** * JavaStreamingContext object contains a number of utility functions. */ object JavaStreamingContext { /** * Either recreate a StreamingContext from checkpoint data or create a new StreamingContext. * If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be * recreated from the checkpoint data. If the data does not exist, then the provided factory * will be used to create a JavaStreamingContext. * * @param checkpointPath Checkpoint directory used in an earlier JavaStreamingContext program * @param creatingFunc Function to create a new JavaStreamingContext */ def getOrCreate( checkpointPath: String, creatingFunc: JFunction0[JavaStreamingContext] ): JavaStreamingContext = { val ssc = StreamingContext.getOrCreate(checkpointPath, () => { creatingFunc.call().ssc }) new JavaStreamingContext(ssc) } /** * Either recreate a StreamingContext from checkpoint data or create a new StreamingContext. * If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be * recreated from the checkpoint data. If the data does not exist, then the provided factory * will be used to create a JavaStreamingContext. * * @param checkpointPath Checkpoint directory used in an earlier StreamingContext program * @param creatingFunc Function to create a new JavaStreamingContext * @param hadoopConf Hadoop configuration if necessary for reading from any HDFS compatible * file system */ def getOrCreate( checkpointPath: String, creatingFunc: JFunction0[JavaStreamingContext], hadoopConf: Configuration ): JavaStreamingContext = { val ssc = StreamingContext.getOrCreate(checkpointPath, () => { creatingFunc.call().ssc }, hadoopConf) new JavaStreamingContext(ssc) } /** * Either recreate a StreamingContext from checkpoint data or create a new StreamingContext. * If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be * recreated from the checkpoint data. If the data does not exist, then the provided factory * will be used to create a JavaStreamingContext. * * @param checkpointPath Checkpoint directory used in an earlier StreamingContext program * @param creatingFunc Function to create a new JavaStreamingContext * @param hadoopConf Hadoop configuration if necessary for reading from any HDFS compatible * file system * @param createOnError Whether to create a new JavaStreamingContext if there is an * error in reading checkpoint data. */ def getOrCreate( checkpointPath: String, creatingFunc: JFunction0[JavaStreamingContext], hadoopConf: Configuration, createOnError: Boolean ): JavaStreamingContext = { val ssc = StreamingContext.getOrCreate(checkpointPath, () => { creatingFunc.call().ssc }, hadoopConf, createOnError) new JavaStreamingContext(ssc) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to StreamingContext. */ def jarOfClass(cls: Class[_]): Array[String] = SparkContext.jarOfClass(cls).toArray }
pgandhi999/spark
streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingContext.scala
Scala
apache-2.0
29,080
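The class documentation above describes the streaming lifecycle (create a context, build DStreams, start, await, stop). A hedged sketch of that lifecycle, written in Scala against the Java-friendly API; the host, port, and timeouts are placeholders, not values from the source.

import org.apache.spark.SparkConf
import org.apache.spark.streaming.Duration
import org.apache.spark.streaming.api.java.JavaStreamingContext

object JavaStreamingContextSketch {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("jssc-sketch").setMaster("local[2]")
    // Batch interval of 1 second.
    val jssc = new JavaStreamingContext(conf, new Duration(1000))

    // Receive UTF-8, newline-delimited text over TCP (host/port are placeholders).
    val lines = jssc.socketTextStream("localhost", 9999)
    lines.print() // output operation: print the first elements of each batch

    jssc.start()                          // start the computation
    jssc.awaitTerminationOrTimeout(30000) // wait up to 30 seconds
    jssc.stop()                           // stop the context and its SparkContext
  }
}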
package com.example

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorSystem, Props}
import akka.io.IO
import spray.can.Http
import spray.can.server.Stats

import scala.concurrent.duration._

object Boot extends App {

  // we need an ActorSystem to host our application in
  implicit val system = ActorSystem("on-spray-can")

  // create and start our service actor
  val service = system.actorOf(Props[MyServiceActor], "demo-service")
  val statsActor = system.actorOf(Props(new StatsActor), "stats")

  implicit val ec = system.dispatcher

  // start a new HTTP server on port 8080 with our service actor as the handler
  IO(Http).tell(Http.Bind(service, interface = "0.0.0.0", port = 8080), statsActor)
}

class StatsActor extends Actor {
  override def receive: Receive = {
    case b: Http.Bound => {
      println(b)
      implicit val ec = context.dispatcher
      context.system.scheduler.schedule(
        Duration(1, TimeUnit.SECONDS), Duration(1, TimeUnit.SECONDS), sender(), Http.GetStats)
    }
    case s: Stats =>
      println(s"Total: ${s.totalConnections} Open:${s.openConnections}")
  }
}
rafax/c100k
spray/src/main/scala/com/example/Boot.scala
Scala
gpl-2.0
1,114
package im.actor.server.email

import im.actor.config.ActorConfig

import scala.util.Try

import com.github.kxbmap.configs.syntax._
import com.typesafe.config.Config

private[email] case class Sender(address: String, name: String, prefix: String)

private[email] case class Smtp(host: String, port: Int, username: String, password: String, tls: Boolean)

case class EmailConfig(
  sender: Sender,
  smtp: Smtp
)

object EmailConfig {
  def load(config: Config): Try[EmailConfig] =
    Try(config.extract[EmailConfig])

  def load: Try[EmailConfig] =
    load(ActorConfig.load().getConfig("services.email"))
}
EaglesoftZJ/actor-platform
actor-server/actor-email/src/main/scala/im/actor/server/email/EmailConfig.scala
Scala
agpl-3.0
600
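A hedged sketch of loading the EmailConfig above from an inline Typesafe Config block. The key names are an assumption: they mirror the Sender/Smtp case-class fields, since extraction happens via the configs-syntax `extract` call.

import com.typesafe.config.ConfigFactory
import im.actor.server.email.EmailConfig

object EmailConfigSketch extends App {
  // Hypothetical config; field names assumed to match the case classes above.
  val raw = ConfigFactory.parseString(
    """
      |sender {
      |  address = "noreply@example.com"
      |  name = "Example"
      |  prefix = "[example]"
      |}
      |smtp {
      |  host = "smtp.example.com"
      |  port = 587
      |  username = "user"
      |  password = "secret"
      |  tls = true
      |}
    """.stripMargin)

  EmailConfig.load(raw) match {
    case scala.util.Success(cfg) => println(s"email config loaded: $cfg")
    case scala.util.Failure(err) => println(s"bad email config: ${err.getMessage}")
  }
}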
import scala.quoted.*

object Macros {

  inline def test(): String = ${ testImpl }

  private def testImpl(using Quotes) : Expr[String] = {
    import quotes.reflect.*
    val classSym = TypeRepr.of[Function1].classSymbol.get
    classSym.declaredMethod("apply")
    classSym.declaredMethods
    classSym.memberMethod("apply")
    Expr(classSym.memberMethods.map(_.name).sorted.mkString("\n"))
  }
}
lampepfl/dotty
tests/run-macros/i6518/Macro_1.scala
Scala
apache-2.0
402
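In the dotty run-macros test layout this macro lives in Macro_1.scala and is called from a file compiled in a later run; a minimal sketch of that call site (the object name is illustrative).

// Must be compiled after Macro_1.scala, since inline macros have to be
// defined in a previous compilation unit.
object Test {
  def main(args: Array[String]): Unit =
    println(Macros.test()) // prints the sorted member-method names of Function1
}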
package com.datawizards.sparklocal.impl.scala.accumulator

import java.lang

import com.datawizards.sparklocal.accumulator.{AccumulatorV2API, LongAccumulatorAPI}

class LongAccumulatorAPIScalaImpl(name: Option[String]=None)
  extends AccumulatorV2APIScalaImpl[java.lang.Long, java.lang.Long](name)
  with LongAccumulatorAPI {

  private var _sum = 0L
  private var _count = 0L

  override def isZero: Boolean = _sum == 0L && _count == 0

  override def copy(): AccumulatorV2API[lang.Long, lang.Long] = {
    val newAcc = new LongAccumulatorAPIScalaImpl
    newAcc._count = this._count
    newAcc._sum = this._sum
    newAcc
  }

  override def reset(): Unit = {
    _sum = 0L
    _count = 0L
  }

  override def add(v: lang.Long): Unit = {
    _sum += v
    _count += 1
  }

  override def merge(other: AccumulatorV2API[lang.Long, lang.Long]): Unit = other match {
    case a: LongAccumulatorAPIScalaImpl =>
      _sum += a._sum
      _count += a._count
    case _ =>
      throw new UnsupportedOperationException(
        s"Cannot merge ${this.getClass.getName} with ${other.getClass.getName}")
  }

  override def value: lang.Long = _sum

  override def add(v: Long): Unit = {
    _sum += v
    _count += 1
  }

  override def count: Long = _count

  override def sum: Long = _sum

  override def avg: Double = _sum.toDouble / _count
}
piotr-kalanski/spark-local
src/main/scala/com/datawizards/sparklocal/impl/scala/accumulator/LongAccumulatorAPIScalaImpl.scala
Scala
apache-2.0
1,339
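A short usage sketch of the accumulator above, exercising add, merge and the count/sum/avg read-outs; it assumes the spark-local classes from this entry are on the classpath.

import com.datawizards.sparklocal.impl.scala.accumulator.LongAccumulatorAPIScalaImpl

object LongAccumulatorSketch extends App {
  val acc = new LongAccumulatorAPIScalaImpl(Some("records"))
  Seq(1L, 2L, 3L).foreach(v => acc.add(v))

  // A second accumulator merged in, e.g. the contribution of another partition.
  val other = new LongAccumulatorAPIScalaImpl(Some("records"))
  other.add(10L)
  acc.merge(other)

  println(acc.count) // 4
  println(acc.sum)   // 16
  println(acc.avg)   // 4.0
}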
package debop4s.data.slick3.customtypes

/**
 * A custom column type for encrypted strings.
 *
 * @author sunghyouk.bae@gmail.com
 */
case class EncryptedString(text: String) {

  override def equals(obj: scala.Any): Boolean = {
    obj != null &&
    obj.isInstanceOf[EncryptedString] &&
    obj.asInstanceOf[EncryptedString].text.equals(text)
  }
}
debop/debop4s
debop4s-data-slick3/src/main/scala/debop4s/data/slick3/customtypes/EncryptedString.scala
Scala
apache-2.0
403
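A hedged sketch of how a wrapper like EncryptedString is typically registered with Slick 3 via MappedColumnType. The profile import, table, and the encrypt/decrypt functions are assumptions for illustration (identity-style stand-ins), not the project's real crypto or schema.

import debop4s.data.slick3.customtypes.EncryptedString
import slick.jdbc.H2Profile.api._

object EncryptedStringColumnSketch {
  // Stand-in transforms; a real setup would encrypt on write and decrypt on read.
  private def encrypt(plain: String): String = plain.reverse
  private def decrypt(stored: String): String = stored.reverse

  // Map EncryptedString <-> the string value actually stored in the database.
  implicit val encryptedStringColumnType: BaseColumnType[EncryptedString] =
    MappedColumnType.base[EncryptedString, String](
      es => encrypt(es.text),
      stored => EncryptedString(decrypt(stored))
    )

  // Hypothetical table using the mapped column.
  class Users(tag: Tag) extends Table[(Long, EncryptedString)](tag, "users") {
    def id  = column[Long]("id", O.PrimaryKey, O.AutoInc)
    def ssn = column[EncryptedString]("ssn") // uses the implicit mapping above
    def * = (id, ssn)
  }
}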
import java.io._

object LinkExtractor2 extends App {
  val inFile = io.Source.fromFile("./page.html")
  val outFile = new PrintWriter(new File("./output.txt"))
  val anchor = """<a href="([^"]*)"[^>]*>""".r
  val links = for (line <- inFile.getLines) {
    for (link <- anchor.findAllIn(line)) {
      outFile.write(s"${link}\n")
    }
  }
  inFile.close
  outFile.close
}
luzhuomi/learn_you_a_scala_for_great_good
exercise/regex/LinkExtractor2.scala
Scala
apache-2.0
404
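A small variant sketch of the extractor above: instead of writing whole anchor tags, it uses the regex capture group to emit only the href values, and closes the resources in a finally block. File names are placeholders.

import java.io.{File, PrintWriter}

object HrefExtractorSketch extends App {
  val anchor = """<a href="([^"]*)"[^>]*>""".r

  val inFile = io.Source.fromFile("./page.html")
  val outFile = new PrintWriter(new File("./hrefs.txt"))
  try {
    for {
      line <- inFile.getLines()
      m    <- anchor.findAllMatchIn(line)
    } outFile.println(m.group(1)) // just the URL captured by the first group
  } finally {
    inFile.close()
    outFile.close()
  }
}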
/* * Copyright (C) Lightbend Inc. <https://www.lightbend.com> */ package play.api.mvc import org.specs2.mutable.Specification import play.api.http.HttpConfiguration import play.api.libs.typedmap.TypedEntry import play.api.libs.typedmap.TypedKey import play.api.libs.typedmap.TypedMap import play.api.mvc.request.DefaultRequestFactory import play.api.mvc.request.RemoteConnection import play.api.mvc.request.RequestTarget import play.mvc.Http.RequestBody class RequestSpec extends Specification { "request" should { "have typed attributes" in { "can set and get a single attribute" in { val x = TypedKey[Int]("x") dummyRequest().withAttrs(TypedMap(x -> 3)).attrs(x) must_== 3 } "can set two attributes and get one back" in { val x = TypedKey[Int]("x") val y = TypedKey[String]("y") dummyRequest().withAttrs(TypedMap(x -> 3, y -> "hello")).attrs(y) must_== "hello" } "getting a set attribute should be Some" in { val x = TypedKey[Int]("x") dummyRequest().withAttrs(TypedMap(x -> 5)).attrs.get(x) must beSome(5) } "getting a nonexistent attribute should be None" in { val x = TypedKey[Int]("x") dummyRequest().attrs.get(x) must beNone } "can add single attribute" in { val x = TypedKey[Int]("x") dummyRequest().addAttr(x, 3).attrs(x) must_== 3 } "keep current attributes when adding a new one" in { val x = TypedKey[Int] val y = TypedKey[String] dummyRequest().withAttrs(TypedMap(y -> "hello")).addAttr(x, 3).attrs(y) must_== "hello" } "overrides current attribute value" in { val x = TypedKey[Int] val y = TypedKey[String] val request = dummyRequest() .withAttrs(TypedMap(y -> "hello")) .addAttr(x, 3) .addAttr(y, "white") request.attrs(y) must_== "white" request.attrs(x) must_== 3 } "can add multiple attributes" in { val x = TypedKey[Int]("x") val y = TypedKey[Int]("y") val req = dummyRequest().addAttrs(TypedEntry(x, 3), TypedEntry(y, 4)) req.attrs(x) must_== 3 req.attrs(y) must_== 4 } "keep current attributes when adding multiple ones" in { val x = TypedKey[Int] val y = TypedKey[Int] val z = TypedKey[String] dummyRequest() .withAttrs(TypedMap(z -> "hello")) .addAttrs(TypedEntry(x, 3), TypedEntry(y, 4)) .attrs(z) must_== "hello" } "overrides current attribute value when adding multiple attributes" in { val x = TypedKey[Int] val y = TypedKey[Int] val z = TypedKey[String] val requestHeader = dummyRequest() .withAttrs(TypedMap(z -> "hello")) .addAttrs(TypedEntry(x, 3), TypedEntry(y, 4), TypedEntry(z, "white")) requestHeader.attrs(z) must_== "white" requestHeader.attrs(x) must_== 3 requestHeader.attrs(y) must_== 4 } "can set two attributes and get both back" in { val x = TypedKey[Int]("x") val y = TypedKey[String]("y") val r = dummyRequest().withAttrs(TypedMap(x -> 3, y -> "hello")) r.attrs(x) must_== 3 r.attrs(y) must_== "hello" } "can set two attributes and remove one of them" in { val x = TypedKey[Int]("x") val y = TypedKey[String]("y") val req = dummyRequest().withAttrs(TypedMap(x -> 3, y -> "hello")).removeAttr(x) req.attrs.get(x) must beNone req.attrs(y) must_== "hello" } "can set two attributes and remove both again" in { val x = TypedKey[Int]("x") val y = TypedKey[String]("y") val req = dummyRequest().withAttrs(TypedMap(x -> 3, y -> "hello")).removeAttr(x).removeAttr(y) req.attrs.get(x) must beNone req.attrs.get(y) must beNone } } } private def dummyRequest(requestMethod: String = "GET", requestUri: String = "/", headers: Headers = Headers()) = { new DefaultRequestFactory(HttpConfiguration()).createRequest( connection = RemoteConnection("", false, None), method = "GET", target = RequestTarget(requestUri, "", 
Map.empty), version = "", headers = headers, attrs = TypedMap.empty, new RequestBody(null) ) } }
wegtam/playframework
core/play/src/test/scala/play/api/mvc/RequestSpec.scala
Scala
apache-2.0
4,368
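The spec above exercises Play's typed attribute API through a dummy request. A standalone sketch of the same TypedKey/TypedMap operations, without constructing a request:

import play.api.libs.typedmap.{TypedKey, TypedMap}

object TypedMapSketch extends App {
  val userId  = TypedKey[Long]("userId")
  val traceId = TypedKey[String]("traceId")

  // Keys are typed, so lookups come back with the right static type.
  val attrs = TypedMap(userId -> 42L, traceId -> "abc-123")

  val id: Long = attrs(userId)
  println(id)                                  // 42
  println(attrs.get(traceId))                  // Some(abc-123)
  println(attrs.get(TypedKey[Int]("missing"))) // None: absent keys yield None
}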
/*
 * Copyright (C) 2017 Radicalbit
 *
 * This file is part of flink-JPMML
 *
 * flink-JPMML is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * flink-JPMML is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with flink-JPMML. If not, see <http://www.gnu.org/licenses/>.
 */

package io.radicalbit.flink.pmml.scala.utils

import org.dmg.pmml.PMML
import org.jpmml.model.{ImportFilter, JAXBUtil}
import org.xml.sax.InputSource

trait PmmlLoaderKit {

  protected case object Source {
    val KmeansPmml = "/kmeans.xml"
    val KmeansPmml41 = "/kmeans41.xml"
    val KmeansPmml40 = "/kmeans40.xml"
    val KmeansPmml42 = "/kmeans42.xml"
    val KmeansPmml32 = "/kmeans41.xml"

    val KmeansPmmlEmpty = "/kmeans_empty.xml"
    val KmeansPmmlNoOut = "/kmeans_nooutput.xml"
    val KmeansPmmlStringFields = "/kmeans_stringfields.xml"
    val KmeansPmmlNoOutNoTrg = "/kmeans_nooutput_notarget.xml"

    val NotExistingPath: String = "/not/existing/" + scala.util.Random.nextString(4)
  }

  final protected def getPMMLSource(path: String): String = getClass.getResource(path).getPath

  final protected def getPMMLResource(path: String): PMML = {
    val source = scala.io.Source.fromURL(getClass.getResource(path)).reader()
    JAXBUtil.unmarshalPMML(ImportFilter.apply(new InputSource(source)))
  }
}
maocorte/flink-jpmml
flink-jpmml-scala/src/test/scala/io/radicalbit/flink/pmml/scala/utils/PmmlLoaderKit.scala
Scala
agpl-3.0
1,774
package com.bigchange.mllib import org.apache.spark.broadcast.Broadcast import org.apache.spark.mllib.classification.NaiveBayes import org.apache.spark.mllib.evaluation.MulticlassMetrics import org.apache.spark.mllib.feature.{HashingTF, IDF, Word2Vec} import org.apache.spark.mllib.linalg.SparseVector import org.apache.spark.mllib.regression.LabeledPoint import org.apache.spark.rdd.RDD import org.apache.spark.{SparkConf, SparkContext} import scala.io.Source /** * Created by CHAOJIANG on 2016/9/25 0025. * 新闻数据中提取TF-IDF特征的实例 * 贝叶斯多标签分类器 */ object NewsGroups { val sc = SparkContext.getOrCreate(new SparkConf().setAppName("Newsgroups").setMaster("local")) var swl: Broadcast[List[String]] = null var tokenFiltered: Broadcast[Set[String]] = null def main(args: Array[String]): Unit = { val path = "file:///F:/SmartData-X/DataSet/20news-bydate/20news-bydate-train/*/*" val rdd = sc.wholeTextFiles(path) // 分词方法 val text = rdd.map { case(file, content) => content } // println("text:" + text.count) val whiteSpaceSplite = text.flatMap(t => t.split(" ").map(_.toLowerCase)) // 改进分词效果: 正则表达式切分原始文档, 由于许多不是单词字符 val nonWordSplit = text.flatMap(t => t.split("""\\W+""").map(_.toLowerCase)) // println("nonWordSplit:" + nonWordSplit.count) // 过滤到数字和包含数字的单词 val regex = """[^0-9]*""".r val filterNumbers = nonWordSplit.filter(token => regex.pattern.matcher(token).matches) // println("all words:" + filterNumbers.count) // 剔除停用词 : 有停用词列表可以参考 // val stopWordSList = List("the","a","an","of","or","in","for","by","on","but","is","not","with","as","was","if","they","this","are","and","it","have","from","at","my","be","that","to") val stopWordSList = Source.fromFile("src/main/resources/stopWords").getLines().toList val tokenCountsFiltered = filterNumbers.map((_, 1)).filter{ case (k, v) => k.length <2 }.reduceByKey(_ + _).filter{ case (k, v) => v < 2 }.map(_._1).collect.toSet val stopWordBro = sc.broadcast(stopWordSList) swl = stopWordBro val tokenCountsFilteredBro = sc.broadcast(tokenCountsFiltered) tokenFiltered = tokenCountsFilteredBro // 提取词干: 复杂(walking,walker -> walk), 可以通过标准的NLP方法或者搜索引擎软件实现(NLTK, OpenNLP,Lucene) val tokens = text.map(doc => tokenizer(doc)) // 使用TF-IDF处理文本 // val tFIDF = TFIDFModel(rdd, tokens) val word2Vec = word2VecModel(tokens) } // Word2Vec 词项模型 def word2VecModel(tokens:RDD[Seq[String]]) = { val word2Vec = new Word2Vec().setSeed(42) // 随机种子作为模型训练参数 val word2VecModel = word2Vec.fit(tokens) // 获取相似单词 word2VecModel.findSynonyms("hockey", 20).foreach(println) /*(ecac,1.3701449062603164) (rec,1.3661268683393772) (sport,1.3303370573154512) (hispanic,1.3007176914876915) (tournament,1.2907648102832736) (glens,1.2741494398054731) (ahl,1.232742342012108) (champs,1.2196663349162957) (octopi,1.2021684425511912) (sports,1.193158665614078) (motorcycles,1.1882885074872007) (woofers,1.1733295295882904) (expos,1.1717594763417933) (swedish,1.1709556679736088) (calder,1.1471181568937654) (affiliate,1.1425616781507797) (wabc,1.142089952672354) (woofing,1.1352360429053325) (ncaa,1.1334254449430632) (phils,1.1312980515841555)*/ } // 每篇分词 def tokenizer(line: String) = { line.split("""\\W+""").map(_.toLowerCase) .filter(token => """[^0-9]*""".r.pattern.matcher(token).matches) .filterNot(token => swl.value.contains(token) || tokenFiltered.value.contains(token)) .filter(token => token.length >= 2 ) .toSeq } // TF-IDF 词项模型 def TFIDFModel(rdd:RDD[(String, String)],tokens:RDD[Seq[String]]) = { // 处理成词项形式的文档以向量形式表达: HashingTF - 特征hash把输入的文本的词项映射为词频向量的下标 // 维度参数 println("tfidf fit started !!") val dimension = math.pow(2, 18).toInt 
val hashingTF = new HashingTF(dimension) val tf = hashingTF.transform(tokens) // 每篇输入文档(词项的序列)映射到一个MLib的Vector对象 .cache() val idf = new IDF().fit(tf) // 获取每个单词的逆向文本频率 val tfidf = idf.transform(tf) println("tfidf fit over!!") val v2 = tfidf.first().asInstanceOf[SparseVector] // 观察整个文档TF-IDF最大和最小权重 val minMaxVals = tfidf.map { v => val sv = v.asInstanceOf[SparseVector] (sv.values.min, sv.values.max) } val globalMinMax = minMaxVals.reduce { case ((min1,max1),(min2, max2)) => (math.min(min1, min2), math.max(max1, max2)) } println("globalMinMax:" + globalMinMax) // 实例运用: 1. 计算文档的相似度(CosSim) 2.作为输入训练一个多标签的分类模型 // 采用NB来处理多分类问题 // 获取类别 val newsGroups = rdd.map { case (file, doc) => file.split("/").takeRight(2).head } val newsGroupsMap = newsGroups.distinct().zipWithIndex.collectAsMap() println("newsGroupsMap: " + newsGroupsMap) // 两个RDD中元素的对应起来 val zipped = newsGroups.zip(tfidf) val trainData = zipped.map { case (topic, vector) => LabeledPoint(newsGroupsMap(topic), vector)} .cache() val model = NaiveBayes.train(trainData, lambda = 0.1 ) // model.save(sc, path = "file:///F:/SmartData-X/DataSet/20news-bydate/nbmodel") // TestData 预处理 val testPath = "file:///F:/SmartData-X/DataSet/20news-bydate/20news-bydate-test/*/*" val testRdd = sc.wholeTextFiles(testPath) val testLabels = testRdd.map { case(file, content) => val topic = file.split("/").takeRight(2).head; newsGroupsMap(topic) } val testTF = testRdd.map{ case (file, doc) => hashingTF.transform(tokenizer(doc)) } // Note: 这里使用训练集的IDF来转换测试集数据成TF-IDF向量 val testTFIDF = idf.transform(testTF) val zippedTest = testLabels.zip(testTFIDF) val test = zippedTest.map { case (topic ,vector) => LabeledPoint(topic, vector) } // 预测 val predictionAndLabels = test.map(p => (model.predict(p.features), p.label)) val accuracy = 1.0 * predictionAndLabels.filter(x => x._1 == x._2).count / test.count val metrics = new MulticlassMetrics(predictionAndLabels) println("accuracy:" + accuracy) println("加权F-指标:" + metrics.weightedFMeasure) // 加权F-指标:0.781142389463205 } }
bigchange/AI
src/main/scala/com/bigchange/mllib/NewsGroups.scala
Scala
apache-2.0
6,739
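The NewsGroups entry above builds its features with the HashingTF → IDF pipeline from spark-mllib. A minimal, self-contained sketch of that pipeline on a toy, pre-tokenized corpus (the documents and feature-space size are made up):

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.feature.{HashingTF, IDF}

object TfIdfSketch extends App {
  val sc = new SparkContext(new SparkConf().setAppName("tfidf-sketch").setMaster("local[2]"))

  // Toy "documents", already tokenized.
  val docs = sc.parallelize(Seq(
    Seq("spark", "streaming", "kafka"),
    Seq("spark", "mllib", "tfidf"),
    Seq("kafka", "mirror", "maker")
  ))

  val hashingTF = new HashingTF(math.pow(2, 10).toInt) // small feature space for toy data
  val tf = hashingTF.transform(docs).cache()           // term-frequency vectors
  val idf = new IDF().fit(tf)                          // inverse document frequencies
  val tfidf = idf.transform(tf)                        // weighted vectors

  tfidf.collect().foreach(println)
  sc.stop()
}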
// // MessagePack for Java // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package org.msgpack.core import java.io.ByteArrayOutputStream import java.math.BigInteger import java.nio.CharBuffer import java.nio.charset.{CodingErrorAction, UnmappableCharacterException} import org.msgpack.core.MessagePack.Code import org.msgpack.value.{Value, Variable} import scala.util.Random /** * Created on 2014/05/07. */ class MessagePackTest extends MessagePackSpec { def isValidUTF8(s: String) = { MessagePack.UTF8.newEncoder().canEncode(s) } def containsUnmappableCharacter(s: String): Boolean = { try { MessagePack.UTF8.newEncoder().onUnmappableCharacter(CodingErrorAction.REPORT).encode(CharBuffer.wrap(s)) false } catch { case e: UnmappableCharacterException => true case _: Exception => false } } "MessagePack" should { "detect fixint values" in { for (i <- 0 until 0x79) { Code.isPosFixInt(i.toByte) shouldBe true } for (i <- 0x80 until 0xFF) { Code.isPosFixInt(i.toByte) shouldBe false } } "detect fixint quickly" in { val N = 100000 val idx = (0 until N).map(x => Random.nextInt(256).toByte).toArray[Byte] time("check fixint", repeat = 100) { block("mask") { var i = 0 var count = 0 while (i < N) { if ((idx(i) & Code.POSFIXINT_MASK) == 0) { count += 1 } i += 1 } } block("mask in func") { var i = 0 var count = 0 while (i < N) { if (Code.isPosFixInt(idx(i))) { count += 1 } i += 1 } } block("shift cmp") { var i = 0 var count = 0 while (i < N) { if ((idx(i) >>> 7) == 0) { count += 1 } i += 1 } } } } "detect neg fix int values" in { for (i <- 0 until 0xe0) { Code.isNegFixInt(i.toByte) shouldBe false } for (i <- 0xe0 until 0xFF) { Code.isNegFixInt(i.toByte) shouldBe true } } def check[A](v: A, pack: MessagePacker => Unit, unpack: MessageUnpacker => A, msgpack: MessagePack = MessagePack.DEFAULT): Unit = { var b: Array[Byte] = null try { val bs = new ByteArrayOutputStream() val packer = msgpack.newPacker(bs) pack(packer) packer.close() b = bs.toByteArray val unpacker = msgpack.newUnpacker(b) val ret = unpack(unpacker) ret shouldBe v } catch { case e: Exception => warn(e.getMessage) if (b != null) { warn(s"packed data (size:${b.length}): ${toHex(b)}") } throw e } } def checkException[A](v: A, pack: MessagePacker => Unit, unpack: MessageUnpacker => A, msgpack: MessagePack = MessagePack.DEFAULT): Unit = { var b: Array[Byte] = null val bs = new ByteArrayOutputStream() val packer = msgpack.newPacker(bs) pack(packer) packer.close() b = bs.toByteArray val unpacker = msgpack.newUnpacker(b) val ret = unpack(unpacker) fail("cannot not reach here") } def checkOverflow[A](v: A, pack: MessagePacker => Unit, unpack: MessageUnpacker => A) { try { checkException[A](v, pack, unpack) } catch { case e: MessageIntegerOverflowException => // OK } } "pack/unpack primitive values" taggedAs ("prim") in { forAll { (v: Boolean) => check(v, _.packBoolean(v), _.unpackBoolean) } forAll { (v: Byte) => check(v, _.packByte(v), _.unpackByte) } forAll { (v: Short) => check(v, _.packShort(v), _.unpackShort) } forAll { (v: Int) => check(v, _.packInt(v), 
_.unpackInt) } forAll { (v: Float) => check(v, _.packFloat(v), _.unpackFloat) } forAll { (v: Long) => check(v, _.packLong(v), _.unpackLong) } forAll { (v: Double) => check(v, _.packDouble(v), _.unpackDouble) } check(null, _.packNil, { unpacker => unpacker.unpackNil(); null }) } "pack/unpack integer values" taggedAs ("int") in { val sampleData = Seq[Long](Int.MinValue.toLong - 10, -65535, -8191, -1024, -255, -127, -63, -31, -15, -7, -3, -1, 0, 2, 4, 8, 16, 32, 64, 128, 256, 1024, 8192, 65536, Int.MaxValue.toLong + 10) for (v <- sampleData) { check(v, _.packLong(v), _.unpackLong) if (v.isValidInt) { val vi = v.toInt check(vi, _.packInt(vi), _.unpackInt) } else { checkOverflow(v, _.packLong(v), _.unpackInt) } if (v.isValidShort) { val vi = v.toShort check(vi, _.packShort(vi), _.unpackShort) } else { checkOverflow(v, _.packLong(v), _.unpackShort) } if (v.isValidByte) { val vi = v.toByte check(vi, _.packByte(vi), _.unpackByte) } else { checkOverflow(v, _.packLong(v), _.unpackByte) } } } "pack/unpack BigInteger" taggedAs ("bi") in { forAll { (a: Long) => val v = BigInteger.valueOf(a) check(v, _.packBigInteger(v), _.unpackBigInteger) } for (bi <- Seq(BigInteger.valueOf(Long.MaxValue).add(BigInteger.valueOf(1)))) { check(bi, _.packBigInteger(bi), _.unpackBigInteger()) } for (bi <- Seq(BigInteger.valueOf(Long.MaxValue).shiftLeft(10))) { try { checkException(bi, _.packBigInteger(bi), _.unpackBigInteger()) fail("cannot reach here") } catch { case e: IllegalArgumentException => // OK } } } "pack/unpack strings" taggedAs ("string") in { forAll { (v: String) => whenever(isValidUTF8(v)) { check(v, _.packString(v), _.unpackString) } } } "pack/unpack large strings" taggedAs ("large-string") in { // Large string val strLen = Seq(1000, 2000, 10000, 50000, 100000, 500000) for (l <- strLen) { val v: String = Iterator.continually(Random.nextString(l * 10)).find(isValidUTF8).get check(v, _.packString(v), _.unpackString) } } "report errors when packing/unpacking malformed strings" taggedAs ("malformed") in { // TODO produce malformed utf-8 strings in Java8" pending // Create 100 malformed UTF8 Strings val r = new Random(0) val malformedStrings = Iterator.continually { val b = new Array[Byte](10) r.nextBytes(b) b } .filter(b => !isValidUTF8(new String(b))).take(100) for (malformedBytes <- malformedStrings) { // Pack tests val malformed = new String(malformedBytes) try { checkException(malformed, _.packString(malformed), _.unpackString()) } catch { case e: MessageStringCodingException => // OK } try { checkException(malformed, { packer => packer.packRawStringHeader(malformedBytes.length) packer.writePayload(malformedBytes) }, _.unpackString()) } catch { case e: MessageStringCodingException => // OK } } } "report errors when packing/unpacking strings that contain unmappable characters" taggedAs ("unmap") in { val unmappable = Array[Byte](0xfc.toByte, 0x0a.toByte) //val unmappableChar = Array[Char](new Character(0xfc0a).toChar) // Report error on unmappable character val config = new MessagePack.ConfigBuilder() .onMalFormedInput(CodingErrorAction.REPORT) .onUnmappableCharacter(CodingErrorAction.REPORT) .build() val msgpack = new MessagePack(config) for (bytes <- Seq(unmappable)) { When("unpacking") try { checkException(bytes, { packer => packer.packRawStringHeader(bytes.length) packer.writePayload(bytes) }, _.unpackString(), msgpack) } catch { case e: MessageStringCodingException => // OK } // When("packing") // try { // val s = new String(unmappableChar) // checkException(s, _.packString(s), _.unpackString()) // } 
// catch { // case e:MessageStringCodingException => // OK // } } } "pack/unpack binary" taggedAs ("binary") in { forAll { (v: Array[Byte]) => check(v, { packer => packer.packBinaryHeader(v.length); packer.writePayload(v) }, { unpacker => val len = unpacker.unpackBinaryHeader() val out = new Array[Byte](len) unpacker.readPayload(out, 0, len) out } ) } val len = Seq(1000, 2000, 10000, 50000, 100000, 500000) for (l <- len) { val v = new Array[Byte](l) Random.nextBytes(v) check(v, { packer => packer.packBinaryHeader(v.length); packer.writePayload(v) }, { unpacker => val len = unpacker.unpackBinaryHeader() val out = new Array[Byte](len) unpacker.readPayload(out, 0, len) out } ) } } val testHeaderLength = Seq(1, 2, 4, 8, 16, 17, 32, 64, 255, 256, 1000, 2000, 10000, 50000, 100000, 500000) "pack/unpack arrays" taggedAs ("array") in { forAll { (v: Array[Int]) => check(v, { packer => packer.packArrayHeader(v.length) v.map(packer.packInt(_)) }, { unpacker => val len = unpacker.unpackArrayHeader() val out = new Array[Int](len) for (i <- 0 until v.length) { out(i) = unpacker.unpackInt } out } ) } for (l <- testHeaderLength) { check(l, _.packArrayHeader(l), _.unpackArrayHeader()) } try { checkException(0, _.packArrayHeader(-1), _.unpackArrayHeader) } catch { case e: IllegalArgumentException => // OK } } "pack/unpack maps" taggedAs ("map") in { forAll { (v: Array[Int]) => val m = v.map(i => (i, i.toString)) check(m, { packer => packer.packMapHeader(v.length) m.map { case (k: Int, v: String) => packer.packInt(k) packer.packString(v) } }, { unpacker => val len = unpacker.unpackMapHeader() val b = Seq.newBuilder[(Int, String)] for (i <- 0 until len) { b += ((unpacker.unpackInt, unpacker.unpackString)) } b.result } ) } for (l <- testHeaderLength) { check(l, _.packMapHeader(l), _.unpackMapHeader()) } try { checkException(0, _.packMapHeader(-1), _.unpackMapHeader) } catch { case e: IllegalArgumentException => // OK } } "pack/unpack extension types" taggedAs ("ext") in { forAll { (dataLen: Int, tpe: Byte) => val l = Math.abs(dataLen) whenever(l >= 0) { val ext = new ExtensionTypeHeader(ExtensionTypeHeader.checkedCastToByte(tpe), l) check(ext, _.packExtensionTypeHeader(ext.getType, ext.getLength), _.unpackExtensionTypeHeader()) } } for (l <- testHeaderLength) { val ext = new ExtensionTypeHeader(ExtensionTypeHeader.checkedCastToByte(Random.nextInt(128)), l) check(ext, _.packExtensionTypeHeader(ext.getType, ext.getLength), _.unpackExtensionTypeHeader()) } } "pack/unpack maps in lists" in { val aMap = List(Map("f" -> "x")) check(aMap, { packer => packer.packArrayHeader(aMap.size) for (m <- aMap) { packer.packMapHeader(m.size) for ((k, v) <- m) { packer.packString(k) packer.packString(v) } } }, { unpacker => val v = new Variable() unpacker.unpackValue(v) import scala.collection.JavaConversions._ v.asArrayValue().map { m => val mv = m.asMapValue() val kvs = mv.getKeyValueArray kvs.grouped(2).map({ kvp: Array[Value] => val k = kvp(0) val v = kvp(1) (k.asStringValue().asString, v.asStringValue().asString) }).toMap }.toList }) } } }
suzukaze/msgpack-java
msgpack-core/src/test/scala/org/msgpack/core/MessagePackTest.scala
Scala
apache-2.0
13,102
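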
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package reforest.util

import java.util.concurrent.{ScheduledFuture, ScheduledThreadPoolExecutor, TimeUnit}

import org.apache.spark.{SparkContext, TaskContext}

/**
 * Instrumentation for the Garbage Collector in order to retrieve useful information about the amount of memory
 * used.
 */
trait GCInstrumented extends Serializable {

  /**
   * It starts the GC at periodic intervals.
   */
  def start()

  /**
   * It stops the periodic invocation of the GC
   */
  def stop()

  /**
   * It calls the GC
   */
  def gc()

  /**
   * It calls the GC, executes the given function and calls the GC again
   * @param f the function to be executed
   */
  def gc(f: => Any)

  /**
   * It executes the GC on all the machines
   */
  def gcALL()

  /**
   * It returns true if the instrumentation is active (i.e. it is calling the GC)
   * @return true if the GC is instrumented
   */
  def valid: Boolean
}

class GCInstrumentedEmpty extends GCInstrumented {
  def gc() = {}

  def gc(f: => Any) = f

  def gcALL() = {}

  val valid = false

  override def start() = {}

  override def stop() = {}
}

class GCInstrumentedFull(@transient val sc: SparkContext) extends GCInstrumented {
  val dataForGC = sc.parallelize(Seq.fill(10000)(0)).cache()

  override def start() = {
    dataForGC.foreachPartition(t => GCRunner.start())
  }

  override def stop() = {
    dataForGC.foreachPartition(t => GCRunner.stop())
  }

  def gc() = {
    System.gc()
  }

  def gc(f: => Any) = {
    gc
    f
    gc
  }

  def gcALL() = {
    dataForGC.foreachPartition(t => System.gc())
  }

  val valid = true
}

object GCRunner {
  val ex = new ScheduledThreadPoolExecutor(1)
  var partitionIndex: Option[Int] = Option.empty
  var f: Option[ScheduledFuture[_]] = Option.empty

  def start() = {
    this.synchronized({
      val taskId = TaskContext.getPartitionId()
      if (partitionIndex.isEmpty) {
        partitionIndex = Some(taskId)
        val task = new Runnable {
          def run() = System.gc()
        }
        f = Some(ex.scheduleAtFixedRate(task, 1, 1, TimeUnit.SECONDS))
        f.get.cancel(false)
      }
    })
  }

  def stop() = {
    if (f.isDefined) {
      f.get.cancel(true)
      f = Option.empty
      partitionIndex = Option.empty
    }
  }
}
alessandrolulli/reforest
src/main/scala/reforest/util/GCInstrumented.scala
Scala
apache-2.0
3,209
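GCInstrumented above splits a no-op implementation (GCInstrumentedEmpty) from one that triggers periodic and cluster-wide GC via a small cached RDD (GCInstrumentedFull). A rough sketch of how a caller might wire and use it; the build helper, the enabled flag and timedPhase are hypothetical names and not part of the library.

import org.apache.spark.SparkContext
import reforest.util.{GCInstrumented, GCInstrumentedEmpty, GCInstrumentedFull}

object GcInstrumentationSketch {

  // "enabled" is a hypothetical flag; the file above does not show how this choice is made.
  def build(sc: SparkContext, enabled: Boolean): GCInstrumented =
    if (enabled) new GCInstrumentedFull(sc) else new GCInstrumentedEmpty

  // Wrap a measured phase: explicit GC before and after the work, then a cluster-wide GC.
  def timedPhase(gc: GCInstrumented)(work: => Unit): Unit = {
    gc.start()   // periodic executor-side GC; no-op for GCInstrumentedEmpty
    gc.gc(work)  // gc(); work; gc()
    gc.gcALL()   // System.gc() on every partition of the cached helper RDD
    gc.stop()
  }
}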
/* * Copyright 2017 PayPal * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.squbs.cluster import akka.util.ByteString import com.typesafe.scalalogging.LazyLogging import org.apache.curator.framework.CuratorFrameworkFactory import org.apache.curator.retry.ExponentialBackoffRetry import org.apache.zookeeper.CreateMode import org.squbs.cluster.test.{ZkClusterMultiActorSystemTestKit, ZkClusterTestHelper} import scala.language.implicitConversions class ZkClusterInitTest extends ZkClusterMultiActorSystemTestKit("ZkClusterInitTest") with LazyLogging with ZkClusterTestHelper { val par1 = ByteString("myPar1") val par2 = ByteString("myPar2") val par3 = ByteString("myPar3") implicit val log = logger implicit def string2ByteArray(s: String): Array[Byte] = s.toCharArray map (c => c.toByte) implicit def byteArray2String(array: Array[Byte]): String = array.map(_.toChar).mkString override def beforeAll(): Unit = { // Don't need to start the cluster for now // We preset the data in Zookeeper instead. val zkClient = CuratorFrameworkFactory.newClient( zkConfig.getString("zkCluster.connectionString"), new ExponentialBackoffRetry(ZkCluster.DEFAULT_BASE_SLEEP_TIME_MS, ZkCluster.DEFAULT_MAX_RETRIES) ) zkClient.start() zkClient.blockUntilConnected() implicit val zkClientWithNS = zkClient.usingNamespace(zkConfig.getString("zkCluster.namespace")) guarantee("/leader", Some(Array[Byte]()), CreateMode.PERSISTENT) guarantee("/members", Some(Array[Byte]()), CreateMode.PERSISTENT) guarantee("/segments", Some(Array[Byte]()), CreateMode.PERSISTENT) guarantee("/segments/segment-0", Some(Array[Byte]()), CreateMode.PERSISTENT) guarantee(s"/segments/segment-0/${keyToPath(par1)}", Some("myPar1"), CreateMode.PERSISTENT) guarantee(s"/segments/segment-0/${keyToPath(par1)}/servants", None, CreateMode.PERSISTENT) guarantee(s"/segments/segment-0/${keyToPath(par1)}/$$size", Some(3), CreateMode.PERSISTENT) guarantee(s"/segments/segment-0/${keyToPath(par2)}", Some("myPar2"), CreateMode.PERSISTENT) guarantee(s"/segments/segment-0/${keyToPath(par2)}/servants", None, CreateMode.PERSISTENT) guarantee(s"/segments/segment-0/${keyToPath(par2)}/$$size", Some(3), CreateMode.PERSISTENT) guarantee(s"/segments/segment-0/${keyToPath(par3)}", Some("myPar3"), CreateMode.PERSISTENT) guarantee(s"/segments/segment-0/${keyToPath(par3)}/servants", None, CreateMode.PERSISTENT) guarantee(s"/segments/segment-0/${keyToPath(par3)}/$$size", Some(3), CreateMode.PERSISTENT) zkClient.close() } "ZkCluster" should "list the partitions" in { startCluster() zkClusterExts foreach { case (_, ext) => ext tell (ZkListPartitions(ext.zkAddress), self) expectMsgType[ZkPartitions](timeout) } } "ZkCluster" should "load persisted partition information and sync across the cluster" in { zkClusterExts foreach { case (_, ext) => ext tell (ZkQueryPartition(par1), self) expectMsgType[ZkPartition](timeout).members should have size 3 } zkClusterExts foreach { case (_, ext) => ext tell (ZkQueryPartition(par2), self) expectMsgType[ZkPartition](timeout).members should have size 3 } 
zkClusterExts foreach { case (_, ext) => ext tell (ZkQueryPartition(par3), self) expectMsgType[ZkPartition](timeout).members should have size 3 } } "ZkCluster" should "list all the members across the cluster" in { val members = zkClusterExts.map(_._2.zkAddress).toSet zkClusterExts foreach { case (_, ext) => ext tell (ZkQueryMembership, self) expectMsgType[ZkMembership](timeout).members should be (members) } } }
SarathChandran/squbs
squbs-zkcluster/src/test/scala/org/squbs/cluster/ZkClusterInitTest.scala
Scala
apache-2.0
4,238
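ZkClusterInitTest's beforeAll seeds ZooKeeper through a raw Curator client before the cluster actors are started. The sketch below strips that bootstrap pattern down to the Curator calls that appear in the test; the connection string, namespace and retry values are placeholders for what the test reads from zkConfig and ZkCluster's constants.

import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.ExponentialBackoffRetry

object ZkBootstrapSketch {
  def main(args: Array[String]): Unit = {
    // Placeholder values; the test takes these from its configuration instead.
    val connectionString = "localhost:2181"
    val namespace        = "zkcluster-test"

    val zkClient = CuratorFrameworkFactory.newClient(
      connectionString,
      new ExponentialBackoffRetry(1000 /* base sleep ms */, 3 /* max retries */))

    zkClient.start()
    zkClient.blockUntilConnected()
    try {
      // Scope every path under the namespace, as the test does before seeding /leader etc.
      val namespaced = zkClient.usingNamespace(namespace)
      println(namespaced.getChildren.forPath("/"))
    } finally {
      zkClient.close()
    }
  }
}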
/* * Copyright 2014-2020 Rik van der Kleij * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package intellij.haskell.editor.formatter.settings import com.intellij.application.options.SmartIndentOptionsEditor import com.intellij.lang.Language import com.intellij.psi.codeStyle.LanguageCodeStyleSettingsProvider.SettingsType import com.intellij.psi.codeStyle.{CommonCodeStyleSettings, LanguageCodeStyleSettingsProvider} import intellij.haskell.HaskellLanguage import org.jetbrains.annotations.NotNull class HaskellLanguageCodeStyleSettingsProvider extends LanguageCodeStyleSettingsProvider { @NotNull override def getLanguage: Language = { HaskellLanguage.Instance } override def customizeDefaults(commonSettings: CommonCodeStyleSettings, indentOptions: CommonCodeStyleSettings.IndentOptions): Unit = { indentOptions.INDENT_SIZE = 2 indentOptions.CONTINUATION_INDENT_SIZE = 4 indentOptions.TAB_SIZE = 2 indentOptions.USE_TAB_CHARACTER = false } override def getIndentOptionsEditor: SmartIndentOptionsEditor = { new SmartIndentOptionsEditor(this) } override def getCodeSample(settingsType: SettingsType): String = """-- Reformatting is done externally by Ormolu. |-- Setting code style options here has no effect. """.stripMargin }
rikvdkleij/intellij-haskell
src/main/scala/intellij/haskell/editor/formatter/settings/HaskellLanguageCodeStyleSettingsProvider.scala
Scala
apache-2.0
1,799
import io.gatling.core.Predef._ import io.gatling.http.Predef._ import scala.concurrent.duration._ object PatientMenu { def graphs(patientId: String) = exec(http("Patient graphs").get("/patient/" + patientId + "/graphs")) }
silverbullet-dk/opentele-performance-tests
src/test/scala/user-files/simulations/processes/clinician/PatientMenu.scala
Scala
apache-2.0
227
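PatientMenu.graphs returns a reusable Gatling chain rather than a full scenario. A hypothetical simulation that plugs it into a scenario and injection profile follows; the base URL, patient id and user count are made up, and the Gatling 3 DSL is assumed (older Gatling versions spell some of these calls differently, e.g. baseURL).

import io.gatling.core.Predef._
import io.gatling.http.Predef._

// Hypothetical simulation reusing the PatientMenu chain defined above.
class PatientGraphsSimulation extends Simulation {

  val httpConf = http.baseUrl("http://localhost:8080")   // made-up base URL

  val scn = scenario("Clinician opens patient graphs")
    .exec(PatientMenu.graphs("42"))                      // made-up patient id

  setUp(scn.inject(atOnceUsers(10))).protocols(httpConf)
}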
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.ml.recommendation import java.io.File import java.util.Random import scala.collection.JavaConverters._ import scala.collection.mutable import scala.collection.mutable.{ArrayBuffer, WrappedArray} import com.github.fommil.netlib.BLAS.{getInstance => blas} import org.apache.commons.io.FileUtils import org.apache.commons.io.filefilter.TrueFileFilter import org.scalatest.BeforeAndAfterEach import org.apache.spark._ import org.apache.spark.internal.Logging import org.apache.spark.ml.linalg.Vectors import org.apache.spark.ml.recommendation.ALS._ import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTest, MLTestingUtils} import org.apache.spark.ml.util.TestingUtils._ import org.apache.spark.mllib.util.MLlibTestSparkContext import org.apache.spark.rdd.RDD import org.apache.spark.scheduler.{SparkListener, SparkListenerStageCompleted} import org.apache.spark.sql.{DataFrame, Encoder, Row, SparkSession} import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder import org.apache.spark.sql.functions.{col, lit} import org.apache.spark.sql.streaming.StreamingQueryException import org.apache.spark.sql.types._ import org.apache.spark.storage.StorageLevel import org.apache.spark.util.Utils class ALSSuite extends MLTest with DefaultReadWriteTest with Logging { override def beforeAll(): Unit = { super.beforeAll() sc.setCheckpointDir(tempDir.getAbsolutePath) } override def afterAll(): Unit = { super.afterAll() } test("LocalIndexEncoder") { val random = new Random for (numBlocks <- Seq(1, 2, 5, 10, 20, 50, 100)) { val encoder = new LocalIndexEncoder(numBlocks) val maxLocalIndex = Int.MaxValue / numBlocks val tests = Seq.fill(5)((random.nextInt(numBlocks), random.nextInt(maxLocalIndex))) ++ Seq((0, 0), (numBlocks - 1, maxLocalIndex)) tests.foreach { case (blockId, localIndex) => val err = s"Failed with numBlocks=$numBlocks, blockId=$blockId, and localIndex=$localIndex." 
val encoded = encoder.encode(blockId, localIndex) assert(encoder.blockId(encoded) === blockId, err) assert(encoder.localIndex(encoded) === localIndex, err) } } } test("normal equation construction") { val k = 2 val ne0 = new NormalEquation(k) .add(Array(1.0f, 2.0f), 3.0) .add(Array(4.0f, 5.0f), 12.0, 2.0) // weighted assert(ne0.k === k) assert(ne0.triK === k * (k + 1) / 2) // NumPy code that computes the expected values: // A = np.matrix("1 2; 4 5") // b = np.matrix("3; 6") // C = np.matrix(np.diag([1, 2])) // ata = A.transpose() * C * A // atb = A.transpose() * C * b assert(Vectors.dense(ne0.ata) ~== Vectors.dense(33.0, 42.0, 54.0) relTol 1e-8) assert(Vectors.dense(ne0.atb) ~== Vectors.dense(51.0, 66.0) relTol 1e-8) val ne1 = new NormalEquation(2) .add(Array(7.0f, 8.0f), 9.0) ne0.merge(ne1) // NumPy code that computes the expected values: // A = np.matrix("1 2; 4 5; 7 8") // b = np.matrix("3; 6; 9") // C = np.matrix(np.diag([1, 2, 1])) // ata = A.transpose() * C * A // atb = A.transpose() * C * b assert(Vectors.dense(ne0.ata) ~== Vectors.dense(82.0, 98.0, 118.0) relTol 1e-8) assert(Vectors.dense(ne0.atb) ~== Vectors.dense(114.0, 138.0) relTol 1e-8) intercept[IllegalArgumentException] { ne0.add(Array(1.0f), 2.0) } intercept[IllegalArgumentException] { ne0.add(Array(1.0f, 2.0f, 3.0f), 4.0) } intercept[IllegalArgumentException] { ne0.add(Array(1.0f, 2.0f), 0.0, -1.0) } intercept[IllegalArgumentException] { val ne2 = new NormalEquation(3) ne0.merge(ne2) } ne0.reset() assert(ne0.ata.forall(_ == 0.0)) assert(ne0.atb.forall(_ == 0.0)) } test("CholeskySolver") { val k = 2 val ne0 = new NormalEquation(k) .add(Array(1.0f, 2.0f), 4.0) .add(Array(1.0f, 3.0f), 9.0) .add(Array(1.0f, 4.0f), 16.0) val ne1 = new NormalEquation(k) .merge(ne0) val chol = new CholeskySolver val x0 = chol.solve(ne0, 0.0).map(_.toDouble) // NumPy code that computes the expected solution: // A = np.matrix("1 2; 1 3; 1 4") // b = b = np.matrix("3; 6") // x0 = np.linalg.lstsq(A, b)[0] assert(Vectors.dense(x0) ~== Vectors.dense(-8.333333, 6.0) relTol 1e-6) assert(ne0.ata.forall(_ == 0.0)) assert(ne0.atb.forall(_ == 0.0)) val x1 = chol.solve(ne1, 1.5).map(_.toDouble) // NumPy code that computes the expected solution, where lambda is scaled by n: // x0 = np.linalg.solve(A.transpose() * A + 1.5 * np.eye(2), A.transpose() * b) assert(Vectors.dense(x1) ~== Vectors.dense(-0.1155556, 3.28) relTol 1e-6) } test("RatingBlockBuilder") { val emptyBuilder = new RatingBlockBuilder[Int]() assert(emptyBuilder.size === 0) val emptyBlock = emptyBuilder.build() assert(emptyBlock.srcIds.isEmpty) assert(emptyBlock.dstIds.isEmpty) assert(emptyBlock.ratings.isEmpty) val builder0 = new RatingBlockBuilder() .add(Rating(0, 1, 2.0f)) .add(Rating(3, 4, 5.0f)) assert(builder0.size === 2) val builder1 = new RatingBlockBuilder() .add(Rating(6, 7, 8.0f)) .merge(builder0.build()) assert(builder1.size === 3) val block = builder1.build() val ratings = Seq.tabulate(block.size) { i => (block.srcIds(i), block.dstIds(i), block.ratings(i)) }.toSet assert(ratings === Set((0, 1, 2.0f), (3, 4, 5.0f), (6, 7, 8.0f))) } test("UncompressedInBlock") { val encoder = new LocalIndexEncoder(10) val uncompressed = new UncompressedInBlockBuilder[Int](encoder) .add(0, Array(1, 0, 2), Array(0, 1, 4), Array(1.0f, 2.0f, 3.0f)) .add(1, Array(3, 0), Array(2, 5), Array(4.0f, 5.0f)) .build() assert(uncompressed.length === 5) val records = Seq.tabulate(uncompressed.length) { i => val dstEncodedIndex = uncompressed.dstEncodedIndices(i) val dstBlockId = encoder.blockId(dstEncodedIndex) val 
dstLocalIndex = encoder.localIndex(dstEncodedIndex) (uncompressed.srcIds(i), dstBlockId, dstLocalIndex, uncompressed.ratings(i)) }.toSet val expected = Set((1, 0, 0, 1.0f), (0, 0, 1, 2.0f), (2, 0, 4, 3.0f), (3, 1, 2, 4.0f), (0, 1, 5, 5.0f)) assert(records === expected) val compressed = uncompressed.compress() assert(compressed.size === 5) assert(compressed.srcIds.toSeq === Seq(0, 1, 2, 3)) assert(compressed.dstPtrs.toSeq === Seq(0, 2, 3, 4, 5)) var decompressed = ArrayBuffer.empty[(Int, Int, Int, Float)] var i = 0 while (i < compressed.srcIds.length) { var j = compressed.dstPtrs(i) while (j < compressed.dstPtrs(i + 1)) { val dstEncodedIndex = compressed.dstEncodedIndices(j) val dstBlockId = encoder.blockId(dstEncodedIndex) val dstLocalIndex = encoder.localIndex(dstEncodedIndex) decompressed += ((compressed.srcIds(i), dstBlockId, dstLocalIndex, compressed.ratings(j))) j += 1 } i += 1 } assert(decompressed.toSet === expected) } test("CheckedCast") { val checkedCast = new ALS().checkedCast val df = spark.range(1) withClue("Valid Integer Ids") { df.select(checkedCast(lit(123))).collect() } withClue("Valid Long Ids") { df.select(checkedCast(lit(1231L))).collect() } withClue("Valid Decimal Ids") { df.select(checkedCast(lit(123).cast(DecimalType(15, 2)))).collect() } withClue("Valid Double Ids") { df.select(checkedCast(lit(123.0))).collect() } val msg = "either out of Integer range or contained a fractional part" withClue("Invalid Long: out of range") { val e: SparkException = intercept[SparkException] { df.select(checkedCast(lit(1231000000000L))).collect() } assert(e.getMessage.contains(msg)) } withClue("Invalid Decimal: out of range") { val e: SparkException = intercept[SparkException] { df.select(checkedCast(lit(1231000000000.0).cast(DecimalType(15, 2)))).collect() } assert(e.getMessage.contains(msg)) } withClue("Invalid Decimal: fractional part") { val e: SparkException = intercept[SparkException] { df.select(checkedCast(lit(123.1).cast(DecimalType(15, 2)))).collect() } assert(e.getMessage.contains(msg)) } withClue("Invalid Double: out of range") { val e: SparkException = intercept[SparkException] { df.select(checkedCast(lit(1231000000000.0))).collect() } assert(e.getMessage.contains(msg)) } withClue("Invalid Double: fractional part") { val e: SparkException = intercept[SparkException] { df.select(checkedCast(lit(123.1))).collect() } assert(e.getMessage.contains(msg)) } withClue("Invalid Type") { val e: SparkException = intercept[SparkException] { df.select(checkedCast(lit("123.1"))).collect() } assert(e.getMessage.contains("was not numeric")) } } /** * Generates an explicit feedback dataset for testing ALS. 
* @param numUsers number of users * @param numItems number of items * @param rank rank * @param noiseStd the standard deviation of additive Gaussian noise on training data * @param seed random seed * @return (training, test) */ def genExplicitTestData( numUsers: Int, numItems: Int, rank: Int, noiseStd: Double = 0.0, seed: Long = 11L): (RDD[Rating[Int]], RDD[Rating[Int]]) = { val trainingFraction = 0.6 val testFraction = 0.3 val totalFraction = trainingFraction + testFraction val random = new Random(seed) val userFactors = genFactors(numUsers, rank, random) val itemFactors = genFactors(numItems, rank, random) val training = ArrayBuffer.empty[Rating[Int]] val test = ArrayBuffer.empty[Rating[Int]] for ((userId, userFactor) <- userFactors; (itemId, itemFactor) <- itemFactors) { val x = random.nextDouble() if (x < totalFraction) { val rating = blas.sdot(rank, userFactor, 1, itemFactor, 1) if (x < trainingFraction) { val noise = noiseStd * random.nextGaussian() training += Rating(userId, itemId, rating + noise.toFloat) } else { test += Rating(userId, itemId, rating) } } } logInfo(s"Generated an explicit feedback dataset with ${training.size} ratings for training " + s"and ${test.size} for test.") (sc.parallelize(training, 2), sc.parallelize(test, 2)) } /** * Generates an implicit feedback dataset for testing ALS. * @param numUsers number of users * @param numItems number of items * @param rank rank * @param noiseStd the standard deviation of additive Gaussian noise on training data * @param seed random seed * @return (training, test) */ def genImplicitTestData( numUsers: Int, numItems: Int, rank: Int, noiseStd: Double = 0.0, seed: Long = 11L): (RDD[Rating[Int]], RDD[Rating[Int]]) = { ALSSuite.genImplicitTestData(sc, numUsers, numItems, rank, noiseStd, seed) } /** * Generates random user/item factors, with i.i.d. values drawn from U(a, b). * @param size number of users/items * @param rank number of features * @param random random number generator * @param a min value of the support (default: -1) * @param b max value of the support (default: 1) * @return a sequence of (ID, factors) pairs */ private def genFactors( size: Int, rank: Int, random: Random, a: Float = -1.0f, b: Float = 1.0f): Seq[(Int, Array[Float])] = { ALSSuite.genFactors(size, rank, random, a, b) } /** * Train ALS using the given training set and parameters * @param training training dataset * @param rank rank of the matrix factorization * @param maxIter max number of iterations * @param regParam regularization constant * @param implicitPrefs whether to use implicit preference * @param numUserBlocks number of user blocks * @param numItemBlocks number of item blocks * @return a trained ALSModel */ def trainALS( training: RDD[Rating[Int]], rank: Int, maxIter: Int, regParam: Double, implicitPrefs: Boolean = false, numUserBlocks: Int = 2, numItemBlocks: Int = 3): ALSModel = { val spark = this.spark import spark.implicits._ val als = new ALS() .setRank(rank) .setRegParam(regParam) .setImplicitPrefs(implicitPrefs) .setNumUserBlocks(numUserBlocks) .setNumItemBlocks(numItemBlocks) .setSeed(0) als.fit(training.toDF()) } /** * Test ALS using the given training/test splits and parameters. 
* @param training training dataset * @param test test dataset * @param rank rank of the matrix factorization * @param maxIter max number of iterations * @param regParam regularization constant * @param implicitPrefs whether to use implicit preference * @param numUserBlocks number of user blocks * @param numItemBlocks number of item blocks * @param targetRMSE target test RMSE */ def testALS( training: RDD[Rating[Int]], test: RDD[Rating[Int]], rank: Int, maxIter: Int, regParam: Double, implicitPrefs: Boolean = false, numUserBlocks: Int = 2, numItemBlocks: Int = 3, targetRMSE: Double = 0.05): Unit = { val spark = this.spark import spark.implicits._ val als = new ALS() .setRank(rank) .setRegParam(regParam) .setImplicitPrefs(implicitPrefs) .setNumUserBlocks(numUserBlocks) .setNumItemBlocks(numItemBlocks) .setSeed(0) val alpha = als.getAlpha val model = als.fit(training.toDF()) testTransformerByGlobalCheckFunc[Rating[Int]](test.toDF(), model, "rating", "prediction") { case rows: Seq[Row] => val predictions = rows.map(row => (row.getFloat(0).toDouble, row.getFloat(1).toDouble)) val rmse = if (implicitPrefs) { // TODO: Use a better (rank-based?) evaluation metric for implicit feedback. // We limit the ratings and the predictions to interval [0, 1] and compute the // weighted RMSE with the confidence scores as weights. val (totalWeight, weightedSumSq) = predictions.map { case (rating, prediction) => val confidence = 1.0 + alpha * math.abs(rating) val rating01 = math.max(math.min(rating, 1.0), 0.0) val prediction01 = math.max(math.min(prediction, 1.0), 0.0) val err = prediction01 - rating01 (confidence, confidence * err * err) }.reduce[(Double, Double)] { case ((c0, e0), (c1, e1)) => (c0 + c1, e0 + e1) } math.sqrt(weightedSumSq / totalWeight) } else { val errorSquares = predictions.map { case (rating, prediction) => val err = rating - prediction err * err } val mse = errorSquares.sum / errorSquares.length math.sqrt(mse) } logInfo(s"Test RMSE is $rmse.") assert(rmse < targetRMSE) } MLTestingUtils.checkCopyAndUids(als, model) } test("exact rank-1 matrix") { val (training, test) = genExplicitTestData(numUsers = 20, numItems = 40, rank = 1) testALS(training, test, maxIter = 1, rank = 1, regParam = 1e-5, targetRMSE = 0.001) testALS(training, test, maxIter = 1, rank = 2, regParam = 1e-5, targetRMSE = 0.001) } test("approximate rank-1 matrix") { val (training, test) = genExplicitTestData(numUsers = 20, numItems = 40, rank = 1, noiseStd = 0.01) testALS(training, test, maxIter = 2, rank = 1, regParam = 0.01, targetRMSE = 0.02) testALS(training, test, maxIter = 2, rank = 2, regParam = 0.01, targetRMSE = 0.02) } test("approximate rank-2 matrix") { val (training, test) = genExplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01) testALS(training, test, maxIter = 4, rank = 2, regParam = 0.01, targetRMSE = 0.03) testALS(training, test, maxIter = 4, rank = 3, regParam = 0.01, targetRMSE = 0.03) } test("different block settings") { val (training, test) = genExplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01) for ((numUserBlocks, numItemBlocks) <- Seq((1, 1), (1, 2), (2, 1), (2, 2))) { testALS(training, test, maxIter = 4, rank = 3, regParam = 0.01, targetRMSE = 0.03, numUserBlocks = numUserBlocks, numItemBlocks = numItemBlocks) } } test("more blocks than ratings") { val (training, test) = genExplicitTestData(numUsers = 4, numItems = 4, rank = 1) testALS(training, test, maxIter = 2, rank = 1, regParam = 1e-4, targetRMSE = 0.002, numItemBlocks = 5, numUserBlocks = 5) } 
test("implicit feedback") { val (training, test) = genImplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01) testALS(training, test, maxIter = 4, rank = 2, regParam = 0.01, implicitPrefs = true, targetRMSE = 0.3) } test("implicit feedback regression") { val trainingWithNeg = sc.parallelize(Array(Rating(0, 0, 1), Rating(1, 1, 1), Rating(0, 1, -3))) val trainingWithZero = sc.parallelize(Array(Rating(0, 0, 1), Rating(1, 1, 1), Rating(0, 1, 0))) val modelWithNeg = trainALS(trainingWithNeg, rank = 1, maxIter = 5, regParam = 0.01, implicitPrefs = true) val modelWithZero = trainALS(trainingWithZero, rank = 1, maxIter = 5, regParam = 0.01, implicitPrefs = true) val userFactorsNeg = modelWithNeg.userFactors val itemFactorsNeg = modelWithNeg.itemFactors val userFactorsZero = modelWithZero.userFactors val itemFactorsZero = modelWithZero.itemFactors assert(userFactorsNeg.intersect(userFactorsZero).count() == 0) assert(itemFactorsNeg.intersect(itemFactorsZero).count() == 0) } test("using generic ID types") { val (ratings, _) = genImplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01) val longRatings = ratings.map(r => Rating(r.user.toLong, r.item.toLong, r.rating)) val (longUserFactors, _) = ALS.train(longRatings, rank = 2, maxIter = 4, seed = 0) assert(longUserFactors.first()._1.getClass === classOf[Long]) val strRatings = ratings.map(r => Rating(r.user.toString, r.item.toString, r.rating)) val (strUserFactors, _) = ALS.train(strRatings, rank = 2, maxIter = 4, seed = 0) assert(strUserFactors.first()._1.getClass === classOf[String]) } test("nonnegative constraint") { val (ratings, _) = genImplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01) val (userFactors, itemFactors) = ALS.train(ratings, rank = 2, maxIter = 4, nonnegative = true, seed = 0) def isNonnegative(factors: RDD[(Int, Array[Float])]): Boolean = { factors.values.map { _.forall(_ >= 0.0) }.reduce(_ && _) } assert(isNonnegative(userFactors)) assert(isNonnegative(itemFactors)) // TODO: Validate the solution. 
} test("als partitioner is a projection") { for (p <- Seq(1, 10, 100, 1000)) { val part = new ALSPartitioner(p) var k = 0 while (k < p) { assert(k === part.getPartition(k)) assert(k === part.getPartition(k.toLong)) k += 1 } } } test("partitioner in returned factors") { val (ratings, _) = genImplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01) val (userFactors, itemFactors) = ALS.train( ratings, rank = 2, maxIter = 4, numUserBlocks = 3, numItemBlocks = 4, seed = 0) for ((tpe, factors) <- Seq(("User", userFactors), ("Item", itemFactors))) { assert(userFactors.partitioner.isDefined, s"$tpe factors should have partitioner.") val part = userFactors.partitioner.get userFactors.mapPartitionsWithIndex { (idx, items) => items.foreach { case (id, _) => if (part.getPartition(id) != idx) { throw new SparkException(s"$tpe with ID $id should not be in partition $idx.") } } Iterator.empty }.count() } } test("als with large number of iterations") { val (ratings, _) = genExplicitTestData(numUsers = 4, numItems = 4, rank = 1) ALS.train(ratings, rank = 1, maxIter = 50, numUserBlocks = 2, numItemBlocks = 2, seed = 0) ALS.train(ratings, rank = 1, maxIter = 50, numUserBlocks = 2, numItemBlocks = 2, implicitPrefs = true, seed = 0) } test("read/write") { val spark = this.spark import spark.implicits._ import ALSSuite._ val (ratings, _) = genExplicitTestData(numUsers = 4, numItems = 4, rank = 1) def getFactors(df: DataFrame): Set[(Int, Array[Float])] = { df.select("id", "features").collect().map { case r => (r.getInt(0), r.getAs[Array[Float]](1)) }.toSet } def checkModelData(model: ALSModel, model2: ALSModel): Unit = { assert(model.rank === model2.rank) assert(getFactors(model.userFactors) === getFactors(model2.userFactors)) assert(getFactors(model.itemFactors) === getFactors(model2.itemFactors)) } val als = new ALS() testEstimatorAndModelReadWrite(als, ratings.toDF(), allEstimatorParamSettings, allModelParamSettings, checkModelData) } private def checkNumericTypesALS( estimator: ALS, spark: SparkSession, column: String, baseType: NumericType) (check: (ALSModel, ALSModel) => Unit) (check2: (ALSModel, ALSModel, DataFrame, Encoder[_]) => Unit): Unit = { val dfs = genRatingsDFWithNumericCols(spark, column) val maybeDf = dfs.find { case (numericTypeWithEncoder, _) => numericTypeWithEncoder.numericType == baseType } assert(maybeDf.isDefined) val df = maybeDf.get._2 val expected = estimator.fit(df) val actuals = dfs.map(t => (t, estimator.fit(t._2))) actuals.foreach { case (_, actual) => check(expected, actual) } actuals.foreach { case (t, actual) => check2(expected, actual, t._2, t._1.encoder) } val baseDF = dfs.find(_._1.numericType == baseType).get._2 val others = baseDF.columns.toSeq.diff(Seq(column)).map(col) val cols = Seq(col(column).cast(StringType)) ++ others val strDF = baseDF.select(cols: _*) val thrown = intercept[IllegalArgumentException] { estimator.fit(strDF) } assert(thrown.getMessage.contains( s"$column must be of type numeric but was actually of type string")) } private class NumericTypeWithEncoder[A](val numericType: NumericType) (implicit val encoder: Encoder[(A, Int, Double)]) private def genRatingsDFWithNumericCols( spark: SparkSession, column: String) = { import testImplicits._ val df = spark.createDataFrame(Seq( (0, 10, 1.0), (1, 20, 2.0), (2, 30, 3.0), (3, 40, 4.0), (4, 50, 5.0) )).toDF("user", "item", "rating") val others = df.columns.toSeq.diff(Seq(column)).map(col) val types = Seq(new NumericTypeWithEncoder[Short](ShortType), new NumericTypeWithEncoder[Long](LongType), 
new NumericTypeWithEncoder[Int](IntegerType), new NumericTypeWithEncoder[Float](FloatType), new NumericTypeWithEncoder[Byte](ByteType), new NumericTypeWithEncoder[Double](DoubleType), new NumericTypeWithEncoder[Decimal](DecimalType(10, 0))(ExpressionEncoder()) ) types.map { t => val cols = Seq(col(column).cast(t.numericType)) ++ others t -> df.select(cols: _*) } } test("input type validation") { val spark = this.spark import spark.implicits._ // check that ALS can handle all numeric types for rating column // and user/item columns (when the user/item ids are within Int range) val als = new ALS().setMaxIter(1).setRank(1) Seq(("user", IntegerType), ("item", IntegerType), ("rating", FloatType)).foreach { case (colName, sqlType) => checkNumericTypesALS(als, spark, colName, sqlType) { (ex, act) => ex.userFactors.first().getSeq[Float](1) === act.userFactors.first().getSeq[Float](1) } { (ex, act, df, enc) => val expected = ex.transform(df).selectExpr("prediction") .first().getFloat(0) testTransformerByGlobalCheckFunc(df, act, "prediction") { case rows: Seq[Row] => expected ~== rows.head.getFloat(0) absTol 1e-6 }(enc) } } // check user/item ids falling outside of Int range val big = Int.MaxValue.toLong + 1 val small = Int.MinValue.toDouble - 1 val df = Seq( (0, 0L, 0d, 1, 1L, 1d, 3.0), (0, big, small, 0, big, small, 2.0), (1, 1L, 1d, 0, 0L, 0d, 5.0) ).toDF("user", "user_big", "user_small", "item", "item_big", "item_small", "rating") val msg = "either out of Integer range or contained a fractional part" withClue("fit should fail when ids exceed integer range. ") { assert(intercept[SparkException] { als.fit(df.select(df("user_big").as("user"), df("item"), df("rating"))) }.getCause.getMessage.contains(msg)) assert(intercept[SparkException] { als.fit(df.select(df("user_small").as("user"), df("item"), df("rating"))) }.getCause.getMessage.contains(msg)) assert(intercept[SparkException] { als.fit(df.select(df("item_big").as("item"), df("user"), df("rating"))) }.getCause.getMessage.contains(msg)) assert(intercept[SparkException] { als.fit(df.select(df("item_small").as("item"), df("user"), df("rating"))) }.getCause.getMessage.contains(msg)) } withClue("transform should fail when ids exceed integer range. 
") { val model = als.fit(df) def testTransformIdExceedsIntRange[A : Encoder](dataFrame: DataFrame): Unit = { val e1 = intercept[SparkException] { model.transform(dataFrame).first } TestUtils.assertExceptionMsg(e1, msg) val e2 = intercept[StreamingQueryException] { testTransformer[A](dataFrame, model, "prediction") { _ => } } TestUtils.assertExceptionMsg(e2, msg) } testTransformIdExceedsIntRange[(Long, Int)](df.select(df("user_big").as("user"), df("item"))) testTransformIdExceedsIntRange[(Double, Int)](df.select(df("user_small").as("user"), df("item"))) testTransformIdExceedsIntRange[(Long, Int)](df.select(df("item_big").as("item"), df("user"))) testTransformIdExceedsIntRange[(Double, Int)](df.select(df("item_small").as("item"), df("user"))) } } test("SPARK-18268: ALS with empty RDD should fail with better message") { val ratings = sc.parallelize(Array.empty[Rating[Int]]) intercept[IllegalArgumentException] { ALS.train(ratings) } } test("ALS cold start user/item prediction strategy") { val spark = this.spark import spark.implicits._ import org.apache.spark.sql.functions._ val (ratings, _) = genExplicitTestData(numUsers = 4, numItems = 4, rank = 1) val data = ratings.toDF val knownUser = data.select(max("user")).as[Int].first() val unknownUser = knownUser + 10 val knownItem = data.select(max("item")).as[Int].first() val unknownItem = knownItem + 20 val test = Seq( (unknownUser, unknownItem, true), (knownUser, unknownItem, true), (unknownUser, knownItem, true), (knownUser, knownItem, false) ).toDF("user", "item", "expectedIsNaN") val als = new ALS().setMaxIter(1).setRank(1) // default is 'nan' val defaultModel = als.fit(data) testTransformer[(Int, Int, Boolean)](test, defaultModel, "expectedIsNaN", "prediction") { case Row(expectedIsNaN: Boolean, prediction: Float) => assert(prediction.isNaN === expectedIsNaN) } // check 'drop' strategy should filter out rows with unknown users/items val defaultPrediction = defaultModel.transform(test).select("prediction") .as[Float].filter(!_.isNaN).first() testTransformerByGlobalCheckFunc[(Int, Int, Boolean)](test, defaultModel.setColdStartStrategy("drop"), "prediction") { case rows: Seq[Row] => val dropPredictions = rows.map(_.getFloat(0)) assert(dropPredictions.length == 1) assert(!dropPredictions.head.isNaN) assert(dropPredictions.head ~== defaultPrediction relTol 1e-14) } } test("case insensitive cold start param value") { val spark = this.spark import spark.implicits._ val (ratings, _) = genExplicitTestData(numUsers = 2, numItems = 2, rank = 1) val data = ratings.toDF val model = new ALS().fit(data) Seq("nan", "NaN", "Nan", "drop", "DROP", "Drop").foreach { s => testTransformer[Rating[Int]](data, model.setColdStartStrategy(s), "prediction") { _ => } } } private def getALSModel = { val spark = this.spark import spark.implicits._ val userFactors = Seq( (0, Array(6.0f, 4.0f)), (1, Array(3.0f, 4.0f)), (2, Array(3.0f, 6.0f)) ).toDF("id", "features") val itemFactors = Seq( (3, Array(5.0f, 6.0f)), (4, Array(6.0f, 2.0f)), (5, Array(3.0f, 6.0f)), (6, Array(4.0f, 1.0f)) ).toDF("id", "features") val als = new ALS().setRank(2) new ALSModel(als.uid, als.getRank, userFactors, itemFactors) .setUserCol("user") .setItemCol("item") } test("recommendForAllUsers with k <, = and > num_items") { val model = getALSModel val numUsers = model.userFactors.count val numItems = model.itemFactors.count val expected = Map( 0 -> Seq((3, 54f), (4, 44f), (5, 42f), (6, 28f)), 1 -> Seq((3, 39f), (5, 33f), (4, 26f), (6, 16f)), 2 -> Seq((3, 51f), (5, 45f), (4, 30f), (6, 18f)) ) Seq(2, 4, 
6).foreach { k => val n = math.min(k, numItems).toInt val expectedUpToN = expected.mapValues(_.slice(0, n)) val topItems = model.recommendForAllUsers(k) assert(topItems.count() == numUsers) assert(topItems.columns.contains("user")) checkRecommendations(topItems, expectedUpToN, "item") } } test("recommendForAllItems with k <, = and > num_users") { val model = getALSModel val numUsers = model.userFactors.count val numItems = model.itemFactors.count val expected = Map( 3 -> Seq((0, 54f), (2, 51f), (1, 39f)), 4 -> Seq((0, 44f), (2, 30f), (1, 26f)), 5 -> Seq((2, 45f), (0, 42f), (1, 33f)), 6 -> Seq((0, 28f), (2, 18f), (1, 16f)) ) Seq(2, 3, 4).foreach { k => val n = math.min(k, numUsers).toInt val expectedUpToN = expected.mapValues(_.slice(0, n)) val topUsers = getALSModel.recommendForAllItems(k) assert(topUsers.count() == numItems) assert(topUsers.columns.contains("item")) checkRecommendations(topUsers, expectedUpToN, "user") } } test("recommendForUserSubset with k <, = and > num_items") { val spark = this.spark import spark.implicits._ val model = getALSModel val numItems = model.itemFactors.count val expected = Map( 0 -> Seq((3, 54f), (4, 44f), (5, 42f), (6, 28f)), 2 -> Seq((3, 51f), (5, 45f), (4, 30f), (6, 18f)) ) val userSubset = expected.keys.toSeq.toDF("user") val numUsersSubset = userSubset.count Seq(2, 4, 6).foreach { k => val n = math.min(k, numItems).toInt val expectedUpToN = expected.mapValues(_.slice(0, n)) val topItems = model.recommendForUserSubset(userSubset, k) assert(topItems.count() == numUsersSubset) assert(topItems.columns.contains("user")) checkRecommendations(topItems, expectedUpToN, "item") } } test("recommendForItemSubset with k <, = and > num_users") { val spark = this.spark import spark.implicits._ val model = getALSModel val numUsers = model.userFactors.count val expected = Map( 3 -> Seq((0, 54f), (2, 51f), (1, 39f)), 6 -> Seq((0, 28f), (2, 18f), (1, 16f)) ) val itemSubset = expected.keys.toSeq.toDF("item") val numItemsSubset = itemSubset.count Seq(2, 3, 4).foreach { k => val n = math.min(k, numUsers).toInt val expectedUpToN = expected.mapValues(_.slice(0, n)) val topUsers = model.recommendForItemSubset(itemSubset, k) assert(topUsers.count() == numItemsSubset) assert(topUsers.columns.contains("item")) checkRecommendations(topUsers, expectedUpToN, "user") } } test("subset recommendations eliminate duplicate ids, returns same results as unique ids") { val spark = this.spark import spark.implicits._ val model = getALSModel val k = 2 val users = Seq(0, 1).toDF("user") val dupUsers = Seq(0, 1, 0, 1).toDF("user") val singleUserRecs = model.recommendForUserSubset(users, k) val dupUserRecs = model.recommendForUserSubset(dupUsers, k) .as[(Int, Seq[(Int, Float)])].collect().toMap assert(singleUserRecs.count == dupUserRecs.size) checkRecommendations(singleUserRecs, dupUserRecs, "item") val items = Seq(3, 4, 5).toDF("item") val dupItems = Seq(3, 4, 5, 4, 5).toDF("item") val singleItemRecs = model.recommendForItemSubset(items, k) val dupItemRecs = model.recommendForItemSubset(dupItems, k) .as[(Int, Seq[(Int, Float)])].collect().toMap assert(singleItemRecs.count == dupItemRecs.size) checkRecommendations(singleItemRecs, dupItemRecs, "user") } test("subset recommendations on full input dataset equivalent to recommendForAll") { val spark = this.spark import spark.implicits._ val model = getALSModel val k = 2 val userSubset = model.userFactors.withColumnRenamed("id", "user").drop("features") val userSubsetRecs = model.recommendForUserSubset(userSubset, k) val allUserRecs = 
model.recommendForAllUsers(k).as[(Int, Seq[(Int, Float)])].collect().toMap checkRecommendations(userSubsetRecs, allUserRecs, "item") val itemSubset = model.itemFactors.withColumnRenamed("id", "item").drop("features") val itemSubsetRecs = model.recommendForItemSubset(itemSubset, k) val allItemRecs = model.recommendForAllItems(k).as[(Int, Seq[(Int, Float)])].collect().toMap checkRecommendations(itemSubsetRecs, allItemRecs, "user") } test("ALS should not introduce unnecessary shuffle") { def getShuffledDependencies(rdd: RDD[_]): Seq[ShuffleDependency[_, _, _]] = { rdd.dependencies.flatMap { case s: ShuffleDependency[_, _, _] => Seq(s) ++ getShuffledDependencies(s.rdd) case o => Seq.empty ++ getShuffledDependencies(o.rdd) } } val spark = this.spark import spark.implicits._ val (ratings, _) = genExplicitTestData(numUsers = 2, numItems = 2, rank = 1) val data = ratings.toDF val model = new ALS() .setMaxIter(2) .setImplicitPrefs(true) .setCheckpointInterval(-1) .fit(data) val userFactors = model.userFactors val itemFactors = model.itemFactors val shuffledUserFactors = getShuffledDependencies(userFactors.rdd).filter { dep => dep.rdd.name != null && dep.rdd.name.contains("userFactors") } val shuffledItemFactors = getShuffledDependencies(itemFactors.rdd).filter { dep => dep.rdd.name != null && dep.rdd.name.contains("itemFactors") } assert(shuffledUserFactors.size == 0) assert(shuffledItemFactors.size == 0) } private def checkRecommendations( topK: DataFrame, expected: Map[Int, Seq[(Int, Float)]], dstColName: String): Unit = { val spark = this.spark import spark.implicits._ assert(topK.columns.contains("recommendations")) topK.as[(Int, Seq[(Int, Float)])].collect().foreach { case (id: Int, recs: Seq[(Int, Float)]) => assert(recs === expected(id)) } topK.collect().foreach { row => val recs = row.getAs[WrappedArray[Row]]("recommendations") assert(recs(0).fieldIndex(dstColName) == 0) assert(recs(0).fieldIndex("rating") == 1) } } } class ALSCleanerSuite extends SparkFunSuite with BeforeAndAfterEach { override def beforeEach(): Unit = { super.beforeEach() // Once `Utils.getOrCreateLocalRootDirs` is called, it is cached in `Utils.localRootDirs`. // Unless this is manually cleared before and after a test, it returns the same directory // set before even if 'spark.local.dir' is configured afterwards. Utils.clearLocalRootDirs() } override def afterEach(): Unit = { Utils.clearLocalRootDirs() super.afterEach() } test("ALS shuffle cleanup standalone") { val conf = new SparkConf() val localDir = Utils.createTempDir() val checkpointDir = Utils.createTempDir() def getAllFiles: Set[File] = FileUtils.listFiles(localDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE).asScala.toSet try { conf.set("spark.local.dir", localDir.getAbsolutePath) val sc = new SparkContext("local[2]", "test", conf) try { sc.setCheckpointDir(checkpointDir.getAbsolutePath) // Test checkpoint and clean parents val input = sc.parallelize(1 to 1000) val keyed = input.map(x => (x % 20, 1)) val shuffled = keyed.reduceByKey(_ + _) val keysOnly = shuffled.keys val deps = keysOnly.dependencies keysOnly.count() ALS.cleanShuffleDependencies(sc, deps, true) val resultingFiles = getAllFiles assert(resultingFiles === Set()) // Ensure running count again works fine even if we kill the shuffle files. 
keysOnly.count() } finally { sc.stop() } } finally { Utils.deleteRecursively(localDir) Utils.deleteRecursively(checkpointDir) } } test("ALS shuffle cleanup in algorithm") { val conf = new SparkConf() val localDir = Utils.createTempDir() val checkpointDir = Utils.createTempDir() def getAllFiles: Set[File] = FileUtils.listFiles(localDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE).asScala.toSet try { conf.set("spark.local.dir", localDir.getAbsolutePath) val sc = new SparkContext("local[2]", "ALSCleanerSuite", conf) try { sc.setCheckpointDir(checkpointDir.getAbsolutePath) // Generate test data val (training, _) = ALSSuite.genImplicitTestData(sc, 20, 5, 1, 0.2, 0) // Implicitly test the cleaning of parents during ALS training val spark = SparkSession.builder .sparkContext(sc) .getOrCreate() import spark.implicits._ val als = new ALS() .setRank(1) .setRegParam(1e-5) .setSeed(0) .setCheckpointInterval(1) .setMaxIter(7) val model = als.fit(training.toDF()) val resultingFiles = getAllFiles // We expect the last shuffles files, block ratings, user factors, and item factors to be // around but no more. val pattern = "shuffle_(\\\\d+)_.+\\\\.data".r val rddIds = resultingFiles.flatMap { f => pattern.findAllIn(f.getName()).matchData.map { _.group(1) } } assert(rddIds.size === 4) } finally { sc.stop() } } finally { Utils.deleteRecursively(localDir) Utils.deleteRecursively(checkpointDir) } } } class ALSStorageSuite extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest with Logging { test("invalid storage params") { intercept[IllegalArgumentException] { new ALS().setIntermediateStorageLevel("foo") } intercept[IllegalArgumentException] { new ALS().setIntermediateStorageLevel("NONE") } intercept[IllegalArgumentException] { new ALS().setFinalStorageLevel("foo") } } test("default and non-default storage params set correct RDD StorageLevels") { val spark = this.spark import spark.implicits._ val data = Seq( (0, 0, 1.0), (0, 1, 2.0), (1, 2, 3.0), (1, 0, 2.0) ).toDF("user", "item", "rating") val als = new ALS().setMaxIter(1).setRank(1) // add listener to check intermediate RDD default storage levels val defaultListener = new IntermediateRDDStorageListener sc.addSparkListener(defaultListener) val model = als.fit(data) // check final factor RDD default storage levels val defaultFactorRDDs = sc.getPersistentRDDs.collect { case (id, rdd) if rdd.name == "userFactors" || rdd.name == "itemFactors" => rdd.name -> ((id, rdd.getStorageLevel)) }.toMap defaultFactorRDDs.foreach { case (_, (id, level)) => assert(level == StorageLevel.MEMORY_AND_DISK) } defaultListener.storageLevels.foreach(level => assert(level == StorageLevel.MEMORY_AND_DISK)) // add listener to check intermediate RDD non-default storage levels val nonDefaultListener = new IntermediateRDDStorageListener sc.addSparkListener(nonDefaultListener) val nonDefaultModel = als .setFinalStorageLevel("MEMORY_ONLY") .setIntermediateStorageLevel("DISK_ONLY") .fit(data) // check final factor RDD non-default storage levels val levels = sc.getPersistentRDDs.collect { case (id, rdd) if rdd.name == "userFactors" && rdd.id != defaultFactorRDDs("userFactors")._1 || rdd.name == "itemFactors" && rdd.id != defaultFactorRDDs("itemFactors")._1 => rdd.getStorageLevel } levels.foreach(level => assert(level == StorageLevel.MEMORY_ONLY)) nonDefaultListener.storageLevels.foreach(level => assert(level == StorageLevel.DISK_ONLY)) } } private class IntermediateRDDStorageListener extends SparkListener { val storageLevels: mutable.ArrayBuffer[StorageLevel] = 
mutable.ArrayBuffer() override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit = { val stageLevels = stageCompleted.stageInfo.rddInfos.collect { case info if info.name.contains("Blocks") || info.name.contains("Factors-") => info.storageLevel } storageLevels ++= stageLevels } } object ALSSuite extends Logging { /** * Mapping from all Params to valid settings which differ from the defaults. * This is useful for tests which need to exercise all Params, such as save/load. * This excludes input columns to simplify some tests. */ val allModelParamSettings: Map[String, Any] = Map( "predictionCol" -> "myPredictionCol" ) /** * Mapping from all Params to valid settings which differ from the defaults. * This is useful for tests which need to exercise all Params, such as save/load. * This excludes input columns to simplify some tests. */ val allEstimatorParamSettings: Map[String, Any] = allModelParamSettings ++ Map( "maxIter" -> 1, "rank" -> 1, "regParam" -> 0.01, "numUserBlocks" -> 2, "numItemBlocks" -> 2, "implicitPrefs" -> true, "alpha" -> 0.9, "nonnegative" -> true, "checkpointInterval" -> 20, "intermediateStorageLevel" -> "MEMORY_ONLY", "finalStorageLevel" -> "MEMORY_AND_DISK_SER" ) // Helper functions to generate test data we share between ALS test suites /** * Generates random user/item factors, with i.i.d. values drawn from U(a, b). * @param size number of users/items * @param rank number of features * @param random random number generator * @param a min value of the support (default: -1) * @param b max value of the support (default: 1) * @return a sequence of (ID, factors) pairs */ private def genFactors( size: Int, rank: Int, random: Random, a: Float = -1.0f, b: Float = 1.0f): Seq[(Int, Array[Float])] = { require(size > 0 && size < Int.MaxValue / 3) require(b > a) val ids = mutable.Set.empty[Int] while (ids.size < size) { ids += random.nextInt() } val width = b - a ids.toSeq.sorted.map(id => (id, Array.fill(rank)(a + random.nextFloat() * width))) } /** * Generates an implicit feedback dataset for testing ALS. * * @param sc SparkContext * @param numUsers number of users * @param numItems number of items * @param rank rank * @param noiseStd the standard deviation of additive Gaussian noise on training data * @param seed random seed * @return (training, test) */ def genImplicitTestData( sc: SparkContext, numUsers: Int, numItems: Int, rank: Int, noiseStd: Double = 0.0, seed: Long = 11L): (RDD[Rating[Int]], RDD[Rating[Int]]) = { // The assumption of the implicit feedback model is that unobserved ratings are more likely to // be negatives. 
val positiveFraction = 0.8 val negativeFraction = 1.0 - positiveFraction val trainingFraction = 0.6 val testFraction = 0.3 val totalFraction = trainingFraction + testFraction val random = new Random(seed) val userFactors = genFactors(numUsers, rank, random) val itemFactors = genFactors(numItems, rank, random) val training = ArrayBuffer.empty[Rating[Int]] val test = ArrayBuffer.empty[Rating[Int]] for ((userId, userFactor) <- userFactors; (itemId, itemFactor) <- itemFactors) { val rating = blas.sdot(rank, userFactor, 1, itemFactor, 1) val threshold = if (rating > 0) positiveFraction else negativeFraction val observed = random.nextDouble() < threshold if (observed) { val x = random.nextDouble() if (x < totalFraction) { if (x < trainingFraction) { val noise = noiseStd * random.nextGaussian() training += Rating(userId, itemId, rating + noise.toFloat) } else { test += Rating(userId, itemId, rating) } } } } logInfo(s"Generated an implicit feedback dataset with ${training.size} ratings for training " + s"and ${test.size} for test.") (sc.parallelize(training, 2), sc.parallelize(test, 2)) } }
pgandhi999/spark
mllib/src/test/scala/org/apache/spark/ml/recommendation/ALSSuite.scala
Scala
apache-2.0
46,620
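ALSSuite drives ALS exclusively through the public estimator API (setRank, setMaxIter, setRegParam, fit, transform, recommendForAllUsers). A minimal end-to-end sketch of that same API on a toy DataFrame; the local SparkSession and the ratings data are invented purely for illustration.

import org.apache.spark.ml.recommendation.ALS
import org.apache.spark.sql.SparkSession

object AlsSketch {
  def main(args: Array[String]): Unit = {
    // Local session and toy ratings for illustration only.
    val spark = SparkSession.builder().master("local[2]").appName("als-sketch").getOrCreate()
    import spark.implicits._

    val ratings = Seq(
      (0, 0, 4.0f), (0, 1, 2.0f),
      (1, 0, 3.0f), (1, 2, 5.0f),
      (2, 1, 1.0f), (2, 2, 4.0f)
    ).toDF("user", "item", "rating")

    val als = new ALS()
      .setRank(2)
      .setMaxIter(5)
      .setRegParam(0.01)
      .setUserCol("user")
      .setItemCol("item")
      .setRatingCol("rating")

    val model = als.fit(ratings)

    // Drop NaN rows for unseen ids, mirroring the cold-start strategy the tests exercise.
    model.setColdStartStrategy("drop")
    model.transform(ratings).show()
    model.recommendForAllUsers(2).show()

    spark.stop()
  }
}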
/* Copyright (C) 2008-2014 University of Massachusetts Amherst. This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible) http://factorie.cs.umass.edu, http://github.com/factorie Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cc.factorie.util import scala.collection.{IndexedSeq, Seq} import scala.compat.Platform.arraycopy trait ProtectedDoubleArrayBuffer { protected def _initialCapacity = 4 private var _arr = new Array[Double](_initialCapacity) private var _size = 0 private var _lastIndex: Int = 0 @inline final protected def _setCapacity(cap:Int): Unit = { if (_arr.length != cap) { require(cap >= _size && cap >= 0) val newArray = new Array[Double](cap) if (_size > 0) arraycopy(_arr, 0, newArray, 0, _size) _arr = newArray } } protected def _capacityGrowthFactor: Double = 1.5 @inline final protected def _ensureCapacity(cap:Int): Unit = if (cap > _arr.length) _setCapacity(math.max(cap, (_arr.length * _capacityGrowthFactor).toInt)) protected def _considerShrinkingCapacity(): Unit = if (_size > 0 && _arr.length > _size * 2) _setCapacity(_size) protected def _trimCapacity(): Unit = _setCapacity(_size) // What if _size == 0? protected def _reduceToSize(newSize:Int): Unit = { _size = newSize; _considerShrinkingCapacity() } @inline final protected def _length = _size @inline final protected def _apply(index:Int): Double = _arr(index) @inline final protected def _update(index:Int, value:Double): Unit = _arr(index) = value @inline final protected def _increment(index:Int, incr:Double): Unit = { _ensureCapacity(index+1); _arr(index) += incr; if (_size < index+1) _size = index+1 } @inline final protected def _append(elem: Double): this.type = { _ensureCapacity(_size + 1); _arr(_size) = elem; _size += 1; this } protected def _copyToArray(a:Array[Double]): Unit = arraycopy(_arr, 0, a, 0, _size) protected def _mapToArray[A](a:Array[A], f:Double=>A): Unit = { var i = 0; while (i < _size) { a(i) = f(_arr(i)); i += 1 } } protected def _asSeq: IndexedSeq[Double] = new IndexedSeq[Double] { final def length = _size final def apply(i:Int): Double = _arr(i) } protected def _toSeq: IndexedSeq[Double] = new IndexedSeq[Double] { private val arr = new Array[Double](_size); arraycopy(_arr, 0, arr, 0, _size) final def length = arr.length final def apply(i:Int) = arr(i) } protected def _array: Array[Double] = _arr // Careful. _array.length may not equal _length @inline final protected def _asArray: Array[Double] = // Carefully, dangerous to access directly if (_size == _arr.length) _arr else { val a = new Array[Double](_size); arraycopy(_arr, 0, a, 0, _size); a } protected def _toArray: Array[Double] = { val a = new Array[Double](_size); arraycopy(_arr, 0, a, 0, _size); a } protected def _asDoubleSeq: DoubleSeq = new TruncatedArrayDoubleSeq(_arr, _size) protected def _takeAsIntSeq(len:Int): DoubleSeq = new TruncatedArrayDoubleSeq(_arr, math.min(len, _size)) protected def _sum: Double = { var s = 0.0; var i = 0; while (i < _size) { s += _arr(i); i += 1 }; s } /** Return the index containing the value i, or -1 if i is not found. 
*/ protected def _indexOf(i:Double): Int = { var j = 0; while (j < _size) { if (_arr(j) == i) return j; j += 1 }; -1 } /** Return the index containing the value i, or -1 if i is not found. Do so more efficiently by assuming that the contents are sorted in ascending order. Look by starting near the last index as which a search was successful. */ protected def _indexOfSorted(i:Double): Int = { if (_size == 0) return -1 if (_lastIndex >= _size) _lastIndex = 0 if (_arr(_lastIndex) == i) _lastIndex else if (_arr(_lastIndex) < i) { var j = _lastIndex+1 while (j < _size && _arr(j) < i) { j += 1 } if (j < _size && _arr(j) == i) { _lastIndex = j; j } else -1 } else { var j = _lastIndex-1 while (j >= 0 && _arr(j) > i) { j -= 1 } if (j >= 0 && _arr(j) == i) { _lastIndex = j; j } else -1 } } /** Return the index at which value i should be inserted in order to maintain sorted order. This assumes that the existing elements already already sorted. If value i is already present, return its index. */ protected def _indexForInsertSorted(i:Double): Int = { if (_size == 0) return 0 if (_lastIndex >= _size) _lastIndex = 0 var j = 0 if (_arr(_lastIndex) == i) j = _lastIndex else if (_arr(_lastIndex) < i) j = _positionLte(i, _lastIndex+1, _size) else j = _positionLte(i, 0, _lastIndex) _lastIndex = j j } protected def _indexForInsertSortedLinear(i:Double): Int = { if (_size == 0) return 0 if (_lastIndex >= _size) _lastIndex = 0 var j = 0 if (_arr(_lastIndex) == i) j = _lastIndex else if (_arr(_lastIndex) < i) { j = _lastIndex+1 while (j < _size && _arr(j) < i) { j += 1 } } else { j = _lastIndex-1 while (j > 0 && _arr(j) < i) { j -= 1 } } _lastIndex = j j } /** Search the array '_arr' for the index at which value x could be inserted in sorted order. @param start the lowest index to consider @param end one plus the highest index that already contains data @return the index into '_arr' such that _arr(index) == x, or ind(index-1) < x < ind(index) or index == end. */ private def _positionLte(x:Double, start:Int, end:Int): Int = { val diff = end - start if (diff == 0) return start if (diff == 1) return if (_arr(start) >= x) start else end val middle = start + (diff / 2) val midval = _arr(middle) if (midval == x) middle else if (x < midval) _positionLte(x, start, middle) else _positionLte(x, middle+1, end) } /** Return true iff the integer 'index' is contained in _arr between positions 'start' and 'end-1' inclusive. Look by recursive binary search. 
 */
  private def _containsSorted(x:Double, start:Int, end:Int): Boolean = {
    // println("SparseBinaryVector._contains "+x+" "+start+" "+end)
    val diff = end - start
    if (diff == 0) return false
    if (diff == 1) return _arr(start) == x
    val middle = start + (diff / 2)
    val midval = _arr(middle)
    if (midval == x) true
    else if (x < midval) _containsSorted(x, start, middle)
    else _containsSorted(x, middle+1, end)
  }
  protected def _containsSorted(x:Double): Boolean = _containsSorted(x, 0, _size)
  protected def _clear(): Unit = { _arr = new Array[Double](_initialCapacity); _size = 0; _lastIndex = 0 }
  protected def _sizeHint(len: Int) = if (len >= _size && len >= 1) _setCapacity(len)
  protected def _set(elts: Array[Double]): Unit = { _ensureCapacity(elts.length); arraycopy(elts, 0, _arr, 0, elts.length); _size = elts.length }
  protected def _set(elts: Seq[Double]): Unit = { _ensureCapacity(elts.length); var i = 0; while (i < elts.length) { _arr(i) = elts(i); i += 1 }; _size = elts.length }
  protected def _fill(elt:Double): Unit = { var i = 0; while (i < _size) { _arr(i) = elt; i += 1 } }
  protected def _appendAll(elts: Array[Double]): Unit = {
    _ensureCapacity(_size + elts.length)
    arraycopy(elts, 0, _arr, _size, elts.length)
    _size += elts.length
    _setCapacity(_size) // assuming won't soon be adding more, save memory & make _array more efficient
  }
  protected def _appendAll(elts: TraversableOnce[Double]): Unit = {
    val n = elts.size
    _ensureCapacity(_size + n)
    elts.foreach(i => { _arr(_size) = i; _size += 1 })
    _setCapacity(_size) // assuming won't soon be adding more, save memory & make _array more efficient
  }
  protected def _prepend(elt: Double): Unit = {
    _ensureCapacity(_size + 1)
    arraycopy(_arr, 0, _arr, 1, _size)
    _arr(0) = elt
    _size += 1
  }
  protected def _prependAll(elts: TraversableOnce[Double]): Unit = _insertAll(0, elts.toTraversable)
  protected def _insert(index: Int, elt:Double): Unit = {
    _ensureCapacity(_size + 1)
    if (index < _size) arraycopy(_arr, index, _arr, index + 1, _size - index)
    _arr(index) = elt
    _size += 1
  }
  protected def _insertSorted(elt:Double): Unit = {
    val index = _indexForInsertSorted(elt)
    //assert(index >= 0 && index <= _size, index)
    _insert(index, elt)
  }
  protected def _insertSortedNoDuplicates(elt:Double): Unit = {
    val index = _indexForInsertSorted(elt)
    if (index >= _size || _arr(index) != elt) _insert(index, elt)
  }
  // TODO Make another version of this that works on DoubleSeq instead of Traversable[Double]
  protected def _insertAll(index: Int, seq: scala.collection.Traversable[Double]): Unit = {
    if (index < 0 || index > _size) throw new IndexOutOfBoundsException(index.toString)
    val xs = seq.toList
    val len = xs.length
    _ensureCapacity(_size + len)
    arraycopy(_arr, index, _arr, index+len, _size-index)
    xs.copyToArray(_arr, index)
    _size += len
  }
  protected def _insertAllSorted(seq: scala.collection.Traversable[Double]): Unit = throw new Error("Not yet implemented.")
  protected def _remove(index: Int, count: Int) {
    require(count >= 0, "removing negative number of elements")
    if (index < 0 || index > _size - count) throw new IndexOutOfBoundsException(index.toString)
    arraycopy(_arr, index + count, _arr, index, _size - (index + count))
    _reduceToSize(_size - count)
  }
  protected def _remove(index: Int): Unit = _remove(index, 1)
}

class DoubleArrayBuffer extends ProtectedDoubleArrayBuffer with DenseDoubleSeq {
  def this(initialCapacity:Int) = { this(); _setCapacity(initialCapacity) }
  def apply(index:Int): Double = _apply(index)
  def update(index:Int, value:Double): Unit = _update(index, value)
  def length: Int = _length
  def zero(): Unit = _fill(0.0)
  override def toArray = _toArray
  // Note that the method name below could be easily confused with MutableDoubleSeq.+=, which does something very different!
  def +=(i:Double): Unit = _append(i)
  //def +=(index:Int, d:Double): Unit = _increment(index, d) // Removed because starts to make method names too confusable with MutableDoubleSeq
  def ++=(is:Array[Double]): Unit = _appendAll(is)
  def ++=(is:Seq[Double]): Unit = _appendAll(is)
  def +=:(i:Double): Unit = _prepend(i)
  def insert(index:Int, elt:Double): Unit = _insert(index, elt)
  override def toSeq = _toSeq
  override def _array = super[ProtectedDoubleArrayBuffer]._array
}
patverga/factorie
src/main/scala/cc/factorie/util/DoubleArrayBuffer.scala
Scala
apache-2.0
10,930
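A minimal usage sketch for the DoubleArrayBuffer record above, exercising only members visible in that file (the Int-capacity constructor, apply, update, length, zero, toArray, +=, ++=, insert). The import path is taken from the record's path; the values are illustrative.

import cc.factorie.util.DoubleArrayBuffer

object DoubleArrayBufferSketch {
  def main(args: Array[String]): Unit = {
    val buf = new DoubleArrayBuffer(4)   // initial capacity hint
    buf += 1.5                           // append a single element
    buf ++= Array(2.5, 3.5)              // append all elements of an array
    buf.insert(0, 0.5)                   // insert at the front
    buf.update(1, 9.5)                   // overwrite index 1 (was 1.5)
    println(buf.length)                  // 4
    println(buf(1))                      // 9.5
    println(buf.toArray.mkString(", "))  // 0.5, 9.5, 2.5, 3.5
    buf.zero()                           // fill every slot with 0.0
  }
}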
package rubiz
package syntax

import scalaz.{ -\/, \/, \/- }
import scalaz.concurrent.Task
import scala.util.{ Try, Success, Failure }

import either._

trait TrySyntax {
  implicit final def tryOps[A](t: Try[A]): TryOps[A] = new TryOps[A](t)
}

final class TryOps[A](val t: Try[A]) extends AnyVal {
  def toDisjunction: Throwable \/ A = t match {
    case Failure(ex) => -\/(ex)
    case Success(a)  => \/-(a)
  }

  def toTask: Task[A] = toDisjunction.toTask
}
rubicon-project/rubiz
src/main/scala/rubiz/syntax/try.scala
Scala
bsd-2-clause
461
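A small, hedged example of the Try syntax above: converting a scala.util.Try into a scalaz disjunction and into a Task. The TryOps wrapper is instantiated explicitly here instead of guessing the project's implicit-import path; the failure message is illustrative.

import scala.util.Try
import scalaz.\/-
import scalaz.concurrent.Task
import rubiz.syntax.TryOps

object TrySyntaxSketch extends App {
  val ok: Try[Int]   = Try(21 * 2)
  val boom: Try[Int] = Try(throw new IllegalStateException("no luck"))

  // toDisjunction maps Success to \/- and Failure to -\/
  assert(new TryOps(ok).toDisjunction == \/-(42))
  assert(new TryOps(boom).toDisjunction.isLeft)

  // toTask lifts the Try into a scalaz Task without running it
  val task: Task[Int] = new TryOps(ok).toTask
}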
/* * Scala.js (https://www.scala-js.org/) * * Copyright EPFL. * * Licensed under Apache License 2.0 * (https://www.apache.org/licenses/LICENSE-2.0). * * See the NOTICE file distributed with this work for * additional information regarding copyright ownership. */ package java.util import scala.annotation.tailrec import java.{util => ju} import java.util.function.{BiConsumer, BiFunction, Function} import ScalaOps._ class HashMap[K, V](initialCapacity: Int, loadFactor: Float) extends AbstractMap[K, V] with Serializable with Cloneable { self => import HashMap._ if (initialCapacity < 0) throw new IllegalArgumentException("initialCapacity < 0") if (loadFactor <= 0.0f) throw new IllegalArgumentException("loadFactor <= 0.0") def this() = this(HashMap.DEFAULT_INITIAL_CAPACITY, HashMap.DEFAULT_LOAD_FACTOR) def this(initialCapacity: Int) = this(initialCapacity, HashMap.DEFAULT_LOAD_FACTOR) def this(m: Map[_ <: K, _ <: V]) = { this(m.size()) putAll(m) } /** The actual hash table. * * In each bucket, nodes are sorted by increasing value of `hash`. * * Deviation from the JavaDoc: we do not use `initialCapacity` as is for the * number of buckets. Instead we round it up to the next power of 2. This * allows some algorithms to be more efficient, notably `index()` and * `growTable()`. Since the number of buckets is not observable from the * outside, this deviation does not change any semantics. */ private[this] var table = new Array[Node[K, V]](tableSizeFor(initialCapacity)) /** The next size value at which to resize (capacity * load factor). */ private[this] var threshold: Int = newThreshold(table.length) private[this] var contentSize: Int = 0 /* Internal API for LinkedHashMap: these methods are overridden in * LinkedHashMap to implement its insertion- or access-order. */ private[util] def newNode(key: K, hash: Int, value: V, previous: Node[K, V], next: Node[K, V]): Node[K, V] = { new Node(key, hash, value, previous, next) } private[util] def nodeWasAccessed(node: Node[K, V]): Unit = () private[util] def nodeWasAdded(node: Node[K, V]): Unit = () private[util] def nodeWasRemoved(node: Node[K, V]): Unit = () // Public API override def size(): Int = contentSize override def isEmpty(): Boolean = contentSize == 0 override def get(key: Any): V = getOrDefaultImpl(key, null.asInstanceOf[V]) override def containsKey(key: Any): Boolean = findNode(key) ne null override def put(key: K, value: V): V = put0(key, value, ifAbsent = false) override def putAll(m: Map[_ <: K, _ <: V]): Unit = { m match { case m: ju.HashMap[_, _] => val iter = m.nodeIterator() while (iter.hasNext()) { val next = iter.next() put0(next.key, next.value, next.hash, ifAbsent = false) } case _ => super.putAll(m) } } override def remove(key: Any): V = { val node = remove0(key) if (node eq null) null.asInstanceOf[V] else node.value } override def clear(): Unit = { ju.Arrays.fill(table.asInstanceOf[Array[AnyRef]], null) contentSize = 0 } override def containsValue(value: Any): Boolean = valueIterator().scalaOps.exists(Objects.equals(value, _)) override def keySet(): ju.Set[K] = new KeySet override def values(): ju.Collection[V] = new Values def entrySet(): ju.Set[ju.Map.Entry[K, V]] = new EntrySet override def getOrDefault(key: Any, defaultValue: V): V = getOrDefaultImpl(key, defaultValue) /** Common implementation for get() and getOrDefault(). * * It is not directly inside the body of getOrDefault(), because subclasses * could override getOrDefault() to re-rely on get(). 
*/ private def getOrDefaultImpl(key: Any, defaultValue: V): V = { val node = findNode(key) if (node eq null) { defaultValue } else { nodeWasAccessed(node) node.value } } override def putIfAbsent(key: K, value: V): V = put0(key, value, ifAbsent = true) override def remove(key: Any, value: Any): Boolean = { val (node, idx) = findNodeAndIndexForRemoval(key) if ((node ne null) && Objects.equals(node.value, value)) { remove0(node, idx) true } else { false } } override def replace(key: K, oldValue: V, newValue: V): Boolean = { val node = findNode(key) if ((node ne null) && Objects.equals(node.value, oldValue)) { node.value = newValue nodeWasAccessed(node) true } else { false } } override def replace(key: K, value: V): V = { val node = findNode(key) if (node ne null) { val old = node.value node.value = value nodeWasAccessed(node) old } else { null.asInstanceOf[V] } } override def computeIfAbsent(key: K, mappingFunction: Function[_ >: K, _ <: V]): V = { val (node, hash, idx, oldValue) = getNode0(key) if (oldValue != null) { oldValue } else { val newValue = mappingFunction.apply(key) if (newValue != null) put0(key, newValue, hash, node) newValue } } override def computeIfPresent(key: K, remappingFunction: BiFunction[_ >: K, _ >: V, _ <: V]): V = { val (node, hash, idx, oldValue) = getNode0(key) if (oldValue == null) { oldValue } else { val newValue = remappingFunction.apply(key, oldValue) putOrRemove0(key, hash, idx, node, newValue) } } override def compute(key: K, remappingFunction: BiFunction[_ >: K, _ >: V, _ <: V]): V = { val (node, hash, idx, oldValue) = getNode0(key) val newValue = remappingFunction.apply(key, oldValue) putOrRemove0(key, hash, idx, node, newValue) } override def merge(key: K, value: V, remappingFunction: BiFunction[_ >: V, _ >: V, _ <: V]): V = { Objects.requireNonNull(value) val (node, hash, idx, oldValue) = getNode0(key) val newValue = if (oldValue == null) value else remappingFunction.apply(oldValue, value) putOrRemove0(key, hash, idx, node, newValue) } override def forEach(action: BiConsumer[_ >: K, _ >: V]): Unit = { val len = table.length var i = 0 while (i != len) { var node = table(i) while (node ne null) { action.accept(node.key, node.value) node = node.next } i += 1 } } override def clone(): AnyRef = new HashMap[K, V](this) // Elementary operations @inline private def index(hash: Int): Int = hash & (table.length - 1) @inline private def findNode(key: Any): Node[K, V] = { val hash = computeHash(key) findNode0(key, hash, index(hash)) } @inline private def findNodeAndIndexForRemoval(key: Any): (Node[K, V], Int) = { val hash = computeHash(key) val idx = index(hash) val node = findNode0(key, hash, idx) (node, idx) } private def findNode0(key: Any, hash: Int, idx: Int): Node[K, V] = { @inline @tailrec def loop(node: Node[K, V]): Node[K, V] = { if (node eq null) null else if (hash == node.hash && Objects.equals(key, node.key)) node else if (hash < node.hash) null else loop(node.next) } loop(table(idx)) } // Helpers for compute-like methods @inline private def getNode0(key: Any): (Node[K, V], Int, Int, V) = { val hash = computeHash(key) val idx = index(hash) val node = findNode0(key, hash, idx) val value = if (node eq null) { null.asInstanceOf[V] } else { nodeWasAccessed(node) node.value } (node, hash, idx, value) } private def putOrRemove0(key: K, hash: Int, idx: Int, node: Node[K, V], newValue: V): V = { if (newValue != null) put0(key, newValue, hash, node) else if (node ne null) remove0(node, idx) newValue } // Heavy lifting: modifications /** Puts a key-value pair into 
this map. * * If an entry already exists for the given key, `nodeWasAccessed` is * called, and, unless `ifAbsent` is true, its value is updated. * * If no entry existed for the given key, a new entry is created with the * given value, and `nodeWasAdded` is called. * * @param key the key to put * @param value the value to put * @param ifAbsent if true, do not override an existing mapping * @return the old value associated with `key`, or `null` if there was none */ @inline private[this] def put0(key: K, value: V, ifAbsent: Boolean): V = put0(key, value, computeHash(key), ifAbsent) /** Puts a key-value pair into this map. * * If an entry already exists for the given key, `nodeWasAccessed` is * called, and, unless `ifAbsent` is true, its value is updated. * * If no entry existed for the given key, a new entry is created with the * given value, and `nodeWasAdded` is called. * * @param key the key to put * @param value the value to put * @param hash the **improved** hashcode of `key` (see computeHash) * @param ifAbsent if true, do not override an existing mapping * @return the old value associated with `key`, or `null` if there was none */ private[this] def put0(key: K, value: V, hash: Int, ifAbsent: Boolean): V = { // scalastyle:off return val newContentSize = contentSize + 1 if (newContentSize >= threshold) growTable() val idx = index(hash) val newNode = table(idx) match { case null => val newNode = this.newNode(key, hash, value, null, null) table(idx) = newNode newNode case first => var prev: Node[K, V] = null var n = first while ((n ne null) && n.hash <= hash) { if (n.hash == hash && Objects.equals(key, n.key)) { nodeWasAccessed(n) val old = n.value if (!ifAbsent || (old == null)) n.value = value return old } prev = n n = n.next } val newNode = this.newNode(key, hash, value, prev, n) if (prev eq null) table(idx) = newNode else prev.next = newNode if (n ne null) n.previous = newNode newNode } contentSize = newContentSize nodeWasAdded(newNode) null.asInstanceOf[V] // scalastyle:on return } /** Puts a key-value pair into this map, given the result of an existing * lookup. * * The parameter `node` must be the result of a lookup for the given key. * If null, this method assumes that there is no entry for the given key in * the map. * * `nodeWasAccessed` is NOT called by this method, since it must already * have been called by the prerequisite lookup. * * If no entry existed for the given key, a new entry is created with the * given value, and `nodeWasAdded` is called. * * @param key the key to add * @param value the value to add * @param hash the **improved** hashcode of `key` (see computeHash) * @param node the entry for the given `key`, or `null` if there is no such entry */ private[this] def put0(key: K, value: V, hash: Int, node: Node[K, V]): Unit = { if (node ne null) { node.value = value } else { val newContentSize = contentSize + 1 if (newContentSize >= threshold) growTable() val idx = index(hash) val newNode = table(idx) match { case null => val newNode = this.newNode(key, hash, value, null, null) table(idx) = newNode newNode case first => var prev: Node[K, V] = null var n = first while ((n ne null) && n.hash < hash) { prev = n n = n.next } val newNode = this.newNode(key, hash, value, prev, n) if (prev eq null) table(idx) = newNode else prev.next = newNode if (n ne null) n.previous = newNode newNode } contentSize = newContentSize nodeWasAdded(newNode) } } /** Removes a key from this map if it exists. 
* * @param key the key to remove * @return the node that contained `key` if it was present, otherwise null */ private def remove0(key: Any): Node[K, V] = { val (node, idx) = findNodeAndIndexForRemoval(key) if (node ne null) remove0(node, idx) node } private[util] final def removeNode(node: Node[K, V]): Unit = remove0(node, index(node.hash)) private def remove0(node: Node[K, V], idx: Int): Unit = { val previous = node.previous val next = node.next if (previous eq null) table(idx) = next else previous.next = next if (next ne null) next.previous = previous contentSize -= 1 nodeWasRemoved(node) } /** Grow the size of the table (always times 2). */ private[this] def growTable(): Unit = { val oldTable = table val oldlen = oldTable.length val newlen = oldlen * 2 val newTable = new Array[Node[K, V]](newlen) table = newTable threshold = newThreshold(newlen) /* Split the nodes of each bucket from the old table into the "low" and * "high" indices of the new table. Since the new table contains exactly * twice as many buckets as the old table, every index `i` from the old * table is split into indices `i` and `oldlen + i` in the new table. */ var i = 0 while (i < oldlen) { var lastLow: Node[K, V] = null var lastHigh: Node[K, V] = null var node = oldTable(i) while (node ne null) { if ((node.hash & oldlen) == 0) { // go to low node.previous = lastLow if (lastLow eq null) newTable(i) = node else lastLow.next = node lastLow = node } else { // go to high node.previous = lastHigh if (lastHigh eq null) newTable(oldlen + i) = node else lastHigh.next = node lastHigh = node } node = node.next } if (lastLow ne null) lastLow.next = null if (lastHigh ne null) lastHigh.next = null i += 1 } } /** Rounds up `capacity` to a power of 2, with a maximum of 2^30. */ @inline private[this] def tableSizeFor(capacity: Int): Int = Math.min(Integer.highestOneBit(Math.max(capacity - 1, 4)) * 2, 1 << 30) @inline private[this] def newThreshold(size: Int): Int = (size.toDouble * loadFactor.toDouble).toInt // Iterators private[util] def nodeIterator(): ju.Iterator[Node[K, V]] = new NodeIterator private[util] def keyIterator(): ju.Iterator[K] = new KeyIterator private[util] def valueIterator(): ju.Iterator[V] = new ValueIterator // The cast works around the lack of definition-site variance private[util] final def entrySetIterator(): ju.Iterator[Map.Entry[K, V]] = nodeIterator().asInstanceOf[ju.Iterator[Map.Entry[K, V]]] private final class NodeIterator extends AbstractHashMapIterator[Node[K, V]] { protected[this] def extract(node: Node[K, V]): Node[K, V] = node } private final class KeyIterator extends AbstractHashMapIterator[K] { protected[this] def extract(node: Node[K, V]): K = node.key } private final class ValueIterator extends AbstractHashMapIterator[V] { protected[this] def extract(node: Node[K, V]): V = node.value } private abstract class AbstractHashMapIterator[A] extends ju.Iterator[A] { private[this] val len = table.length private[this] var nextIdx: Int = _ // 0 private[this] var nextNode: Node[K, V] = _ // null private[this] var lastNode: Node[K, V] = _ // null protected[this] def extract(node: Node[K, V]): A /* Movements of `nextNode` and `nextIdx` are spread over `hasNext()` to * simplify initial conditions, and preserving as much performance as * possible while guaranteeing that constructing the iterator remains O(1) * (the first linear behavior can happen when calling `hasNext()`, not * before). 
*/ def hasNext(): Boolean = { // scalastyle:off return if (nextNode ne null) { true } else { while (nextIdx < len) { val node = table(nextIdx) nextIdx += 1 if (node ne null) { nextNode = node return true } } false } // scalastyle:on return } def next(): A = { if (!hasNext()) throw new NoSuchElementException("next on empty iterator") val node = nextNode lastNode = node nextNode = node.next extract(node) } override def remove(): Unit = { val last = lastNode if (last eq null) throw new IllegalStateException("next must be called at least once before remove") removeNode(last) lastNode = null } } // Views private final class KeySet extends AbstractSet[K] { def iterator(): Iterator[K] = keyIterator() def size(): Int = self.size() override def contains(o: Any): Boolean = containsKey(o) override def remove(o: Any): Boolean = self.remove0(o) ne null override def clear(): Unit = self.clear() } private final class Values extends AbstractCollection[V] { def iterator(): ju.Iterator[V] = valueIterator() def size(): Int = self.size() override def contains(o: Any): Boolean = containsValue(o) override def clear(): Unit = self.clear() } private final class EntrySet extends AbstractSet[Map.Entry[K, V]] { def iterator(): Iterator[Map.Entry[K, V]] = entrySetIterator() def size(): Int = self.size() override def contains(o: Any): Boolean = o match { case o: Map.Entry[_, _] => val node = findNode(o.getKey()) (node ne null) && Objects.equals(node.getValue(), o.getValue()) case _ => false } override def remove(o: Any): Boolean = o match { case o: Map.Entry[_, _] => val key = o.getKey() val (node, idx) = findNodeAndIndexForRemoval(key) if ((node ne null) && Objects.equals(node.getValue(), o.getValue())) { remove0(node, idx) true } else { false } case _ => false } override def clear(): Unit = self.clear() } } object HashMap { private[util] final val DEFAULT_INITIAL_CAPACITY = 16 private[util] final val DEFAULT_LOAD_FACTOR = 0.75f /** Computes the improved hash of an original (`any.hashCode()`) hash. */ @inline private def improveHash(originalHash: Int): Int = { /* Improve the hash by xoring the high 16 bits into the low 16 bits just in * case entropy is skewed towards the high-value bits. We only use the * lowest bits to determine the hash bucket. * * This function is also its own inverse. That is, for all ints i, * improveHash(improveHash(i)) = i * this allows us to retrieve the original hash when we need it, and that * is why unimproveHash simply forwards to this method. */ originalHash ^ (originalHash >>> 16) } /** Performs the inverse operation of improveHash. * * In this case, it happens to be identical to improveHash. */ @inline private def unimproveHash(improvedHash: Int): Int = improveHash(improvedHash) /** Computes the improved hash of this key */ @inline private def computeHash(k: Any): Int = if (k == null) 0 else improveHash(k.hashCode()) private[util] class Node[K, V](val key: K, val hash: Int, var value: V, var previous: Node[K, V], var next: Node[K, V]) extends Map.Entry[K, V] { def getKey(): K = key def getValue(): V = value def setValue(v: V): V = { val oldValue = value value = v oldValue } override def equals(that: Any): Boolean = that match { case that: Map.Entry[_, _] => Objects.equals(getKey(), that.getKey()) && Objects.equals(getValue(), that.getValue()) case _ => false } override def hashCode(): Int = unimproveHash(hash) ^ Objects.hashCode(value) override def toString(): String = "" + getKey() + "=" + getValue() } }
scala-js/scala-js
javalib/src/main/scala/java/util/HashMap.scala
Scala
apache-2.0
20,315
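Since the class above reimplements java.util.HashMap for Scala.js, here is a brief sketch of behaviour it supports through the plain JDK Map API (putIfAbsent, getOrDefault, conditional replace and remove), nothing Scala.js-specific.

import java.util.{ HashMap => JHashMap }

object HashMapSketch {
  def main(args: Array[String]): Unit = {
    val m = new JHashMap[String, String]()

    m.put("host", "localhost")
    m.putIfAbsent("host", "example.org")        // ignored: key already present
    println(m.get("host"))                      // localhost

    println(m.getOrDefault("port", "8080"))     // 8080 (no entry, default returned)

    m.replace("host", "localhost", "127.0.0.1") // conditional replace succeeds
    println(m.get("host"))                      // 127.0.0.1

    m.remove("host", "something-else")          // value does not match, nothing removed
    println(m.containsKey("host"))              // true
  }
}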
package com.classification

import io.prediction.controller.PPreparator
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.regression.LabeledPoint

class PreparedData(
  val labeledPoints: RDD[LabeledPoint]
) extends Serializable

class Preparator extends PPreparator[TrainingData, PreparedData] {

  def prepare(sc: SparkContext, trainingData: TrainingData): PreparedData = {
    new PreparedData(trainingData.labeledPoints)
  }
}
PredictionIO/open-academy
AminManna/MyClassification/src/main/scala/Preparator.scala
Scala
apache-2.0
480
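A hedged sketch of the pass-through behaviour of the Preparator above: prepare simply wraps the RDD of labeled points in PreparedData unchanged. The local SparkContext setup and the sample points are illustrative only.

import org.apache.spark.{ SparkConf, SparkContext }
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import com.classification.PreparedData

object PreparatorSketch {
  def main(args: Array[String]): Unit = {
    // Local context purely for illustration
    val sc = new SparkContext(new SparkConf().setAppName("preparator-sketch").setMaster("local[2]"))

    val labeled = sc.parallelize(Seq(
      LabeledPoint(0.0, Vectors.dense(1.0, 2.0)),
      LabeledPoint(1.0, Vectors.dense(3.0, 4.0))
    ))

    // Preparator.prepare just does `new PreparedData(trainingData.labeledPoints)`,
    // so wrapping the RDD directly is equivalent here.
    val prepared = new PreparedData(labeled)
    println(prepared.labeledPoints.count())   // 2

    sc.stop()
  }
}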
/*
 * Copyright (c) 2002-2018 "Neo Technology,"
 * Network Engine for Objects in Lund AB [http://neotechnology.com]
 *
 * This file is part of Neo4j.
 *
 * Neo4j is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
package org.neo4j.cypher.internal.compiler.v2_3.commands

import org.neo4j.cypher.internal.compiler.v2_3._
import org.neo4j.cypher.internal.compiler.v2_3.commands.expressions._
import org.neo4j.cypher.internal.compiler.v2_3.commands.values.TokenType._
import org.neo4j.cypher.internal.compiler.v2_3.pipes.QueryStateHelper
import org.neo4j.cypher.internal.frontend.v2_3.test_helpers.CypherFunSuite

class PropertyValueComparisonTest extends CypherFunSuite {

  private val expectedNull = null.asInstanceOf[Any]

  test("nullNodeShouldGiveNullProperty") {
    val p = Property(Identifier("identifier"), PropertyKey("property"))
    val ctx = ExecutionContext.from("identifier" -> null)
    val state = QueryStateHelper.empty

    p(ctx)(state) should equal(expectedNull)
  }

  test("nonExistentPropertyShouldEvaluateToNull") {
    val p = Property(Identifier("identifier"), PropertyKey("nonExistent"))
    val ctx = ExecutionContext.from("identifier" -> Map("property" -> 42))
    val state = QueryStateHelper.empty

    p(ctx)(state) should equal(expectedNull)
  }
}
HuangLS/neo4j
community/cypher/cypher-compiler-2.3/src/test/scala/org/neo4j/cypher/internal/compiler/v2_3/commands/PropertyValueComparisonTest.scala
Scala
apache-2.0
1,863
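A minimal sketch mirroring the test above: evaluating a Property expression against an ExecutionContext. It reuses only the internal compiler classes already imported in that test (not the public Cypher API), and the expected output assumes Map-backed values resolve the way the test's non-existent-property case implies.

import org.neo4j.cypher.internal.compiler.v2_3.ExecutionContext
import org.neo4j.cypher.internal.compiler.v2_3.commands.expressions.{ Identifier, Property }
import org.neo4j.cypher.internal.compiler.v2_3.commands.values.TokenType.PropertyKey
import org.neo4j.cypher.internal.compiler.v2_3.pipes.QueryStateHelper

object PropertyEvalSketch {
  def main(args: Array[String]): Unit = {
    // Evaluate `identifier.property` where the identifier is bound to a Map stand-in
    val p     = Property(Identifier("identifier"), PropertyKey("property"))
    val ctx   = ExecutionContext.from("identifier" -> Map("property" -> 42))
    val state = QueryStateHelper.empty
    println(p(ctx)(state))   // expected to print 42 under the test's Map-backed convention
  }
}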
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.mllib.util import org.apache.spark.mllib.linalg.Vectors import org.scalatest.FunSuite import org.apache.spark.mllib.util.TestingUtils._ import org.scalatest.exceptions.TestFailedException class TestingUtilsSuite extends FunSuite { test("Comparing doubles using relative error.") { assert(23.1 ~== 23.52 relTol 0.02) assert(23.1 ~== 22.74 relTol 0.02) assert(23.1 ~= 23.52 relTol 0.02) assert(23.1 ~= 22.74 relTol 0.02) assert(!(23.1 !~= 23.52 relTol 0.02)) assert(!(23.1 !~= 22.74 relTol 0.02)) // Should throw exception with message when test fails. intercept[TestFailedException](23.1 !~== 23.52 relTol 0.02) intercept[TestFailedException](23.1 !~== 22.74 relTol 0.02) intercept[TestFailedException](23.1 ~== 23.63 relTol 0.02) intercept[TestFailedException](23.1 ~== 22.34 relTol 0.02) assert(23.1 !~== 23.63 relTol 0.02) assert(23.1 !~== 22.34 relTol 0.02) assert(23.1 !~= 23.63 relTol 0.02) assert(23.1 !~= 22.34 relTol 0.02) assert(!(23.1 ~= 23.63 relTol 0.02)) assert(!(23.1 ~= 22.34 relTol 0.02)) // Comparing against zero should fail the test and throw exception with message // saying that the relative error is meaningless in this situation. intercept[TestFailedException](0.1 ~== 0.0 relTol 0.032) intercept[TestFailedException](0.1 ~= 0.0 relTol 0.032) intercept[TestFailedException](0.1 !~== 0.0 relTol 0.032) intercept[TestFailedException](0.1 !~= 0.0 relTol 0.032) intercept[TestFailedException](0.0 ~== 0.1 relTol 0.032) intercept[TestFailedException](0.0 ~= 0.1 relTol 0.032) intercept[TestFailedException](0.0 !~== 0.1 relTol 0.032) intercept[TestFailedException](0.0 !~= 0.1 relTol 0.032) // Comparisons of numbers very close to zero. assert(10 * Double.MinPositiveValue ~== 9.5 * Double.MinPositiveValue relTol 0.01) assert(10 * Double.MinPositiveValue !~== 11 * Double.MinPositiveValue relTol 0.01) assert(-Double.MinPositiveValue ~== 1.18 * -Double.MinPositiveValue relTol 0.012) assert(-Double.MinPositiveValue ~== 1.38 * -Double.MinPositiveValue relTol 0.012) } test("Comparing doubles using absolute error.") { assert(17.8 ~== 17.99 absTol 0.2) assert(17.8 ~== 17.61 absTol 0.2) assert(17.8 ~= 17.99 absTol 0.2) assert(17.8 ~= 17.61 absTol 0.2) assert(!(17.8 !~= 17.99 absTol 0.2)) assert(!(17.8 !~= 17.61 absTol 0.2)) // Should throw exception with message when test fails. 
intercept[TestFailedException](17.8 !~== 17.99 absTol 0.2) intercept[TestFailedException](17.8 !~== 17.61 absTol 0.2) intercept[TestFailedException](17.8 ~== 18.01 absTol 0.2) intercept[TestFailedException](17.8 ~== 17.59 absTol 0.2) assert(17.8 !~== 18.01 absTol 0.2) assert(17.8 !~== 17.59 absTol 0.2) assert(17.8 !~= 18.01 absTol 0.2) assert(17.8 !~= 17.59 absTol 0.2) assert(!(17.8 ~= 18.01 absTol 0.2)) assert(!(17.8 ~= 17.59 absTol 0.2)) // Comparisons of numbers very close to zero, and both side of zeros assert(Double.MinPositiveValue ~== 4 * Double.MinPositiveValue absTol 5 * Double.MinPositiveValue) assert(Double.MinPositiveValue !~== 6 * Double.MinPositiveValue absTol 5 * Double.MinPositiveValue) assert(-Double.MinPositiveValue ~== 3 * Double.MinPositiveValue absTol 5 * Double.MinPositiveValue) assert(Double.MinPositiveValue !~== -4 * Double.MinPositiveValue absTol 5 * Double.MinPositiveValue) } test("Comparing vectors using relative error.") { //Comparisons of two dense vectors assert(Vectors.dense(Array(3.1, 3.5)) ~== Vectors.dense(Array(3.130, 3.534)) relTol 0.01) assert(Vectors.dense(Array(3.1, 3.5)) !~== Vectors.dense(Array(3.135, 3.534)) relTol 0.01) assert(Vectors.dense(Array(3.1, 3.5)) ~= Vectors.dense(Array(3.130, 3.534)) relTol 0.01) assert(Vectors.dense(Array(3.1, 3.5)) !~= Vectors.dense(Array(3.135, 3.534)) relTol 0.01) assert(!(Vectors.dense(Array(3.1, 3.5)) !~= Vectors.dense(Array(3.130, 3.534)) relTol 0.01)) assert(!(Vectors.dense(Array(3.1, 3.5)) ~= Vectors.dense(Array(3.135, 3.534)) relTol 0.01)) // Should throw exception with message when test fails. intercept[TestFailedException]( Vectors.dense(Array(3.1, 3.5)) !~== Vectors.dense(Array(3.130, 3.534)) relTol 0.01) intercept[TestFailedException]( Vectors.dense(Array(3.1, 3.5)) ~== Vectors.dense(Array(3.135, 3.534)) relTol 0.01) // Comparing against zero should fail the test and throw exception with message // saying that the relative error is meaningless in this situation. intercept[TestFailedException]( Vectors.dense(Array(3.1, 0.01)) ~== Vectors.dense(Array(3.13, 0.0)) relTol 0.01) intercept[TestFailedException]( Vectors.dense(Array(3.1, 0.01)) ~== Vectors.sparse(2, Array(0), Array(3.13)) relTol 0.01) // Comparisons of two sparse vectors assert(Vectors.dense(Array(3.1, 3.5)) ~== Vectors.sparse(2, Array(0, 1), Array(3.130, 3.534)) relTol 0.01) assert(Vectors.dense(Array(3.1, 3.5)) !~== Vectors.sparse(2, Array(0, 1), Array(3.135, 3.534)) relTol 0.01) } test("Comparing vectors using absolute error.") { //Comparisons of two dense vectors assert(Vectors.dense(Array(3.1, 3.5, 0.0)) ~== Vectors.dense(Array(3.1 + 1E-8, 3.5 + 2E-7, 1E-8)) absTol 1E-6) assert(Vectors.dense(Array(3.1, 3.5, 0.0)) !~== Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7, 1 + 1E-3)) absTol 1E-6) assert(Vectors.dense(Array(3.1, 3.5, 0.0)) ~= Vectors.dense(Array(3.1 + 1E-8, 3.5 + 2E-7, 1E-8)) absTol 1E-6) assert(Vectors.dense(Array(3.1, 3.5, 0.0)) !~= Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7, 1 + 1E-3)) absTol 1E-6) assert(!(Vectors.dense(Array(3.1, 3.5, 0.0)) !~= Vectors.dense(Array(3.1 + 1E-8, 3.5 + 2E-7, 1E-8)) absTol 1E-6)) assert(!(Vectors.dense(Array(3.1, 3.5, 0.0)) ~= Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7, 1 + 1E-3)) absTol 1E-6)) // Should throw exception with message when test fails. 
intercept[TestFailedException](Vectors.dense(Array(3.1, 3.5, 0.0)) !~== Vectors.dense(Array(3.1 + 1E-8, 3.5 + 2E-7, 1E-8)) absTol 1E-6) intercept[TestFailedException](Vectors.dense(Array(3.1, 3.5, 0.0)) ~== Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7, 1 + 1E-3)) absTol 1E-6) // Comparisons of two sparse vectors assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) ~== Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-8, 2.4 + 1E-7)) absTol 1E-6) assert(Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-8, 2.4 + 1E-7)) ~== Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) absTol 1E-6) assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) !~== Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-3, 2.4)) absTol 1E-6) assert(Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-3, 2.4)) !~== Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) absTol 1E-6) // Comparisons of a dense vector and a sparse vector assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) ~== Vectors.dense(Array(3.1 + 1E-8, 0, 2.4 + 1E-7)) absTol 1E-6) assert(Vectors.dense(Array(3.1 + 1E-8, 0, 2.4 + 1E-7)) ~== Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) absTol 1E-6) assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) !~== Vectors.dense(Array(3.1, 1E-3, 2.4)) absTol 1E-6) } }
trueyao/spark-lever
mllib/src/test/scala/org/apache/spark/mllib/util/TestingUtilsSuite.scala
Scala
apache-2.0
8,205
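Following the suite above, a hedged sketch of the tolerance DSL it exercises. TestingUtils lives under mllib's test sources, so this assumes the test classpath; the expected results are taken directly from assertions in the suite.

import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.util.TestingUtils._

object TestingUtilsSketch {
  def main(args: Array[String]): Unit = {
    // Doubles: ~= returns a Boolean; relTol / absTol choose the error model
    println(23.1 ~= 23.52 relTol 0.02)   // true  (within 2% relative error)
    println(17.8 ~= 18.01 absTol 0.2)    // false (0.21 exceeds the 0.2 absolute tolerance)

    // Vectors: dense and sparse representations are compared element-wise
    val a = Vectors.dense(3.1, 3.5)
    val b = Vectors.sparse(2, Array(0, 1), Array(3.130, 3.534))
    println(a ~= b relTol 0.01)          // true
  }
}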
package rd.main import java.io.File import java.io.FileFilter import java.text.SimpleDateFormat import java.util.ArrayList import java.util.Random import scala.collection.mutable.ListBuffer import scala.io.Source import org.bson.Document import com.mongodb.MongoClient import com.mongodb.client.MongoCollection import akka.actor.Actor import akka.actor.ActorRef import akka.actor.ActorSystem import akka.actor.Props import akka.actor.actorRef2Scala import java.util.Date case class DataToLoad(fileList: ListBuffer[File], numLoaders: Int) case class FileProcessed(numEntriesLoaded: Int) /** * Geo Trails Data Loader, uses Actors * Data and information about it can be found here: https://www.microsoft.com/en-us/download/details.aspx?id=52367 */ object GeoTrailsLoader { val MONGO_HOST = "localhost" val MONGO_PORT = 27017 val GEODATA_PATH = "D:/Geolife Trajectories 1.3/Data" val COLLECTION_NAME = "lifetracks" val DB_NAME = "Geo" val NUM_LOADERS = 4 val PLT_FILETYPE_FILTER = new FileFilter() { def accept(file: File): Boolean = { return (file.getName().endsWith(".plt") || file.isDirectory()) } } /** * Recursive load all files matching filter condition * @param Root Directory * @param Flattened list of files * @param PLT file filter */ def load(rootDir: File, list: ListBuffer[File], filter: FileFilter): Unit = { if (rootDir != null && rootDir.exists() && rootDir.isDirectory()) { rootDir.listFiles(filter).toList.foreach { f => { load(f, list, filter) } } } if (rootDir.isFile()) { list.append(rootDir); } } def main(args: Array[String]) { //Collect all data files val list = new ListBuffer[File]() load(new File(GEODATA_PATH), list, PLT_FILETYPE_FILTER) val totalFiles = list.size println(s"Total Files to load: ${totalFiles}") //Init Mongo and get Destination Collection val client = new MongoClient(MONGO_HOST, MONGO_PORT) val tracks = client.getDatabase(DB_NAME).getCollection(COLLECTION_NAME); tracks.drop(); //Init Actors val mainActor: ActorRef = ActorSystem("MainActor").actorOf(Props(new MainActor(tracks))) mainActor ! 
DataToLoad(list, NUM_LOADERS) } } object MongoLoaderActor { /** * Execute the load: extract from file, load into doc structure and then add to list for bulk writing */ def execute(fileName: String, mongoCollection: MongoCollection[Document]): Int = { val list = new ArrayList[Document](); Source.fromFile(fileName).getLines().filter { str => str.length() > 31 }.map { str => extract(str) }.map { row => load(row, fileName) }.foreach { x => list.add(x) }; mongoCollection.insertMany(list); return list.size() } def extract(str: String): Array[String] = { val row: Array[String] = str.split(","); if (row.length == 7) { return row; } else { return null; } } //Load the data using Geocoded point (WGS 84) def load(row: Array[String], fileName: String): Document = { val dateTimeFormat: SimpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); //{ type: "Point", coordinates: [ long, lat ] } WGS 84 val point = new Document(); point.put("type", "Point"); val coord: ArrayList[Double] = new ArrayList[Double]; coord.add(row(1).toDouble); coord.add(row(0).toDouble); point.put("coordinates", coord); val doc = new Document(); doc.put("source", fileName); doc.put("loc", point); doc.put("alt_ft", row(3).toDouble); val dateTime = (row(5) + " " + row(6)).trim(); try { doc.put("date_time", dateTimeFormat.parse(dateTime)) } catch { case e: Exception => { System.err.println("Error Processing: " + dateTime); e.printStackTrace(); } } return doc } } class MainActor(mongoCollection: MongoCollection[Document]) extends Actor { var totalFilesToProcess: Int = 0 var totalEntriesLoaded: Long = 0L var totalFilesProcessed: Long = 0L def receive = { case DataToLoad(fileList: ListBuffer[File], numLoaders: Int) => { totalFilesToProcess = fileList.size; if (totalFilesToProcess > 0) { val loaders = createWorkers(numLoaders, mongoCollection) fileList.zipWithIndex.foreach(e => loaders(e._2 % numLoaders) ! e._1.toString()) } } case FileProcessed(numEntriesLoaded: Int) => { totalEntriesLoaded += numEntriesLoaded totalFilesProcessed += 1L println(s"Processed ${totalFilesProcessed}/${totalFilesToProcess} files") if (totalFilesProcessed == totalFilesToProcess) { println(s"Completed processing all ${totalFilesToProcess} files") context.system.terminate() } } } private def createWorkers(numActors: Int, mongoCollection: MongoCollection[Document]) = { for (i <- 0 until numActors) yield context.actorOf(Props(new MongoLoaderActor(mongoCollection)), name = s"loader-${i}") } } /** * File Process Actor - MongoCollection to send the data to and Logger Actor */ class MongoLoaderActor(mongoCollection: MongoCollection[Document]) extends Actor { def receive = { case (fileName: String) => { val count: Int = MongoLoaderActor.execute(fileName, mongoCollection) context.parent ! FileProcessed(count); } } }
amachwe/Scala-Machine-Learning
src/main/scala/rd/main/GeoTrailsLoader.scala
Scala
gpl-3.0
5,325
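A hedged, self-contained sketch of the per-row transformation the loader above performs: splitting a 7-field PLT line and mapping it to a GeoJSON-style point with longitude first. It mirrors MongoLoaderActor.extract/load without the MongoDB and Akka dependencies; the sample row is illustrative.

import java.text.SimpleDateFormat

object PltRowSketch {
  // One PLT trajectory row: lat, long, 0, altitude(ft), days, date, time (illustrative values)
  val sample = "39.984702,116.318417,0,492,39744.1201851852,2008-10-23,02:53:04"

  def main(args: Array[String]): Unit = {
    val row = sample.split(",")
    require(row.length == 7, "unexpected PLT row shape")

    val lat  = row(0).toDouble
    val lon  = row(1).toDouble
    val alt  = row(3).toDouble
    val when = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").parse(row(5) + " " + row(6))

    // The loader stores [long, lat] because GeoJSON/WGS 84 points are (x, y) = (lon, lat)
    println(s"""{ "type": "Point", "coordinates": [$lon, $lat] } alt_ft=$alt at $when""")
  }
}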
package com.twitter.finagle

import com.twitter.conversions.DurationOps._
import com.twitter.finagle
import com.twitter.finagle.http.{Request, Response}
import com.twitter.finagle.http.ssl.HttpSslTestComponents
import com.twitter.finagle.param.OppTls
import com.twitter.finagle.ssl.{ClientAuth, OpportunisticTls, SnoopingLevelInterpreter, TlsSnooping}
import com.twitter.finagle.transport.Transport
import com.twitter.util.{Await, Awaitable, Future}
import java.net.InetSocketAddress
import org.scalatest.funsuite.AnyFunSuite

class TlsSnoopingEndToEndTest extends AnyFunSuite {

  private[this] def await[T](t: Awaitable[T]): T = Await.result(t, 5.seconds)

  def serverImpl(
    useH2: Boolean,
    clientAuth: ClientAuth,
    level: OpportunisticTls.Level,
    snooper: TlsSnooping.Snooper
  ): finagle.Http.Server = {
    val base =
      finagle.Http.server.configured(SnoopingLevelInterpreter.EnabledForNonNegotiatingProtocols)
    (if (useH2) base.withHttp2 else base.withNoHttp2)
      .configured(Transport.ServerSsl(
        Some(HttpSslTestComponents.unauthenticatedServerConfig.copy(clientAuth = clientAuth))))
      .configured(OppTls(Some(level)))
      .configured(TlsSnooping.Param(snooper))
  }

  def clientImpl(useH2: Boolean): finagle.Http.Client =
    if (useH2) finagle.Http.client.withHttp2
    else finagle.Http.client.withNoHttp2

  private def doADispatch(
    useH2: Boolean,
    clientAuth: ClientAuth,
    level: OpportunisticTls.Level,
    snooper: TlsSnooping.Snooper
  ): Response = {
    val server = serverImpl(useH2, clientAuth, level, snooper)
      .serve("localhost:*", Service.mk { _: Request => Future.value(Response()) })

    val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
    val client = clientImpl(useH2)
      .newService(s"${addr.getHostName}:${addr.getPort}", "client")

    try {
      await(client(Request()))
      // Do a second request since in the case of H2C we need to upgrade the
      // connection to H2 via the initial request.
      await(client(Request()))
    } finally {
      await(client.close())
      await(server.close())
    }
  }

  private def testDispatch(
    useH2: Boolean,
    clientAuth: ClientAuth,
    level: OpportunisticTls.Level,
    snooper: TlsSnooping.Snooper
  ): Unit = {
    if (shouldFail(clientAuth, level)) {
      assertThrows[ChannelClosedException] {
        doADispatch(useH2, clientAuth, level, snooper)
      }
    } else {
      doADispatch(useH2, clientAuth, level, snooper)
    }
  }

  def combos: Seq[(Boolean, ClientAuth, OpportunisticTls.Level, TlsSnooping.Snooper)] = {
    val authOptions = Seq(
      ClientAuth.Unspecified,
      ClientAuth.Off,
      ClientAuth.Wanted,
      ClientAuth.Needed
    )

    val opportunisticLevels = Seq(
      OpportunisticTls.Off,
      OpportunisticTls.Desired,
      OpportunisticTls.Required
    )

    val snoopers = Seq(TlsSnooping.Tls1XDetection)

    for {
      auth <- authOptions
      level <- opportunisticLevels
      snooper <- snoopers
      useH2 <- Seq(true, false)
    } yield (useH2, auth, level, snooper)
  }

  private[this] def shouldFail(clientAuth: ClientAuth, level: OpportunisticTls.Level): Boolean = {
    clientAuth == ClientAuth.Needed || level != OpportunisticTls.Desired
  }

  // Generate the tests
  combos.foreach {
    case (useH2, auth, level, snooper) =>
      test(s"H2:$useH2, Client auth: $auth, Opportunistic level: $level") {
        testDispatch(useH2, auth, level, snooper)
      }
  }
}
twitter/finagle
finagle-http/src/test/scala/com/twitter/finagle/TlsSnoopingEndToEndTest.scala
Scala
apache-2.0
3,490
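A hedged sketch of the server-side configuration the test above exercises: TLS snooping for non-negotiating protocols combined with opportunistic TLS. The stack parameters are the same ones used in serverImpl; the SSL configuration is a default-valued placeholder because HttpSslTestComponents is test-only.

import com.twitter.finagle.Http
import com.twitter.finagle.param.OppTls
import com.twitter.finagle.ssl.{ OpportunisticTls, SnoopingLevelInterpreter, TlsSnooping }
import com.twitter.finagle.ssl.server.SslServerConfiguration
import com.twitter.finagle.transport.Transport

object TlsSnoopingServerSketch {
  // Placeholder: in the test this comes from HttpSslTestComponents, which is
  // not available outside finagle's test sources.
  def serverSslConfig: SslServerConfiguration = SslServerConfiguration()

  val server: Http.Server =
    Http.server
      // Snoop the first bytes of a connection to decide between TLS and cleartext
      .configured(SnoopingLevelInterpreter.EnabledForNonNegotiatingProtocols)
      .configured(Transport.ServerSsl(Some(serverSslConfig)))
      // Desired: accept both TLS and plaintext clients
      .configured(OppTls(Some(OpportunisticTls.Desired)))
      .configured(TlsSnooping.Param(TlsSnooping.Tls1XDetection))
}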
/*********************************************************************** * Copyright (c) 2013-2022 Commonwealth Computer Research, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, Version 2.0 * which accompanies this distribution and is available at * http://www.opensource.org/licenses/apache2.0.php. ***********************************************************************/ package org.locationtech.geomesa.fs.storage.orc.utils import java.nio.charset.StandardCharsets import java.util.UUID import org.apache.hadoop.io._ import org.apache.orc.TypeDescription import org.apache.orc.mapred.{OrcList, OrcMap, OrcStruct, OrcTimestamp} import org.locationtech.geomesa.fs.storage.orc.OrcFileSystemStorage import org.locationtech.geomesa.utils.geotools.ObjectType import org.locationtech.geomesa.utils.geotools.ObjectType.ObjectType import org.locationtech.geomesa.utils.text.WKBUtils import org.locationtech.jts.geom._ import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType} trait OrcOutputFormatWriter { def apply(sf: SimpleFeature, output: OrcStruct): Unit } object OrcOutputFormatWriter { def apply(sft: SimpleFeatureType, description: TypeDescription, fid: Boolean = true): OrcOutputFormatWriter = { val builder = Seq.newBuilder[OrcOutputFormatWriter] builder.sizeHint(sft.getAttributeCount + (if (fid) { 1 } else { 0 })) var i = 0 var col = 0 while (i < sft.getAttributeCount) { val descriptor = sft.getDescriptor(i) val bindings = ObjectType.selectType(descriptor) val reader = bindings.head match { case ObjectType.GEOMETRY => createGeometryWriter(bindings(1), col, i, description) case ObjectType.DATE => new DateOutputFormatWriter(col, i) case ObjectType.STRING => new StringOutputFormatWriter(col, i) case ObjectType.INT => new IntOutputFormatWriter(col, i) case ObjectType.LONG => new LongOutputFormatWriter(col, i) case ObjectType.FLOAT => new FloatOutputFormatWriter(col, i) case ObjectType.DOUBLE => new DoubleOutputFormatWriter(col, i) case ObjectType.BOOLEAN => new BooleanOutputFormatWriter(col, i) case ObjectType.BYTES => new BytesOutputFormatWriter(col, i) case ObjectType.UUID => new UuidOutputFormatWriter(col, i) case ObjectType.LIST => new ListOutputFormatWriter(col, i, bindings(1), description) case ObjectType.MAP => new MapOutputFormatWriter(col, i, bindings(1), bindings(2), description) case _ => throw new IllegalArgumentException(s"Unexpected object type ${bindings.head}") } builder += reader i += 1 col += OrcFileSystemStorage.fieldCount(descriptor) } if (fid) { builder += new FidOutputFormatWriter(col) } new SequenceOutputFormatWriter(builder.result) } private def createGeometryWriter( binding: ObjectType, col: Int, i: Int, description: TypeDescription): OrcOutputFormatWriter = { binding match { case ObjectType.POINT => new PointOutputFormatWriter(col, col + 1, i) case ObjectType.LINESTRING => new LineStringOutputFormatWriter(col, col + 1, i, description) case ObjectType.MULTIPOINT => new MultiPointOutputFormatWriter(col, col + 1, i, description) case ObjectType.POLYGON => new PolygonOutputFormatWriter(col, col + 1, i, description) case ObjectType.MULTILINESTRING => new MultiLineStringOutputFormatWriter(col, col + 1, i, description) case ObjectType.MULTIPOLYGON => new MultiPolygonOutputFormatWriter(col, col + 1, i, description) case ObjectType.GEOMETRY => new GeometryWkbOutputFormatWriter(col, i) case _ => throw new IllegalArgumentException(s"Unexpected geometry type $binding") } } class 
SequenceOutputFormatWriter(writers: Seq[OrcOutputFormatWriter]) extends OrcOutputFormatWriter { override def apply(sf: SimpleFeature, output: OrcStruct): Unit = writers.foreach(_.apply(sf, output)) } class FidOutputFormatWriter(col: Int) extends OrcOutputFormatWriter with SetOutputFormatString { override def apply(sf: SimpleFeature, output: OrcStruct): Unit = output.setFieldValue(col, setValue(sf.getID, output.getFieldValue(col))) } class DateOutputFormatWriter(val col: Int, val attribute: Int) extends OutputFormatWriterAdapter with SetOutputFormatDate class StringOutputFormatWriter(val col: Int, val attribute: Int) extends OutputFormatWriterAdapter with SetOutputFormatString class IntOutputFormatWriter(val col: Int, val attribute: Int) extends OutputFormatWriterAdapter with SetOutputFormatInt class LongOutputFormatWriter(val col: Int, val attribute: Int) extends OutputFormatWriterAdapter with SetOutputFormatLong class FloatOutputFormatWriter(val col: Int, val attribute: Int) extends OutputFormatWriterAdapter with SetOutputFormatFloat class DoubleOutputFormatWriter(val col: Int, val attribute: Int) extends OutputFormatWriterAdapter with SetOutputFormatDouble class BooleanOutputFormatWriter(val col: Int, val attribute: Int) extends OutputFormatWriterAdapter with SetOutputFormatBoolean class BytesOutputFormatWriter(val col: Int, val attribute: Int) extends OutputFormatWriterAdapter with SetOutputFormatBytes class UuidOutputFormatWriter(val col: Int, val attribute: Int) extends OutputFormatWriterAdapter with SetOutputFormatUuid /** * Reads a point attribute from a simple feature and sets it in an output format * * @param xCol index of x field, containing points * @param yCol index of y field, containing points * @param attribute simple feature attribute index */ class PointOutputFormatWriter(xCol: Int, yCol: Int, attribute: Int) extends OrcOutputFormatWriter with SetOutputFormatDouble { override def apply(sf: SimpleFeature, output: OrcStruct): Unit = { val point = sf.getAttribute(attribute).asInstanceOf[Point] if (point == null) { output.setFieldValue(xCol, null) output.setFieldValue(yCol, null) } else { var x = output.getFieldValue(xCol).asInstanceOf[DoubleWritable] var y = output.getFieldValue(yCol).asInstanceOf[DoubleWritable] if (x == null) { x = new DoubleWritable output.setFieldValue(xCol, x) } if (y == null) { y = new DoubleWritable output.setFieldValue(yCol, y) } x.set(point.getX) y.set(point.getY) } } } /** * Reads a linestring attribute from a simple feature and sets it in an output format. * A linestring is modeled as a list of points. 
* * @see PointOutputFormatWriter * * @param xCol index of x field, containing a list of points * @param yCol index of y field, containing a list of points * @param attribute simple feature attribute index */ class LineStringOutputFormatWriter(xCol: Int, yCol: Int, attribute: Int, description: TypeDescription) extends OrcOutputFormatWriter with SetOutputFormatDouble { override def apply(sf: SimpleFeature, output: OrcStruct): Unit = { val line = sf.getAttribute(attribute).asInstanceOf[LineString] if (line == null) { output.setFieldValue(xCol, null) output.setFieldValue(yCol, null) } else { var x = output.getFieldValue(xCol).asInstanceOf[OrcList[DoubleWritable]] var y = output.getFieldValue(yCol).asInstanceOf[OrcList[DoubleWritable]] if (x == null) { x = new OrcList[DoubleWritable](description.getChildren.get(xCol), line.getNumPoints) output.setFieldValue(xCol, x) } else { x.clear() } if (y == null) { y = new OrcList[DoubleWritable](description.getChildren.get(yCol), line.getNumPoints) output.setFieldValue(yCol, y) } else { y.clear() } var i = 0 while (i < line.getNumPoints) { val pt = line.getCoordinateN(i) x.add(new DoubleWritable(pt.x)) y.add(new DoubleWritable(pt.y)) i += 1 } } } } /** * Reads a multi-point attribute from a simple feature and sets it in an output format. * A multi-point is modeled as a list of points. * * @see PointOutputFormatWriter * * @param xCol index of x field, containing a list of points * @param yCol index of y field, containing a list of points * @param attribute simple feature attribute index */ class MultiPointOutputFormatWriter(xCol: Int, yCol: Int, attribute: Int, description: TypeDescription) extends OrcOutputFormatWriter with SetOutputFormatDouble { override def apply(sf: SimpleFeature, output: OrcStruct): Unit = { val multiPoint = sf.getAttribute(attribute).asInstanceOf[MultiPoint] if (multiPoint == null) { output.setFieldValue(xCol, null) output.setFieldValue(yCol, null) } else { var x = output.getFieldValue(xCol).asInstanceOf[OrcList[DoubleWritable]] var y = output.getFieldValue(yCol).asInstanceOf[OrcList[DoubleWritable]] if (x == null) { x = new OrcList[DoubleWritable](description.getChildren.get(xCol), multiPoint.getNumPoints) output.setFieldValue(xCol, x) } else { x.clear() } if (y == null) { y = new OrcList[DoubleWritable](description.getChildren.get(yCol), multiPoint.getNumPoints) output.setFieldValue(yCol, y) } else { y.clear() } var i = 0 while (i < multiPoint.getNumPoints) { val pt = multiPoint.getGeometryN(i).asInstanceOf[Point] x.add(new DoubleWritable(pt.getX)) y.add(new DoubleWritable(pt.getY)) i += 1 } } } } /** * Reads a polygon attribute from a simple feature and sets it in an output format. * A polygon is modeled as a list of lines, with the first value being the shell, * and any subsequent values being interior holes. 
* * @see LineStringOutputFormatWriter * * @param xCol index of x field, containing a list of lists of points * @param yCol index of y field, containing a list of lists of points * @param attribute simple feature attribute index */ class PolygonOutputFormatWriter(xCol: Int, yCol: Int, attribute: Int, description: TypeDescription) extends OrcOutputFormatWriter with SetOutputFormatDouble { override def apply(sf: SimpleFeature, output: OrcStruct): Unit = { val polygon = sf.getAttribute(attribute).asInstanceOf[Polygon] if (polygon == null) { output.setFieldValue(xCol, null) output.setFieldValue(yCol, null) } else { var xx = output.getFieldValue(xCol).asInstanceOf[OrcList[OrcList[DoubleWritable]]] var yy = output.getFieldValue(yCol).asInstanceOf[OrcList[OrcList[DoubleWritable]]] if (xx == null) { xx = new OrcList[OrcList[DoubleWritable]](description.getChildren.get(xCol), polygon.getNumInteriorRing + 1) output.setFieldValue(xCol, xx) } else { xx.clear() } if (yy == null) { yy = new OrcList[OrcList[DoubleWritable]](description.getChildren.get(yCol), polygon.getNumInteriorRing + 1) output.setFieldValue(yCol, yy) } else { yy.clear() } var j = 0 while (j < polygon.getNumInteriorRing + 1) { val line = if (j == 0) { polygon.getExteriorRing } else { polygon.getInteriorRingN(j - 1) } val x = new OrcList[DoubleWritable](description.getChildren.get(xCol).getChildren.get(0), line.getNumPoints) val y = new OrcList[DoubleWritable](description.getChildren.get(yCol).getChildren.get(0), line.getNumPoints) var i = 0 while (i < line.getNumPoints) { val pt = line.getCoordinateN(i) x.add(new DoubleWritable(pt.x)) y.add(new DoubleWritable(pt.y)) i += 1 } xx.add(x) yy.add(y) j += 1 } } } } /** * Reads a multi-linestring attribute from a simple feature and sets it in an output format. * A multi-linestring is modeled as a list of lines. 
* * @see LineStringOutputFormatWriter * * @param xCol index of x field, containing a list of lists of points * @param yCol index of y field, containing a list of lists of points * @param attribute simple feature attribute index */ class MultiLineStringOutputFormatWriter(xCol: Int, yCol: Int, attribute: Int, description: TypeDescription) extends OrcOutputFormatWriter with SetOutputFormatDouble { override def apply(sf: SimpleFeature, output: OrcStruct): Unit = { val multiLineString = sf.getAttribute(attribute).asInstanceOf[MultiLineString] if (multiLineString == null) { output.setFieldValue(xCol, null) output.setFieldValue(yCol, null) } else { var xx = output.getFieldValue(xCol).asInstanceOf[OrcList[OrcList[DoubleWritable]]] var yy = output.getFieldValue(yCol).asInstanceOf[OrcList[OrcList[DoubleWritable]]] if (xx == null) { xx = new OrcList[OrcList[DoubleWritable]](description.getChildren.get(xCol), multiLineString.getNumGeometries) output.setFieldValue(xCol, xx) } else { xx.clear() } if (yy == null) { yy = new OrcList[OrcList[DoubleWritable]](description.getChildren.get(yCol), multiLineString.getNumGeometries) output.setFieldValue(yCol, yy) } else { yy.clear() } var j = 0 while (j < multiLineString.getNumGeometries) { val line = multiLineString.getGeometryN(j).asInstanceOf[LineString] val x = new OrcList[DoubleWritable](description.getChildren.get(xCol).getChildren.get(0), line.getNumPoints) val y = new OrcList[DoubleWritable](description.getChildren.get(yCol).getChildren.get(0), line.getNumPoints) var i = 0 while (i < line.getNumPoints) { val pt = line.getCoordinateN(i) x.add(new DoubleWritable(pt.x)) y.add(new DoubleWritable(pt.y)) i += 1 } xx.add(x) yy.add(y) j += 1 } } } } /** * Reads a multi-polygon attribute from a simple feature and sets it in an output format. * A multi-polygon is modeled as a list of polygons. 
    *
    * @see PolygonOutputFormatWriter
    *
    * @param xCol index of x field, containing a list of lists of lists of points
    * @param yCol index of y field, containing a list of lists of lists of points
    * @param attribute simple feature attribute index
    */
  class MultiPolygonOutputFormatWriter(xCol: Int, yCol: Int, attribute: Int, description: TypeDescription)
      extends OrcOutputFormatWriter with SetOutputFormatDouble {

    override def apply(sf: SimpleFeature, output: OrcStruct): Unit = {
      val multiPolygon = sf.getAttribute(attribute).asInstanceOf[MultiPolygon]
      if (multiPolygon == null) {
        output.setFieldValue(xCol, null)
        output.setFieldValue(yCol, null)
      } else {
        var xxx = output.getFieldValue(xCol).asInstanceOf[OrcList[OrcList[OrcList[DoubleWritable]]]]
        var yyy = output.getFieldValue(yCol).asInstanceOf[OrcList[OrcList[OrcList[DoubleWritable]]]]
        if (xxx == null) {
          xxx = new OrcList[OrcList[OrcList[DoubleWritable]]](description.getChildren.get(xCol), multiPolygon.getNumGeometries)
          output.setFieldValue(xCol, xxx)
        } else {
          xxx.clear()
        }
        if (yyy == null) {
          yyy = new OrcList[OrcList[OrcList[DoubleWritable]]](description.getChildren.get(yCol), multiPolygon.getNumGeometries)
          output.setFieldValue(yCol, yyy)
        } else {
          yyy.clear()
        }
        var k = 0
        while (k < multiPolygon.getNumGeometries) {
          val polygon = multiPolygon.getGeometryN(k).asInstanceOf[Polygon]
          val xx = new OrcList[OrcList[DoubleWritable]](description.getChildren.get(xCol).getChildren.get(0), polygon.getNumGeometries)
          val yy = new OrcList[OrcList[DoubleWritable]](description.getChildren.get(yCol).getChildren.get(0), polygon.getNumGeometries)
          var j = 0
          while (j < polygon.getNumInteriorRing + 1) {
            val line = if (j == 0) { polygon.getExteriorRing } else { polygon.getInteriorRingN(j - 1) }
            val x = new OrcList[DoubleWritable](description.getChildren.get(xCol).getChildren.get(0), line.getNumPoints)
            val y = new OrcList[DoubleWritable](description.getChildren.get(yCol).getChildren.get(0), line.getNumPoints)
            var i = 0
            while (i < line.getNumPoints) {
              val pt = line.getCoordinateN(i)
              x.add(new DoubleWritable(pt.x))
              y.add(new DoubleWritable(pt.y))
              i += 1
            }
            xx.add(x)
            yy.add(y)
            j += 1
          }
          xxx.add(xx)
          yyy.add(yy)
          k += 1
        }
      }
    }
  }

  class GeometryWkbOutputFormatWriter(val col: Int, val attribute: Int) extends OutputFormatWriterAdapter {
    def setValue(value: AnyRef, existing: WritableComparable[_]): WritableComparable[_] = {
      if (value == null) { null } else {
        var field = existing.asInstanceOf[BytesWritable]
        if (field == null) {
          field = new BytesWritable
        }
        val bytes = WKBUtils.write(value.asInstanceOf[Geometry])
        field.set(bytes, 0, bytes.length)
        field
      }
    }
  }

  class ListOutputFormatWriter(col: Int, attribute: Int, binding: ObjectType, description: TypeDescription)
      extends OrcOutputFormatWriter {

    private val converter = getConverter(binding)

    override def apply(sf: SimpleFeature, output: OrcStruct): Unit = {
      import scala.collection.JavaConversions._

      val value = sf.getAttribute(attribute).asInstanceOf[java.util.List[AnyRef]]
      if (value == null) {
        output.setFieldValue(col, null)
      } else {
        var field = output.getFieldValue(col).asInstanceOf[OrcList[WritableComparable[_]]]
        if (field == null) {
          field = new OrcList(description.getChildren.get(col), value.size())
          output.setFieldValue(col, field)
        } else {
          field.clear()
        }
        value.foreach(v => field.add(converter.setValue(v, null)))
      }
    }
  }

  class MapOutputFormatWriter(col: Int, attribute: Int, keyBinding: ObjectType, valueBinding: ObjectType, description: TypeDescription)
      extends OrcOutputFormatWriter {

    private val keyConverter = getConverter(keyBinding)
    private val valueConverter = getConverter(valueBinding)

    override def apply(sf: SimpleFeature, output: OrcStruct): Unit = {
      import scala.collection.JavaConversions._

      val value = sf.getAttribute(attribute).asInstanceOf[java.util.Map[AnyRef, AnyRef]]
      if (value == null) {
        output.setFieldValue(col, null)
      } else {
        var field = output.getFieldValue(col).asInstanceOf[OrcMap[WritableComparable[_], WritableComparable[_]]]
        if (field == null) {
          field = new OrcMap(description.getChildren.get(col))
          output.setFieldValue(col, field)
        } else {
          field.clear()
        }
        value.foreach { case (k, v) => field.put(keyConverter.setValue(k, null), valueConverter.setValue(v, null)) }
      }
    }
  }

  trait SetOutputFormatValue {
    def setValue(value: AnyRef, existing: WritableComparable[_]): WritableComparable[_]
  }

  trait OutputFormatWriterAdapter extends OrcOutputFormatWriter with SetOutputFormatValue {
    def col: Int
    def attribute: Int
    override def apply(sf: SimpleFeature, output: OrcStruct): Unit =
      output.setFieldValue(col, setValue(sf.getAttribute(attribute), output.getFieldValue(col)))
  }

  trait SetOutputFormatDate extends SetOutputFormatValue {
    def setValue(value: AnyRef, existing: WritableComparable[_]): WritableComparable[_] = {
      if (value == null) { null } else {
        var field = existing.asInstanceOf[OrcTimestamp]
        if (field == null) {
          field = new OrcTimestamp
        }
        field.setTime(value.asInstanceOf[java.util.Date].getTime)
        field
      }
    }
  }

  trait SetOutputFormatString extends SetOutputFormatValue {
    def setValue(value: AnyRef, existing: WritableComparable[_]): WritableComparable[_] = {
      if (value == null) { null } else {
        var field = existing.asInstanceOf[Text]
        if (field == null) {
          field = new Text
        }
        field.set(value.asInstanceOf[String].getBytes(StandardCharsets.UTF_8))
        field
      }
    }
  }

  trait SetOutputFormatInt extends SetOutputFormatValue {
    def setValue(value: AnyRef, existing: WritableComparable[_]): WritableComparable[_] = {
      if (value == null) { null } else {
        var field = existing.asInstanceOf[IntWritable]
        if (field == null) {
          field = new IntWritable
        }
        field.set(value.asInstanceOf[Integer].intValue)
        field
      }
    }
  }

  trait SetOutputFormatLong extends SetOutputFormatValue {
    def setValue(value: AnyRef, existing: WritableComparable[_]): WritableComparable[_] = {
      if (value == null) { null } else {
        var field = existing.asInstanceOf[LongWritable]
        if (field == null) {
          field = new LongWritable
        }
        field.set(value.asInstanceOf[java.lang.Long].longValue)
        field
      }
    }
  }

  trait SetOutputFormatFloat extends SetOutputFormatValue {
    def setValue(value: AnyRef, existing: WritableComparable[_]): WritableComparable[_] = {
      if (value == null) { null } else {
        var field = existing.asInstanceOf[FloatWritable]
        if (field == null) {
          field = new FloatWritable
        }
        field.set(value.asInstanceOf[java.lang.Float].floatValue)
        field
      }
    }
  }

  trait SetOutputFormatDouble extends SetOutputFormatValue {
    def setValue(value: AnyRef, existing: WritableComparable[_]): WritableComparable[_] = {
      if (value == null) { null } else {
        var field = existing.asInstanceOf[DoubleWritable]
        if (field == null) {
          field = new DoubleWritable
        }
        field.set(value.asInstanceOf[java.lang.Double].doubleValue)
        field
      }
    }
  }

  trait SetOutputFormatBoolean extends SetOutputFormatValue {
    def setValue(value: AnyRef, existing: WritableComparable[_]): WritableComparable[_] = {
      if (value == null) { null } else {
        var field = existing.asInstanceOf[BooleanWritable]
        if (field == null) {
          field = new BooleanWritable
        }
        field.set(value.asInstanceOf[java.lang.Boolean].booleanValue)
        field
      }
    }
  }

  trait SetOutputFormatBytes extends SetOutputFormatValue {
    def setValue(value: AnyRef, existing: WritableComparable[_]): WritableComparable[_] = {
      if (value == null) { null } else {
        var field = existing.asInstanceOf[BytesWritable]
        if (field == null) {
          field = new BytesWritable
        }
        val bytes = value.asInstanceOf[Array[Byte]]
        field.set(bytes, 0, bytes.length)
        field
      }
    }
  }

  trait SetOutputFormatUuid extends SetOutputFormatValue {
    def setValue(value: AnyRef, existing: WritableComparable[_]): WritableComparable[_] = {
      if (value == null) { null } else {
        var field = existing.asInstanceOf[Text]
        if (field == null) {
          field = new Text
        }
        field.set(value.asInstanceOf[UUID].toString.getBytes(StandardCharsets.UTF_8))
        field
      }
    }
  }

  /**
    * Gets a converter instance
    *
    * @param binding binding
    * @return
    */
  private def getConverter(binding: ObjectType): SetOutputFormatValue = {
    binding match {
      case ObjectType.DATE    => new SetOutputFormatDate {}
      case ObjectType.STRING  => new SetOutputFormatString {}
      case ObjectType.INT     => new SetOutputFormatInt {}
      case ObjectType.LONG    => new SetOutputFormatLong {}
      case ObjectType.FLOAT   => new SetOutputFormatFloat {}
      case ObjectType.DOUBLE  => new SetOutputFormatDouble {}
      case ObjectType.BOOLEAN => new SetOutputFormatBoolean {}
      case ObjectType.BYTES   => new SetOutputFormatBytes {}
      case ObjectType.UUID    => new SetOutputFormatUuid {}
      case _ => throw new IllegalArgumentException(s"Unexpected object type $binding")
    }
  }
}
locationtech/geomesa
geomesa-fs/geomesa-fs-storage/geomesa-fs-storage-orc/src/main/scala/org/locationtech/geomesa/fs/storage/orc/utils/OrcOutputFormatWriter.scala
Scala
apache-2.0
24,916
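The SetOutputFormat* traits above all follow the same reuse-or-allocate pattern: a null attribute maps to a null ORC field, otherwise the previously allocated Writable is recycled so no per-row garbage is created. Below is a minimal standalone sketch of that pattern, assuming only hadoop-common on the classpath; the trait and object names are invented for illustration and are not part of the GeoMesa API.

import org.apache.hadoop.io.{IntWritable, WritableComparable}

// Standalone illustration of the reuse-or-allocate converter pattern:
// null maps to null, otherwise the existing Writable is reused and only
// allocated on the first call.
trait SetValueSketch {
  def setValue(value: AnyRef, existing: WritableComparable[_]): WritableComparable[_]
}

object IntSetValueSketch extends SetValueSketch {
  override def setValue(value: AnyRef, existing: WritableComparable[_]): WritableComparable[_] = {
    if (value == null) { null } else {
      var field = existing.asInstanceOf[IntWritable]
      if (field == null) {
        field = new IntWritable
      }
      field.set(value.asInstanceOf[Integer].intValue)
      field
    }
  }
}

object ReusePatternDemo extends App {
  val first = IntSetValueSketch.setValue(Int.box(42), null)  // first call allocates
  val second = IntSetValueSketch.setValue(Int.box(7), first) // later calls reuse the instance
  println(first eq second)                                   // true: same IntWritable
  println(second.asInstanceOf[IntWritable].get)              // 7
}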
/**
 * Copyright (C) 2010-2012 LShift Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.lshift.diffa.agent.itest.config

import net.lshift.diffa.agent.itest.support.TestConstants._
import org.junit.Test
import net.lshift.diffa.kernel.frontend.{PairDef, EndpointDef}
import net.lshift.diffa.agent.client.ConfigurationRestClient
import org.apache.http.impl.client.DefaultHttpClient
import org.apache.http.client.methods.HttpGet
import org.junit.Assert._
import org.apache.http.auth.{AuthScope, Credentials, UsernamePasswordCredentials}

class EtagTest {

  val configClient = new ConfigurationRestClient(agentURL, domain)

  @Test
  def configChangeShouldUpgradeEtag {
    val up = EndpointDef(name = "some-upstream-endpoint")
    val down = EndpointDef(name = "some-downstream-endpoint")
    val pair = PairDef(key = "some-pair", upstreamName = up.name, downstreamName = down.name)

    val oldTag = getAggregatesEtag

    configClient.declareEndpoint(up)
    configClient.declareEndpoint(down)
    configClient.declarePair(pair)

    val newTag = getAggregatesEtag

    assertNotSame("Old etag was %s, new etag was %s".format(oldTag, newTag), oldTag, newTag)
  }

  private def getAggregatesEtag = {
    val httpClient = new DefaultHttpClient
    val creds = new UsernamePasswordCredentials(agentUsername, agentPassword)
    httpClient.getCredentialsProvider().setCredentials(new AuthScope(agentHost, agentPort), creds)
    val httpResponse = httpClient.execute(new HttpGet(agentURL + "/domains/diffa/diffs/aggregates"))
    val etag = httpResponse.getLastHeader("ETag")
    httpClient.getConnectionManager.shutdown()
    etag.getValue
  }
}
aprescott/diffa
agent/src/test/scala/net/lshift/diffa/agent/itest/config/EtagTest.scala
Scala
apache-2.0
2,179
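The test above only asserts that the aggregates ETag changes after a configuration change. For context, here is a hedged sketch of how a client might put that ETag to work in a conditional GET against the same endpoint, reusing the HttpClient 4.x API and TestConstants values already imported by the test. The 304 behaviour is an assumption about the agent, not something the test verifies.

import net.lshift.diffa.agent.itest.support.TestConstants._
import org.apache.http.auth.{AuthScope, UsernamePasswordCredentials}
import org.apache.http.client.methods.HttpGet
import org.apache.http.impl.client.DefaultHttpClient

// Hypothetical conditional request: send the previously observed ETag and,
// if the agent honours If-None-Match, expect 304 Not Modified when nothing
// has changed, 200 with a fresh body otherwise.
def aggregatesStatusWithEtag(previousEtag: String): Int = {
  val httpClient = new DefaultHttpClient
  try {
    val creds = new UsernamePasswordCredentials(agentUsername, agentPassword)
    httpClient.getCredentialsProvider.setCredentials(new AuthScope(agentHost, agentPort), creds)
    val get = new HttpGet(agentURL + "/domains/diffa/diffs/aggregates")
    get.setHeader("If-None-Match", previousEtag)
    val response = httpClient.execute(get)
    response.getStatusLine.getStatusCode
  } finally {
    httpClient.getConnectionManager.shutdown()
  }
}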
package com.markfeeney.circlet

import javax.servlet.http.{HttpServletRequest, HttpServletResponse}
import com.markfeeney.circlet.JettyOptions.ClientAuth.{Need, Want}
import com.markfeeney.circlet.JettyOptions.SslStoreConfig.{Instance, Path}
import org.eclipse.jetty.server.handler.{AbstractHandler, ContextHandler, HandlerList}
import org.eclipse.jetty.server.{Request => JettyRequest, _}
import org.eclipse.jetty.util.ssl.SslContextFactory
import org.eclipse.jetty.util.thread.{QueuedThreadPool, ThreadPool}
import org.eclipse.jetty.websocket.api.{Session, WebSocketAdapter}
import org.eclipse.jetty.websocket.server.WebSocketHandler
import org.eclipse.jetty.websocket.servlet.{ServletUpgradeRequest, ServletUpgradeResponse, WebSocketCreator, WebSocketServletFactory}

/**
 * Functionality for running handlers on Jetty.
 * Largely a port of https://github.com/ring-clojure/ring/blob/4a3584570ad9e7b17f6b1c8a2a17934c1682f77d/ring-jetty-adapter/src/ring/adapter/jetty.clj
 */
object JettyAdapter {

  private def createThreadPool(opts: JettyOptions): ThreadPool = {
    val pool = new QueuedThreadPool(opts.maxThreads)
    pool.setMinThreads(opts.minThreads)
    pool.setDaemon(opts.daemonThreads)
    pool
  }

  private def httpConfig(opts: JettyOptions): HttpConfiguration = {
    val c = new HttpConfiguration()
    c.setSendDateHeader(opts.sendDateHeader)
    c.setOutputBufferSize(opts.outputBufferSize)
    c.setRequestHeaderSize(opts.requestHeaderSize)
    c.setResponseHeaderSize(opts.responseHeaderSize)
    c.setSendServerVersion(opts.sendServerVersion)
    c
  }

  private def serverConnector(
    server: Server,
    factories: Seq[ConnectionFactory]): ServerConnector = {
    new ServerConnector(server, factories: _*)
  }

  private def httpConnector(server: Server, opts: JettyOptions): ServerConnector = {
    val factory = new HttpConnectionFactory(httpConfig(opts))
    val connector = serverConnector(server, Seq(factory))
    connector.setPort(opts.httpPort)
    opts.host.foreach(connector.setHost)
    connector.setIdleTimeout(opts.maxIdleTime)
    connector
  }

  private def sslContextFactory(opts: JettyOptions): SslContextFactory = {
    val context = new SslContextFactory
    opts.keyStore.foreach {
      case Path(path) => context.setKeyStorePath(path)
      case Instance(keyStore) => context.setKeyStore(keyStore)
    }
    opts.keyStorePassword.foreach(context.setKeyStorePassword)
    opts.trustStore.foreach {
      case Path(path) => context.setTrustStorePath(path)
      case Instance(keyStore) => context.setTrustStore(keyStore)
    }
    opts.trustStorePassword.foreach(context.setTrustStorePassword)
    opts.clientAuth.foreach {
      case Need => context.setNeedClientAuth(true)
      case Want => context.setWantClientAuth(true)
    }
    if (opts.excludeCiphers.nonEmpty) {
      context.setExcludeCipherSuites(opts.excludeCiphers: _*)
    }
    if (opts.excludeProtocols.nonEmpty) {
      context.setExcludeProtocols(opts.excludeProtocols: _*)
    }
    context
  }

  private def sslConnector(server: Server, opts: JettyOptions): ServerConnector = {
    val httpFactory = {
      val config = httpConfig(opts)
      config.setSecureScheme("https")
      config.setSecurePort(opts.sslPort)
      config.addCustomizer(new SecureRequestCustomizer)
      new HttpConnectionFactory(config)
    }
    val sslFactory = new SslConnectionFactory(sslContextFactory(opts), "http/1.1")
    val conn = serverConnector(server, Seq(sslFactory, httpFactory))
    conn.setPort(opts.sslPort)
    opts.host.foreach(conn.setHost)
    conn.setIdleTimeout(opts.maxIdleTime)
    conn
  }

  private def createServer(opts: JettyOptions): Server = {
    val server = new Server(createThreadPool(opts))
    if (opts.allowHttp) {
      server.addConnector(httpConnector(server, opts))
    }
    if (opts.allowSsl) {
      server.addConnector(sslConnector(server, opts))
    }
    server
  }

  private def wsAdapter(ws: JettyWebSocket) = new WebSocketAdapter {
    override def onWebSocketConnect(s: Session): Unit = {
      super.onWebSocketConnect(s)
      ws.onConnect(s)
    }
    override def onWebSocketError(cause: Throwable): Unit = {
      ws.onError(this.getSession, cause)
    }
    override def onWebSocketText(message: String): Unit = {
      ws.onText(this.getSession, message)
    }
    override def onWebSocketBinary(payload: Array[Byte], offset: Int, len: Int): Unit = {
      ws.onBytes(this.getSession, payload, offset, len)
    }
    override def onWebSocketClose(statusCode: Int, reason: String): Unit = {
      try {
        ws.onClose(this.getSession, statusCode, reason)
      } finally {
        super.onWebSocketClose(statusCode, reason)
      }
    }
  }

  private def wsCreator(ws: JettyWebSocket): WebSocketCreator = {
    new WebSocketCreator {
      override def createWebSocket(req: ServletUpgradeRequest, resp: ServletUpgradeResponse): AnyRef = {
        wsAdapter(ws)
      }
    }
  }

  private def wsHandler(ws: JettyWebSocket, maxWsIdleTime: Int): AbstractHandler = {
    new WebSocketHandler() {
      override def configure(factory: WebSocketServletFactory): Unit = {
        factory.getPolicy.setIdleTimeout(maxWsIdleTime)
        factory.setCreator(wsCreator(ws))
      }
    }
  }

  private def wsHandlers(opts: JettyOptions): Seq[ContextHandler] = {
    opts.webSockets.map { case (path, ws) =>
      val ctx = new ContextHandler
      ctx.setContextPath(path)
      ctx.setHandler(wsHandler(ws, opts.maxWsIdleTime))
      ctx
    }.toSeq
  }

  /**
   * Create, configure and start a Jetty server instance and use it to run handler.
   */
  def run(handler: Handler, opts: JettyOptions = JettyOptions()): Server = {
    // The main app handler gets wrapped in a single Jetty handler instance...
    val ah = new AbstractHandler {
      override def handle(
        target: String,
        baseRequest: JettyRequest,
        request: HttpServletRequest,
        response: HttpServletResponse): Unit = {
        val req: Request = Servlet.buildRequest(request)
        handler(req) { optResp =>
          val resp = optResp.getOrElse {
            // TBD if this is a good way to handle this case
            Response(body = "No response generated", status = 500)
          }
          Servlet.updateServletResponse(response, resp)
          Sent
        }
        baseRequest.setHandled(true)
      }
    }

    // ... then each websocket also gets its own handler (kind of a bolted-on
    // approach, but it works for now). We build a big list of all handlers
    // and register them with the server below. Approach borrowed from
    // https://github.com/sunng87/ring-jetty9-adapter
    val allHandlers = (wsHandlers(opts) :+ ah)
      .foldLeft(new HandlerList) { (acc, h) =>
        acc.addHandler(h)
        acc
      }

    val server = createServer(opts)
    server.setHandler(allHandlers)
    opts.configFn(server)
    try {
      server.start()
      if (opts.join) {
        server.join()
      }
      server
    } catch {
      case e: Exception =>
        server.stop()
        throw e
    }
  }
}
overthink/circlet
src/main/scala/com/markfeeney/circlet/JettyAdapter.scala
Scala
mit
7,106
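A hedged usage sketch for the adapter above. It assumes circlet's CPS handler shape visible in run (a Handler takes a Request and a continuation expecting an Option[Response]) and the Response(body, status) constructor used there; the exact type aliases in the library may differ slightly, so treat this as an illustration rather than the canonical API.

import com.markfeeney.circlet.{JettyAdapter, JettyOptions, Request, Response, Sent}

object HelloServerSketch extends App {
  // CPS-style handler, matching how `run` invokes it: hand Some(response) to
  // the continuation and return whatever the continuation returns (Sent).
  val hello = (req: Request) =>
    (k: Option[Response] => Sent.type) => k(Some(Response(body = "hello from circlet", status = 200)))

  // Start Jetty with the default options; `run` returns the underlying Server,
  // so it can be stopped later with server.stop().
  val server = JettyAdapter.run(hello, JettyOptions())
}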
/*
 * Copyright 2021 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package uk.gov.hmrc.ct.box

import play.api.libs.json.Json

case class CtValidation(boxId: Option[String], errorMessageKey: String, args: Option[Seq[String]] = None) {
  def isGlobalError = boxId.isEmpty
}

object CtValidation {
  lazy implicit val format = Json.format[CtValidation]
}
hmrc/ct-calculations
src/main/scala/uk/gov/hmrc/ct/box/CtValidation.scala
Scala
apache-2.0
890
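A small usage sketch for the case class above: the implicit format makes it serialisable straight to JSON, and isGlobalError distinguishes box-level from form-level errors. The box id and message keys below are made up for illustration and are not real calculation error keys.

import play.api.libs.json.Json
import uk.gov.hmrc.ct.box.CtValidation

object CtValidationDemo extends App {
  val boxError = CtValidation(Some("CP14"), "error.CP14.required")
  val globalError = CtValidation(None, "error.global.inconsistent")

  println(boxError.isGlobalError)    // false: tied to a specific box
  println(globalError.isGlobalError) // true: no box id, applies to the whole return

  // The implicit format from the companion object handles serialisation;
  // the optional args field is simply omitted when it is None.
  println(Json.toJson(boxError))
}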
package model.validation

import play.api.libs.json._
import play.api.libs.json.Reads._
import play.api.libs.json.Writes._
import play.api.libs.functional.syntax._
import model.{Recipe, Ingredient}

/**
 * @author knm
 */
object RecipeValidator {

  implicit val ingredientWrites: Writes[Ingredient] = (
    (__ \ "amount").write[BigDecimal] and
    (__ \ "name").write[String] and
    (__ \ "unit").write[Option[String]]
    )(unlift(Ingredient.unapply))

  implicit val ingredientReads: Reads[Ingredient] = (
    (__ \ "amount").read[BigDecimal](min(BigDecimal(0))) and
    (__ \ "name").read[String](minLength[String](2)) and
    (__ \ "unit").read[Option[String]]
    )(Ingredient.apply _)

  implicit val recipewrites: Writes[Recipe] = (
    (__ \ "title").write[String] and
    (__ \ "alternateTitle").write[Option[String]] and
    (__ \ "instruction").write[Option[String]] and
    (__ \ "comment").write[Option[String]] and
    (__ \ "ingredients").write[List[Ingredient]] and
    (__ \ "keywords").write[Option[List[String]]] and
    (__ \ "timeRequired").write[Option[BigDecimal]] and
    (__ \ "url").write[Option[String]]
    )(unlift(Recipe.unapply))

  implicit val recipereads: Reads[Recipe] = (
    (__ \ "title").read[String](minLength[String](2)) and
    (__ \ "alternateTitle").read[Option[String]] and
    (__ \ "instruction").read[Option[String]] and
    (__ \ "comment").read[Option[String]] and
    (__ \ "ingredients").read[List[Ingredient]] and
    (__ \ "keywords").read[Option[List[String]]] and
    (__ \ "timeRequired").read[Option[BigDecimal]] and
    (__ \ "url").read[Option[String]]
    )(Recipe.apply _)
}
linuxswords/play-recipes
app/model/validation/RecipeValidator.scala
Scala
apache-2.0
1,678
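A usage sketch for the Reads above: with the implicits in scope, a raw JSON payload can be validated into a Recipe, and constraint failures (a negative amount, a name shorter than two characters) surface as a JsError. The payload is invented for illustration, and how missing optional fields are treated depends on the Play JSON version in use, so both branches are handled rather than a particular outcome asserted.

import play.api.libs.json.{JsError, JsSuccess, Json}
import model.Recipe
import model.validation.RecipeValidator._

object RecipeValidationDemo extends App {
  val payload = Json.parse(
    """{
      |  "title": "Pancakes",
      |  "ingredients": [
      |    { "amount": 250, "name": "flour", "unit": "g" }
      |  ]
      |}""".stripMargin)

  // validate[Recipe] picks up recipereads (and ingredientReads) from the import above.
  payload.validate[Recipe] match {
    case JsSuccess(recipe, _) => println(s"parsed recipe: ${recipe.title}")
    case JsError(errors)      => println(s"rejected: $errors")
  }
}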
package f0

case class EffectW[+F]() {
  def erase: EffectW[Nothing] = this.asInstanceOf[EffectW[Nothing]]
}

case class EffectR[-F]() {
  def erase: EffectR[Any] = this.asInstanceOf[EffectR[Any]]
}

object Effects {
  private val _effectW = EffectW()
  private val _effectR = EffectR()
  def effectW[F] = _effectW.asInstanceOf[EffectW[F]]
  def effectR[F] = _effectR.asInstanceOf[EffectR[F]]
}
khalen/f0
src/main/scala/f0/Effects.scala
Scala
mit
390
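The effect tokens above carry no data, so Effects can hand out one shared instance for every type parameter. A tiny sketch of what that buys; the Console and FileIO markers are invented purely for illustration.

import f0.{EffectW, Effects}

object EffectTokensDemo extends App {
  // Invented phantom effect markers; they exist only at the type level.
  trait Console
  trait FileIO

  val console: EffectW[Console] = Effects.effectW[Console]
  val files: EffectW[FileIO] = Effects.effectW[FileIO]

  // Both tokens are the same underlying object: no allocation per effect type.
  println(console eq files) // true

  // erase forgets the effect type; since EffectW is covariant in F, the erased
  // token still conforms to any EffectW[F].
  val erased: EffectW[Console] = console.erase
  println(erased eq console) // true
}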