code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package tap.interpreter
// Exception hierarchy for the tap interpreter. Each carries a message that is
// passed through to the underlying java.lang.Throwable constructor.
case class InterpreterError(msg: String) extends Exception(msg)
// NOTE(review): name suggests this marks errors during evaluation of a running
// program; confirm against throw sites.
case class InterpreterRuntimeError(msg: String) extends RuntimeException(msg)
// NOTE(review): name suggests a failed pattern match in interpreted code;
// confirm against throw sites.
case class InterpreterMatchError(msg: String) extends Exception(msg)
| garyb/tap | src/main/scala/tap/interpreter/InterpreterError.scala | Scala | mit | 236 |
///////////////////////////////////////////////////////////////////////////////
// hadoop.scala
//
// Copyright (C) 2011 Ben Wing, The University of Texas at Austin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////////////
package opennlp.fieldspring.util
import org.apache.hadoop.io._
import org.apache.hadoop.util._
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
import org.apache.hadoop.conf.{Configuration, Configured}
import org.apache.hadoop.fs._
// The following says to import everything except java.io.FileSystem, because
// it conflicts with Hadoop's FileSystem. (Technically, it imports everything
// but in the process aliases FileSystem to _, which has the effect of making
// it inaccessible. _ is special in Scala and has various meanings.)
import java.io.{FileSystem=>_,_}
import java.net.URI
import opennlp.fieldspring.util.argparser._
import opennlp.fieldspring.util.collectionutil._
import opennlp.fieldspring.util.textdbutil._
import opennlp.fieldspring.util.experiment._
import opennlp.fieldspring.util.ioutil._
import opennlp.fieldspring.util.printutil.{errprint, set_errout_prefix}
package object hadoop {
/**
 * FileHandler implementation backed by the Hadoop file-system API. Every
 * operation resolves the FileSystem responsible for the given path's URI
 * scheme via the supplied Hadoop configuration.
 */
class HadoopFileHandler(conf: Configuration) extends FileHandler {
  /** Resolve the Hadoop FileSystem that owns `filename`'s URI scheme. */
  protected def get_file_system(filename: String) =
    FileSystem.get(URI.create(filename), conf)

  /** Signal a missing file or directory at `path`. Never returns. */
  protected def file_not_found(path: String) =
    throw new FileNotFoundException(
      "No such file or directory: %s" format path)

  /** Open `filename` for reading through its Hadoop FileSystem. */
  def get_raw_input_stream(filename: String) = {
    val location = new Path(filename)
    get_file_system(filename).open(location)
  }

  /** Open `filename` for writing, either appending or creating/truncating. */
  def get_raw_output_stream(filename: String, append: Boolean) = {
    val location = new Path(filename)
    val fs = get_file_system(filename)
    if (append) fs.append(location) else fs.create(location)
  }

  /** Split a path into (parent directory, base name). */
  def split_filename(filename: String) = {
    val location = new Path(filename)
    (location.getParent.toString, location.getName)
  }

  /** Join a directory and a file name into a single path string. */
  def join_filename(dir: String, file: String) =
    new Path(dir, file).toString

  /** Whether `filename` names a directory; throws FileNotFoundException if absent. */
  def is_directory(filename: String) = {
    val stat = get_file_system(filename).getFileStatus(new Path(filename))
    if (stat == null)
      file_not_found(filename)
    stat.isDir
  }

  /** Create `filename` and any missing parents; result is mkdirs' success flag. */
  def make_directories(filename: String): Boolean =
    get_file_system(filename).mkdirs(new Path(filename))

  /** List the entries of directory `dir`; throws FileNotFoundException if absent. */
  def list_files(dir: String) = {
    val entries = get_file_system(dir).listStatus(new Path(dir))
    if (entries == null)
      file_not_found(dir)
    for (entry <- entries)
      yield entry.getPath.toString
  }
}
/**
 * Helpers for round-tripping experiment parameters between an ArgParser and
 * a Hadoop Configuration, so command-line arguments parsed on the client can
 * be recovered inside map/reduce tasks.
 */
object HadoopExperimentConfiguration {
  /* Prefix used for storing parameters in a Hadoop configuration */
  val hadoop_conf_prefix = "fieldspring."

  /**
   * Convert the parameters in `parser` to Hadoop configuration settings in
   * `conf`.
   *
   * @param prefix Prefix to prepend to the names of all parameters.
   * @param parser ArgParser object to retrieve parameters from.
   * @param conf Configuration object to store configuration settings into.
   */
  def convert_parameters_to_hadoop_conf(prefix: String, parser: ArgParser,
      conf: Configuration) {
    // Only parameters explicitly specified on the command line are copied.
    for (name <- parser.argNames if parser.specified(name)) {
      val confname = prefix + name
      parser(name) match {
        case e: Int => conf.setInt(confname, e)
        case e: Long => conf.setLong(confname, e)
        case e: Float => conf.setFloat(confname, e)
        // Configuration offers no setDouble here, so doubles are narrowed to
        // floats (with possible precision loss); see the matching widening in
        // convert_parameters_from_hadoop_conf.
        case e: Double => conf.setFloat(confname, e.toFloat)
        case e: String => conf.set(confname, e)
        case e: Boolean => conf.setBoolean(confname, e)
        case e: Seq[_] => {
          val multitype = parser.getMultiType(name)
          if (multitype == classOf[String]) {
            conf.setStrings(confname, parser.get[Seq[String]](name): _*)
          } else
            throw new UnsupportedOperationException(
              "Don't know how to store sequence of type %s of parameter %s into a Hadoop Configuration"
              format (multitype, name))
        }
        case ty@_ => {
          throw new UnsupportedOperationException(
            "Don't know how to store type %s of parameter %s into a Hadoop Configuration"
            format (ty, name))
        }
      }
    }
  }

  /**
   * Convert the relevant Hadoop configuration settings in `conf`
   * into the given ArgParser.
   *
   * @param prefix Prefix to prepend to the names of all parameters.
   * @param parser ArgParser object to store parameters into. The names
   *   of parameters to fetch are taken from this object.
   * @param conf Configuration object to retrieve settings from.
   */
  def convert_parameters_from_hadoop_conf(prefix: String, parser: ArgParser,
      conf: Configuration) {
    // Configuration.dumpConfiguration(conf, new PrintWriter(System.err))
    // Only settings actually present in the configuration are copied back.
    // FIXED: removed a redundant inner `val confname = prefix + name` that
    // shadowed the identical binding introduced by the enumerator below.
    for {name <- parser.argNames
         confname = prefix + name
         if conf.getRaw(confname) != null} {
      val ty = parser.getType(name)
      if (ty == classOf[Int])
        parser.set[Int](name, conf.getInt(confname, parser.defaultValue[Int](name)))
      else if (ty == classOf[Long])
        parser.set[Long](name, conf.getLong(confname, parser.defaultValue[Long](name)))
      else if (ty == classOf[Float])
        parser.set[Float](name, conf.getFloat(confname, parser.defaultValue[Float](name)))
      else if (ty == classOf[Double])
        // Doubles round-trip through Float; see convert_parameters_to_hadoop_conf.
        parser.set[Double](name, conf.getFloat(confname, parser.defaultValue[Double](name).toFloat).toDouble)
      else if (ty == classOf[String])
        parser.set[String](name, conf.get(confname, parser.defaultValue[String](name)))
      else if (ty == classOf[Boolean])
        parser.set[Boolean](name, conf.getBoolean(confname, parser.defaultValue[Boolean](name)))
      else if (ty == classOf[Seq[_]]) {
        val multitype = parser.getMultiType(name)
        if (multitype == classOf[String])
          parser.set[Seq[String]](name, conf.getStrings(confname, parser.defaultValue[Seq[String]](name): _*).toSeq)
        else
          throw new UnsupportedOperationException(
            "Don't know how to fetch sequence of type %s of parameter %s from a Hadoop Configuration"
            format (multitype, name))
      } else {
        // FIXED: message previously read "store fetch %s"; it describes a
        // fetch of an unsupported type.
        throw new UnsupportedOperationException(
          "Don't know how to fetch type %s of parameter %s from a Hadoop Configuration"
          format (ty, name))
      }
    }
  }
}
/**
 * Mix-in for an ExperimentDriverApp that runs as a Hadoop job. Subclasses
 * supply the map/reduce classes and the input setup; this trait copies
 * command-line parameters into the Hadoop Configuration, creates the Job,
 * and drives execution through Hadoop's ToolRunner.
 */
trait HadoopExperimentDriverApp extends ExperimentDriverApp {
// Hadoop Configuration; assigned by HadoopExperimentTool.run() before
// implement_main() is invoked.
var hadoop_conf: Configuration = _
override type TDriver <: HadoopExperimentDriver
/* Set by subclass -- Initialize the various classes for map and reduce */
def initialize_hadoop_classes(job: Job)
/* Set by subclass -- Set the settings for reading appropriate input files,
possibly based on command line arguments */
def initialize_hadoop_input(job: Job)
/* Called after command-line arguments have been read, verified,
canonicalized and stored into `arg_parser`. We convert the arguments
into configuration variables in the Hadoop configuration -- this is
one way to get "side data" passed into a mapper, and is designed
exactly for things like command-line arguments. (For big chunks of
side data, it's better to use the Hadoop file system.) Then we
tell Hadoop about the classes used to do map and reduce by calling
initialize_hadoop_classes(), set the input and output files, and
actually run the job.
*/
override def run_program() = {
import HadoopExperimentConfiguration._
convert_parameters_to_hadoop_conf(hadoop_conf_prefix, arg_parser,
hadoop_conf)
val job = new Job(hadoop_conf, progname)
/* We have to call set_job() here now, and not earlier. This is the
"bootstrapping issue" alluded to in the comments on
HadoopExperimentDriver. We can't set the Job until it's created,
and we can't create the Job until after we have set the appropriate
Fieldspring configuration parameters from the command-line arguments --
but, we need the driver already created in order to parse the
command-line arguments, because it participates in that process. */
driver.set_job(job)
initialize_hadoop_classes(job)
initialize_hadoop_input(job)
// Exit status: 0 if the job completed successfully, 1 otherwise.
if (job.waitForCompletion(true)) 0 else 1
}
// Hadoop Tool adapter: ToolRunner hands us the populated Configuration here.
class HadoopExperimentTool extends Configured with Tool {
override def run(args: Array[String]) = {
/* Set the Hadoop configuration object and then thread execution
back to the ExperimentApp. This will read command-line arguments,
call initialize_parameters() on GeolocateApp to verify
and canonicalize them, and then pass control back to us by
calling run_program(), which we override. */
hadoop_conf = getConf()
set_errout_prefix(progname + ": ")
implement_main(args)
}
}
override def main(args: Array[String]) {
// ToolRunner parses generic Hadoop options, then invokes
// HadoopExperimentTool.run() with the remaining arguments.
val exitCode = ToolRunner.run(new HadoopExperimentTool(), args)
System.exit(exitCode)
}
}
/**
 * Mix-in for a Hadoop app whose input is a textdb corpus: scans
 * `corpus_dirs` for files matching `corpus_suffix` and registers each one
 * as an input path on the Hadoop job.
 */
trait HadoopTextDBApp extends HadoopExperimentDriverApp {
// Suffix identifying the corpus files to process.
def corpus_suffix: String
// Directories to scan for corpus files.
def corpus_dirs: Iterable[String]
def initialize_hadoop_input(job: Job) {
/* A very simple file processor that does nothing but note the files
seen, for Hadoop's benefit. */
class RetrieveDocumentFilesFileProcessor(
suffix: String
) extends TextDBLineProcessor[Unit](suffix) {
def process_lines(lines: Iterator[String],
filehand: FileHandler, file: String,
compression: String, realname: String) = {
errprint("Called with %s", file)
// Register the file as an input path; the lines themselves are ignored.
FileInputFormat.addInputPath(job, new Path(file))
(true, ())
}
}
val fileproc = new RetrieveDocumentFilesFileProcessor(
// driver.params.eval_set + "-" + driver.document_file_suffix
corpus_suffix
)
fileproc.process_files(driver.get_file_handler, corpus_dirs)
// FileOutputFormat.setOutputPath(job, new Path(params.outfile))
}
}
/**
* Base mix-in for an Experiment application using Hadoop.
*
* @see HadoopExperimentDriver
*/
trait BaseHadoopExperimentDriver extends
HadoopableArgParserExperimentDriver {
/**
 * FileHandler object for this driver.
 */
private lazy val hadoop_file_handler =
new HadoopFileHandler(get_configuration)
override def get_file_handler: FileHandler = hadoop_file_handler
// override type TParam <: HadoopExperimentParameters
/* Implementation of the driver statistics mix-in (ExperimentDriverStats)
that store counters in Hadoop. find_split_counter needs to be
implemented. */
/**
 * Find the Counter object for the given counter, split into the
 * group and tail components. The way to do this depends on whether
 * we're running the job driver on the client, or a map or reduce task
 * on a tasktracker.
 */
protected def find_split_counter(group: String, tail: String): Counter
// Supplied by subclasses: the JobContext (client-side Job or in-task
// context) from which the configuration is obtained.
def get_job_context: JobContext
def get_configuration = get_job_context.getConfiguration
// NOTE(review): yields -1 when "mapred.task.partition" is unset (e.g. when
// running on the client, outside any task) -- confirm callers handle -1.
def get_task_id = get_configuration.getInt("mapred.task.partition", -1)
/**
 * Find the Counter object for the given counter.
 */
protected def find_counter(name: String) = {
// split_counter() separates the full name into (group, tail) components.
val (group, counter) = split_counter(name)
find_split_counter(group, counter)
}
// Increment the named Hadoop counter by `incr`.
protected def imp_increment_counter(name: String, incr: Long) {
val counter = find_counter(name)
counter.increment(incr)
}
// Read the current value of the named Hadoop counter.
protected def imp_get_counter(name: String) = {
val counter = find_counter(name)
counter.getValue()
}
}
/**
* Mix-in for an Experiment application using Hadoop. This is a trait
* because it should be mixed into a class providing the implementation of
* an application in a way that is indifferent to whether it's being run
* stand-alone or in Hadoop.
*
* This is used both in map/reduce task code and in the client job-running
* code. In some ways it would be cleaner to have separate classes for
* task vs. client job code, but that would entail additional boilerplate
* for any individual apps as they'd have to create separate task and
* client job versions of each class along with a base superclass for the
* two.
*/
/**
 * Driver mix-in usable both in client job-submission code (where a Job is
 * set) and in map/reduce task code (where a TaskInputOutputContext is set).
 * Exactly one of the two must be assigned before any counter operation;
 * when both are present the task context takes precedence.
 */
trait HadoopExperimentDriver extends BaseHadoopExperimentDriver {
  var job: Job = _
  var context: TaskInputOutputContext[_,_,_,_] = _

  /**
   * Record the task context when running map or reduce task code on a
   * tasktracker. (Both Mapper.Context and Reducer.Context are subclasses
   * of TaskInputOutputContext.)
   */
  def set_task_context(context: TaskInputOutputContext[_,_,_,_]) {
    this.context = context
  }

  /**
   * Record the Job when running job-submission code on the client. Assigned
   * after construction because of the bootstrapping order described in
   * HadoopExperimentApp.
   */
  def set_job(job: Job) {
    this.job = job
  }

  def get_job_context = {
    if (context != null) context
    else if (job != null) job
    else need_to_set_context()
  }

  def find_split_counter(group: String, counter: String) = {
    (context, job) match {
      case (ctx, _) if ctx != null => ctx.getCounter(group, counter)
      case (_, j) if j != null => j.getCounters.findCounter(group, counter)
      case _ => need_to_set_context()
    }
  }

  /** Fail loudly when neither task context nor job has been set. */
  def need_to_set_context() =
    throw new IllegalStateException("Either task context or job needs to be set before any counter operations")

  /** Report liveness to Hadoop; a no-op outside of a task context. */
  override def heartbeat() {
    Option(context).foreach(_.progress)
  }
}
/**
 * Mix-in for Hadoop Mapper/Reducer classes that reconstructs the experiment
 * driver and its parameters inside a task, from values previously stored in
 * the Hadoop Configuration on the client.
 */
trait HadoopExperimentMapReducer {
type TContext <: TaskInputOutputContext[_,_,_,_]
type TDriver <: HadoopExperimentDriver
// Driver instance shared by init() and the map/reduce code of the mixing class.
val driver = create_driver()
type TParam = driver.TParam
def progname: String
def create_param_object(ap: ArgParser): TParam
def create_driver(): TDriver
/** Originally this was simply called 'setup', but used only for a
 * trait that could be mixed into a mapper. Expanding this to allow
 * it to be mixed into both a mapper and a reducer didn't cause problems
 * but creating a subtrait that override this function did cause problems,
 * a complicated message like this:
[error] /Users/benwing/devel/fieldspring/src/main/scala/opennlp/fieldspring/preprocess/GroupCorpus.scala:208: overriding method setup in class Mapper of type (x$1: org.apache.hadoop.mapreduce.Mapper[java.lang.Object,org.apache.hadoop.io.Text,org.apache.hadoop.io.Text,org.apache.hadoop.io.Text]#Context)Unit;
[error] method setup in trait GroupCorpusMapReducer of type (context: GroupCorpusMapper.this.TContext)Unit cannot override a concrete member without a third member that's overridden by both (this rule is designed to prevent ``accidental overrides'')
[error] class GroupCorpusMapper extends
[error] ^
 * so I got around it by defining the actual code in another method and
 * making the setup() calls everywhere call this. (FIXME unfortunately this
 * is error-prone.)
 */
def init(context: TContext) {
import HadoopExperimentConfiguration._
val conf = context.getConfiguration
val ap = new ArgParser(progname)
// Initialize set of parameters in `ap`
create_param_object(ap)
// Retrieve configuration values and store in `ap`
convert_parameters_from_hadoop_conf(hadoop_conf_prefix, ap, conf)
// Now create a class containing the stored configuration values
val params = create_param_object(ap)
driver.set_task_context(context)
// The interleaved context.progress calls report liveness to Hadoop so the
// task is not timed out during potentially slow setup steps.
context.progress
driver.set_parameters(params)
context.progress
driver.setup_for_run()
context.progress
}
}
}
| utcompling/fieldspring | src/main/scala/opennlp/fieldspring/util/hadoop.scala | Scala | apache-2.0 | 16,868 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.yarn
import java.io.{FileSystem => _, _}
import java.net.{InetAddress, UnknownHostException, URI}
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.security.PrivilegedExceptionAction
import java.util.{Locale, Properties, UUID}
import java.util.zip.{ZipEntry, ZipOutputStream}
import scala.collection.JavaConverters._
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, ListBuffer, Map}
import scala.util.control.NonFatal
import com.google.common.base.Objects
import com.google.common.io.Files
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.hadoop.fs.permission.FsPermission
import org.apache.hadoop.io.DataOutputBuffer
import org.apache.hadoop.mapreduce.MRJobConfig
import org.apache.hadoop.security.{Credentials, UserGroupInformation}
import org.apache.hadoop.util.StringUtils
import org.apache.hadoop.yarn.api._
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment
import org.apache.hadoop.yarn.api.protocolrecords._
import org.apache.hadoop.yarn.api.records._
import org.apache.hadoop.yarn.client.api.{YarnClient, YarnClientApplication}
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException
import org.apache.hadoop.yarn.util.Records
import org.apache.spark.{SecurityManager, SparkConf, SparkException}
import org.apache.spark.deploy.{SparkApplication, SparkHadoopUtil}
import org.apache.spark.deploy.yarn.config._
import org.apache.spark.deploy.yarn.security.YARNHadoopDelegationTokenManager
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.launcher.{LauncherBackend, SparkAppHandle, YarnCommandBuilderUtils}
import org.apache.spark.util.{CallerContext, Utils}
private[spark] class Client(
val args: ClientArguments,
val sparkConf: SparkConf)
extends Logging {
import Client._
import YarnSparkHadoopUtil._
// YARN client used for all interaction with the ResourceManager.
private val yarnClient = YarnClient.createYarnClient
private val hadoopConf = new YarnConfiguration(SparkHadoopUtil.newConfiguration(sparkConf))
// Cluster mode: the Spark driver runs inside the YARN ApplicationMaster.
private val isClusterMode = sparkConf.get("spark.submit.deployMode", "client") == "cluster"
// AM related configurations
// In cluster mode the AM hosts the driver, so the driver memory/cores
// settings apply; in client mode the AM has its own dedicated settings.
private val amMemory = if (isClusterMode) {
sparkConf.get(DRIVER_MEMORY).toInt
} else {
sparkConf.get(AM_MEMORY).toInt
}
private val amMemoryOverhead = {
val amMemoryOverheadEntry = if (isClusterMode) DRIVER_MEMORY_OVERHEAD else AM_MEMORY_OVERHEAD
// An explicitly configured overhead wins; otherwise
// max(MEMORY_OVERHEAD_FACTOR * amMemory, MEMORY_OVERHEAD_MIN).
sparkConf.get(amMemoryOverheadEntry).getOrElse(
math.max((MEMORY_OVERHEAD_FACTOR * amMemory).toLong, MEMORY_OVERHEAD_MIN)).toInt
}
private val amCores = if (isClusterMode) {
sparkConf.get(DRIVER_CORES)
} else {
sparkConf.get(AM_CORES)
}
// Executor related configurations
private val executorMemory = sparkConf.get(EXECUTOR_MEMORY)
// Same overhead rule as for the AM: explicit setting, else factor with floor.
private val executorMemoryOverhead = sparkConf.get(EXECUTOR_MEMORY_OVERHEAD).getOrElse(
math.max((MEMORY_OVERHEAD_FACTOR * executorMemory).toLong, MEMORY_OVERHEAD_MIN)).toInt
private val distCacheMgr = new ClientDistributedCacheManager()
// Kerberos login settings; principal and keytab must be set together (see
// the require below).
private val principal = sparkConf.get(PRINCIPAL).orNull
private val keytab = sparkConf.get(KEYTAB).orNull
private val loginFromKeytab = principal != null
private val amKeytabFileName: String = {
require((principal == null) == (keytab == null),
"Both principal and keytab must be defined, or neither.")
if (loginFromKeytab) {
logInfo(s"Kerberos credentials: principal = $principal, keytab = $keytab")
// Generate a file name that can be used for the keytab file, that does not conflict
// with any user file.
new File(keytab).getName() + "-" + UUID.randomUUID().toString
} else {
null
}
}
// Bridge to a spark-launcher process (if one is attached) so it can observe
// application state and request a stop/kill.
private val launcherBackend = new LauncherBackend() {
override protected def conf: SparkConf = sparkConf
override def onStopRequest(): Unit = {
// In cluster mode with a known app id, kill through YARN; otherwise just
// mark killed and shut this client down.
if (isClusterMode && appId != null) {
yarnClient.killApplication(appId)
} else {
setState(SparkAppHandle.State.KILLED)
stop()
}
}
}
// When set, submission returns without waiting for the application to finish.
private val fireAndForget = isClusterMode && !sparkConf.get(WAIT_FOR_APP_COMPLETION)
private var appId: ApplicationId = null
// The app staging dir based on the STAGING_DIR configuration if configured
// otherwise based on the users home directory.
private val appStagingBaseDir = sparkConf.get(STAGING_DIR).map { new Path(_) }
.getOrElse(FileSystem.get(hadoopConf).getHomeDirectory())
/** Forward an application state change to the attached launcher backend. */
def reportLauncherState(state: SparkAppHandle.State): Unit = {
launcherBackend.setState(state)
}
/** Shut down this client: close the launcher-backend connection and stop the YARN client. */
def stop(): Unit = {
launcherBackend.close()
yarnClient.stop()
}
/**
 * Submit an application running our ApplicationMaster to the ResourceManager.
 *
 * The stable Yarn API provides a convenience method (YarnClient#createApplication) for
 * creating applications and setting up the application submission context. This was not
 * available in the alpha API.
 *
 * @return the ApplicationId assigned to this application by the ResourceManager.
 */
def submitApplication(): ApplicationId = {
var appId: ApplicationId = null
try {
launcherBackend.connect()
yarnClient.init(hadoopConf)
yarnClient.start()
logInfo("Requesting a new application from cluster with %d NodeManagers"
.format(yarnClient.getYarnClusterMetrics.getNumNodeManagers))
// Get a new application from our RM
val newApp = yarnClient.createApplication()
val newAppResponse = newApp.getNewApplicationResponse()
appId = newAppResponse.getApplicationId()
new CallerContext("CLIENT", sparkConf.get(APP_CALLER_CONTEXT),
Option(appId.toString)).setCurrentContext()
// Verify whether the cluster has enough resources for our AM
verifyClusterResources(newAppResponse)
// Set up the appropriate contexts to launch our AM
val containerContext = createContainerLaunchContext(newAppResponse)
val appContext = createApplicationSubmissionContext(newApp, containerContext)
// Finally, submit and monitor the application
logInfo(s"Submitting application $appId to ResourceManager")
yarnClient.submitApplication(appContext)
launcherBackend.setAppId(appId.toString)
reportLauncherState(SparkAppHandle.State.SUBMITTED)
appId
} catch {
case e: Throwable =>
// If an application id was already allocated, remove any staging
// directory created for it before rethrowing.
if (appId != null) {
cleanupStagingDir(appId)
}
throw e
}
}
/**
 * Cleanup application staging directory.
 *
 * Recursively deletes the app-specific staging directory under
 * `appStagingBaseDir`, unless the configuration asks to preserve staging
 * files. In cluster mode with keytab-based login, the deletion is performed
 * under a UGI freshly logged in from the keytab.
 */
private def cleanupStagingDir(appId: ApplicationId): Unit = {
// Skip deletion entirely when the user asked to preserve staging files.
if (sparkConf.get(PRESERVE_STAGING_FILES)) {
return
}
def cleanupStagingDirInternal(): Unit = {
val stagingDirPath = new Path(appStagingBaseDir, getAppStagingDir(appId))
try {
val fs = stagingDirPath.getFileSystem(hadoopConf)
// Recursive delete; only log when something was actually removed.
if (fs.delete(stagingDirPath, true)) {
logInfo(s"Deleted staging directory $stagingDirPath")
}
} catch {
case ioe: IOException =>
// Best-effort cleanup: a failure here must not fail the application.
logWarning("Failed to cleanup staging dir " + stagingDirPath, ioe)
}
}
if (isClusterMode && principal != null && keytab != null) {
val newUgi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab)
newUgi.doAs(new PrivilegedExceptionAction[Unit] {
override def run(): Unit = {
cleanupStagingDirInternal()
}
})
} else {
cleanupStagingDirInternal()
}
}
/**
 * Set up the context for submitting our ApplicationMaster.
 * This uses the YarnClientApplication not available in the Yarn alpha API.
 *
 * @param newApp the application created by the YARN client.
 * @param containerContext launch context for the AM container.
 * @return the populated submission context, ready for submitApplication().
 */
def createApplicationSubmissionContext(
newApp: YarnClientApplication,
containerContext: ContainerLaunchContext): ApplicationSubmissionContext = {
val appContext = newApp.getApplicationSubmissionContext
appContext.setApplicationName(sparkConf.get("spark.app.name", "Spark"))
appContext.setQueue(sparkConf.get(QUEUE_NAME))
appContext.setAMContainerSpec(containerContext)
appContext.setApplicationType("SPARK")
sparkConf.get(APPLICATION_TAGS).foreach { tags =>
appContext.setApplicationTags(new java.util.HashSet[String](tags.asJava))
}
sparkConf.get(MAX_APP_ATTEMPTS) match {
case Some(v) => appContext.setMaxAppAttempts(v)
case None => logDebug(s"${MAX_APP_ATTEMPTS.key} is not set. " +
"Cluster's default value will be used.")
}
sparkConf.get(AM_ATTEMPT_FAILURE_VALIDITY_INTERVAL_MS).foreach { interval =>
appContext.setAttemptFailuresValidityInterval(interval)
}
// Resource capability requested for the AM container: memory including
// overhead, plus cores.
val capability = Records.newRecord(classOf[Resource])
capability.setMemory(amMemory + amMemoryOverhead)
capability.setVirtualCores(amCores)
// With an AM node-label expression, the capability must be expressed via a
// ResourceRequest carrying the label; otherwise it is set directly.
sparkConf.get(AM_NODE_LABEL_EXPRESSION) match {
case Some(expr) =>
val amRequest = Records.newRecord(classOf[ResourceRequest])
amRequest.setResourceName(ResourceRequest.ANY)
amRequest.setPriority(Priority.newInstance(0))
amRequest.setCapability(capability)
amRequest.setNumContainers(1)
amRequest.setNodeLabelExpression(expr)
appContext.setAMContainerResourceRequest(amRequest)
case None =>
appContext.setResource(capability)
}
sparkConf.get(ROLLED_LOG_INCLUDE_PATTERN).foreach { includePattern =>
try {
val logAggregationContext = Records.newRecord(classOf[LogAggregationContext])
// These two methods were added in Hadoop 2.6.4, so we still need to use reflection to
// avoid compile error when building against Hadoop 2.6.0 ~ 2.6.3.
val setRolledLogsIncludePatternMethod =
logAggregationContext.getClass.getMethod("setRolledLogsIncludePattern", classOf[String])
setRolledLogsIncludePatternMethod.invoke(logAggregationContext, includePattern)
sparkConf.get(ROLLED_LOG_EXCLUDE_PATTERN).foreach { excludePattern =>
val setRolledLogsExcludePatternMethod =
logAggregationContext.getClass.getMethod("setRolledLogsExcludePattern", classOf[String])
setRolledLogsExcludePatternMethod.invoke(logAggregationContext, excludePattern)
}
appContext.setLogAggregationContext(logAggregationContext)
} catch {
case NonFatal(e) =>
// Reflection failed: this YARN version lacks rolled-log patterns, so the
// setting is ignored with a warning rather than failing submission.
logWarning(s"Ignoring ${ROLLED_LOG_INCLUDE_PATTERN.key} because the version of YARN " +
"does not support it", e)
}
}
appContext
}
/**
 * Set up security tokens for launching our ApplicationMaster container.
 *
 * This method will obtain delegation tokens from all the registered providers, and set them in
 * the AM's launch context.
 */
private def setupSecurityToken(amContainer: ContainerLaunchContext): Unit = {
val credentials = UserGroupInformation.getCurrentUser().getCredentials()
val credentialManager = new YARNHadoopDelegationTokenManager(sparkConf, hadoopConf)
credentialManager.obtainDelegationTokens(hadoopConf, credentials)
// When using a proxy user, copy the delegation tokens to the user's credentials. Avoid
// that for regular users, since in those case the user already has access to the TGT,
// and adding delegation tokens could lead to expired or cancelled tokens being used
// later, as reported in SPARK-15754.
val currentUser = UserGroupInformation.getCurrentUser()
if (SparkHadoopUtil.get.isProxyUser(currentUser)) {
currentUser.addCredentials(credentials)
}
// Serialize the credentials and attach them to the AM container launch context.
val dob = new DataOutputBuffer
credentials.writeTokenStorageToStream(dob)
amContainer.setTokens(ByteBuffer.wrap(dob.getData))
}
/**
 * Get the application report from the ResourceManager for an application we
 * have submitted.
 *
 * @param appId id of the application, as returned by submitApplication().
 */
def getApplicationReport(appId: ApplicationId): ApplicationReport =
yarnClient.getApplicationReport(appId)
/**
 * Return the security token used by this client to communicate with the
 * ApplicationMaster, or the empty string when the report carries no token
 * (i.e. security is disabled).
 */
private def getClientToken(report: ApplicationReport): String =
Option(report.getClientToAMToken).fold("")(_.toString)
/**
 * Fail fast if we have requested more resources per container than is available in the cluster.
 *
 * Checks both the executor request (memory + overhead) and the AM request
 * against the cluster's maximum container memory.
 *
 * @throws IllegalArgumentException if either request exceeds the maximum.
 */
private def verifyClusterResources(newAppResponse: GetNewApplicationResponse): Unit = {
val maxMem = newAppResponse.getMaximumResourceCapability().getMemory()
logInfo("Verifying our application has not requested more than the maximum " +
s"memory capability of the cluster ($maxMem MB per container)")
val executorMem = executorMemory + executorMemoryOverhead
if (executorMem > maxMem) {
throw new IllegalArgumentException(s"Required executor memory ($executorMemory" +
s"+$executorMemoryOverhead MB) is above the max threshold ($maxMem MB) of this cluster! " +
"Please check the values of 'yarn.scheduler.maximum-allocation-mb' and/or " +
"'yarn.nodemanager.resource.memory-mb'.")
}
val amMem = amMemory + amMemoryOverhead
if (amMem > maxMem) {
throw new IllegalArgumentException(s"Required AM memory ($amMemory" +
s"+$amMemoryOverhead MB) is above the max threshold ($maxMem MB) of this cluster! " +
"Please increase the value of 'yarn.scheduler.maximum-allocation-mb'.")
}
logInfo("Will allocate AM container, with %d MB memory including %d MB overhead".format(
amMem,
amMemoryOverhead))
// We could add checks to make sure the entire cluster has enough resources but that involves
// getting all the node reports and computing ourselves.
}
/**
 * Copy the given file to a remote file system (e.g. HDFS) if needed.
 * The file is only copied if the source and destination file systems are different or the source
 * scheme is "file". This is used for preparing resources for launching the ApplicationMaster
 * container. Exposed for testing.
 *
 * @param destDir destination directory on the remote file system.
 * @param srcPath source file to copy.
 * @param replication replication factor applied to the copied file.
 * @param symlinkCache cache of already-resolved destination directory URIs.
 * @param force when true, copy even if source and destination FS match.
 * @param destName optional name for the destination file; defaults to the
 *                 source file's name.
 * @return the fully-qualified, symlink-resolved destination path (the
 *         original source path if no copy was needed).
 */
private[yarn] def copyFileToRemote(
destDir: Path,
srcPath: Path,
replication: Short,
symlinkCache: Map[URI, Path],
force: Boolean = false,
destName: Option[String] = None): Path = {
val destFs = destDir.getFileSystem(hadoopConf)
val srcFs = srcPath.getFileSystem(hadoopConf)
var destPath = srcPath
if (force || !compareFs(srcFs, destFs) || "file".equals(srcFs.getScheme)) {
destPath = new Path(destDir, destName.getOrElse(srcPath.getName()))
logInfo(s"Uploading resource $srcPath -> $destPath")
FileUtil.copy(srcFs, srcPath, destFs, destPath, false, hadoopConf)
destFs.setReplication(destPath, replication)
destFs.setPermission(destPath, new FsPermission(APP_FILE_PERMISSION))
} else {
logInfo(s"Source and destination file systems are the same. Not copying $srcPath")
}
// Resolve any symlinks in the URI path so using a "current" symlink to point to a specific
// version shows the specific version in the distributed cache configuration
val qualifiedDestPath = destFs.makeQualified(destPath)
val qualifiedDestDir = qualifiedDestPath.getParent
val resolvedDestDir = symlinkCache.getOrElseUpdate(qualifiedDestDir.toUri(), {
val fc = FileContext.getFileContext(qualifiedDestDir.toUri(), hadoopConf)
fc.resolvePath(qualifiedDestDir)
})
new Path(resolvedDestDir, qualifiedDestPath.getName())
}
  /**
   * Upload any resources to the distributed cache if needed. If a resource is intended to be
   * consumed locally, set up the appropriate config for downstream code to handle it properly.
   * This is used for setting up a container launch context for our ApplicationMaster.
   * Exposed for testing.
   *
   * @param destDir staging directory on the remote file system for this application.
   * @param pySparkArchives archives needed to run PySpark, distributed with the other files.
   * @return map from localized resource name to its YARN `LocalResource` descriptor.
   */
  def prepareLocalResources(
      destDir: Path,
      pySparkArchives: Seq[String]): HashMap[String, LocalResource] = {
    logInfo("Preparing resources for our AM container")
    // Upload Spark and the application JAR to the remote file system if necessary,
    // and add them as local resources to the application master.
    val fs = destDir.getFileSystem(hadoopConf)
    // Used to keep track of URIs added to the distributed cache. If the same URI is added
    // multiple times, YARN will fail to launch containers for the app with an internal
    // error.
    val distributedUris = new HashSet[String]
    // Used to keep track of URIs(files) added to the distribute cache have the same name. If
    // same name but different path files are added multiple time, YARN will fail to launch
    // containers for the app with an internal error.
    val distributedNames = new HashSet[String]
    val replication = sparkConf.get(STAGING_FILE_REPLICATION).map(_.toShort)
      .getOrElse(fs.getDefaultReplication(destDir))
    val localResources = HashMap[String, LocalResource]()
    FileSystem.mkdirs(fs, destDir, new FsPermission(STAGING_DIR_PERMISSION))
    val statCache: Map[URI, FileStatus] = HashMap[URI, FileStatus]()
    val symlinkCache: Map[URI, Path] = HashMap[URI, Path]()
    // Registers a URI in the dedup sets above. Returns false (after logging a warning) when
    // either the exact URI, or a different URI with the same file name, was already added.
    def addDistributedUri(uri: URI): Boolean = {
      val uriStr = uri.toString()
      val fileName = new File(uri.getPath).getName
      if (distributedUris.contains(uriStr)) {
        logWarning(s"Same path resource $uri added multiple times to distributed cache.")
        false
      } else if (distributedNames.contains(fileName)) {
        logWarning(s"Same name resource $uri added multiple times to distributed cache")
        false
      } else {
        distributedUris += uriStr
        distributedNames += fileName
        true
      }
    }
    /**
     * Distribute a file to the cluster.
     *
     * If the file's path is a "local:" URI, it's actually not distributed. Other files are copied
     * to HDFS (if not already there) and added to the application's distributed cache.
     *
     * @param path URI of the file to distribute.
     * @param resType Type of resource being distributed.
     * @param destName Name of the file in the distributed cache.
     * @param targetDir Subdirectory where to place the file.
     * @param appMasterOnly Whether to distribute only to the AM.
     * @return A 2-tuple. First item is whether the file is a "local:" URI. Second item is the
     *         localized path for non-local paths, or the input `path` for local paths.
     *         The localized path will be null if the URI has already been added to the cache.
     */
    def distribute(
        path: String,
        resType: LocalResourceType = LocalResourceType.FILE,
        destName: Option[String] = None,
        targetDir: Option[String] = None,
        appMasterOnly: Boolean = false): (Boolean, String) = {
      val trimmedPath = path.trim()
      val localURI = Utils.resolveURI(trimmedPath)
      if (localURI.getScheme != LOCAL_SCHEME) {
        if (addDistributedUri(localURI)) {
          val localPath = getQualifiedLocalPath(localURI, hadoopConf)
          // Link name priority: explicit destName, then the URI fragment, then the file name.
          val linkname = targetDir.map(_ + "/").getOrElse("") +
            destName.orElse(Option(localURI.getFragment())).getOrElse(localPath.getName())
          val destPath = copyFileToRemote(destDir, localPath, replication, symlinkCache)
          val destFs = FileSystem.get(destPath.toUri(), hadoopConf)
          distCacheMgr.addResource(
            destFs, hadoopConf, destPath, localResources, resType, linkname, statCache,
            appMasterOnly = appMasterOnly)
          (false, linkname)
        } else {
          (false, null)
        }
      } else {
        (true, trimmedPath)
      }
    }
    // If we passed in a keytab, make sure we copy the keytab to the staging directory on
    // HDFS, and setup the relevant environment vars, so the AM can login again.
    if (loginFromKeytab) {
      logInfo("To enable the AM to login from keytab, credentials are being copied over to the AM" +
        " via the YARN Secure Distributed Cache.")
      val (_, localizedPath) = distribute(keytab,
        destName = Some(amKeytabFileName),
        appMasterOnly = true)
      require(localizedPath != null, "Keytab file already distributed.")
    }
    /**
     * Add Spark to the cache. There are two settings that control what files to add to the cache:
     * - if a Spark archive is defined, use the archive. The archive is expected to contain
     *   jar files at its root directory.
     * - if a list of jars is provided, filter the non-local ones, resolve globs, and
     *   add the found files to the cache.
     *
     * Note that the archive cannot be a "local" URI. If none of the above settings are found,
     * then upload all files found in $SPARK_HOME/jars.
     */
    val sparkArchive = sparkConf.get(SPARK_ARCHIVE)
    if (sparkArchive.isDefined) {
      val archive = sparkArchive.get
      require(!isLocalUri(archive), s"${SPARK_ARCHIVE.key} cannot be a local URI.")
      distribute(Utils.resolveURI(archive).toString,
        resType = LocalResourceType.ARCHIVE,
        destName = Some(LOCALIZED_LIB_DIR))
    } else {
      sparkConf.get(SPARK_JARS) match {
        case Some(jars) =>
          // Break the list of jars to upload, and resolve globs.
          val localJars = new ArrayBuffer[String]()
          jars.foreach { jar =>
            if (!isLocalUri(jar)) {
              val path = getQualifiedLocalPath(Utils.resolveURI(jar), hadoopConf)
              val pathFs = FileSystem.get(path.toUri(), hadoopConf)
              pathFs.globStatus(path).filter(_.isFile()).foreach { entry =>
                val uri = entry.getPath().toUri()
                statCache.update(uri, entry)
                distribute(uri.toString(), targetDir = Some(LOCALIZED_LIB_DIR))
              }
            } else {
              localJars += jar
            }
          }
          // Propagate the local URIs to the containers using the configuration.
          sparkConf.set(SPARK_JARS, localJars)
        case None =>
          // No configuration, so fall back to uploading local jar files.
          logWarning(s"Neither ${SPARK_JARS.key} nor ${SPARK_ARCHIVE.key} is set, falling back " +
            "to uploading libraries under SPARK_HOME.")
          val jarsDir = new File(YarnCommandBuilderUtils.findJarsDir(
            sparkConf.getenv("SPARK_HOME")))
          val jarsArchive = File.createTempFile(LOCALIZED_LIB_DIR, ".zip",
            new File(Utils.getLocalDir(sparkConf)))
          val jarsStream = new ZipOutputStream(new FileOutputStream(jarsArchive))
          try {
            // Compression level 0 (store only): the jars are already compressed.
            jarsStream.setLevel(0)
            jarsDir.listFiles().foreach { f =>
              if (f.isFile && f.getName.toLowerCase(Locale.ROOT).endsWith(".jar") && f.canRead) {
                jarsStream.putNextEntry(new ZipEntry(f.getName))
                Files.copy(f, jarsStream)
                jarsStream.closeEntry()
              }
            }
          } finally {
            jarsStream.close()
          }
          distribute(jarsArchive.toURI.getPath,
            resType = LocalResourceType.ARCHIVE,
            destName = Some(LOCALIZED_LIB_DIR))
          // The archive has been uploaded; the local temp file is no longer needed.
          jarsArchive.delete()
      }
    }
    /**
     * Copy user jar to the distributed cache if their scheme is not "local".
     * Otherwise, set the corresponding key in our SparkConf to handle it downstream.
     */
    Option(args.userJar).filter(_.trim.nonEmpty).foreach { jar =>
      val (isLocal, localizedPath) = distribute(jar, destName = Some(APP_JAR_NAME))
      if (isLocal) {
        require(localizedPath != null, s"Path $jar already distributed")
        // If the resource is intended for local use only, handle this downstream
        // by setting the appropriate property
        sparkConf.set(APP_JAR, localizedPath)
      }
    }
    /**
     * Do the same for any additional resources passed in through ClientArguments.
     * Each resource category is represented by a 3-tuple of:
     * (1) comma separated list of resources in this category,
     * (2) resource type, and
     * (3) whether to add these resources to the classpath
     */
    val cachedSecondaryJarLinks = ListBuffer.empty[String]
    List(
      (sparkConf.get(JARS_TO_DISTRIBUTE), LocalResourceType.FILE, true),
      (sparkConf.get(FILES_TO_DISTRIBUTE), LocalResourceType.FILE, false),
      (sparkConf.get(ARCHIVES_TO_DISTRIBUTE), LocalResourceType.ARCHIVE, false)
    ).foreach { case (flist, resType, addToClasspath) =>
      flist.foreach { file =>
        val (_, localizedPath) = distribute(file, resType = resType)
        // If addToClassPath, we ignore adding jar multiple times to distributed cache.
        if (addToClasspath) {
          if (localizedPath != null) {
            cachedSecondaryJarLinks += localizedPath
          }
        } else {
          // Duplicate non-classpath resources are a hard error (null == already distributed).
          if (localizedPath == null) {
            throw new IllegalArgumentException(s"Attempt to add ($file) multiple times" +
              " to the distributed cache.")
          }
        }
      }
    }
    if (cachedSecondaryJarLinks.nonEmpty) {
      sparkConf.set(SECONDARY_JARS, cachedSecondaryJarLinks)
    }
    if (isClusterMode && args.primaryPyFile != null) {
      distribute(args.primaryPyFile, appMasterOnly = true)
    }
    pySparkArchives.foreach { f => distribute(f) }
    // The python files list needs to be treated especially. All files that are not an
    // archive need to be placed in a subdirectory that will be added to PYTHONPATH.
    sparkConf.get(PY_FILES).foreach { f =>
      val targetDir = if (f.endsWith(".py")) Some(LOCALIZED_PYTHON_DIR) else None
      distribute(f, targetDir = targetDir)
    }
    // Update the configuration with all the distributed files, minus the conf archive. The
    // conf archive will be handled by the AM differently so that we avoid having to send
    // this configuration by other means. See SPARK-14602 for one reason of why this is needed.
    distCacheMgr.updateConfiguration(sparkConf)
    // Upload the conf archive to HDFS manually, and record its location in the configuration.
    // This will allow the AM to know where the conf archive is in HDFS, so that it can be
    // distributed to the containers.
    //
    // This code forces the archive to be copied, so that unit tests pass (since in that case both
    // file systems are the same and the archive wouldn't normally be copied). In most (all?)
    // deployments, the archive would be copied anyway, since it's a temp file in the local file
    // system.
    val remoteConfArchivePath = new Path(destDir, LOCALIZED_CONF_ARCHIVE)
    val remoteFs = FileSystem.get(remoteConfArchivePath.toUri(), hadoopConf)
    sparkConf.set(CACHED_CONF_ARCHIVE, remoteConfArchivePath.toString())
    val localConfArchive = new Path(createConfArchive().toURI())
    copyFileToRemote(destDir, localConfArchive, replication, symlinkCache, force = true,
      destName = Some(LOCALIZED_CONF_ARCHIVE))
    // Manually add the config archive to the cache manager so that the AM is launched with
    // the proper files set up.
    distCacheMgr.addResource(
      remoteFs, hadoopConf, remoteConfArchivePath, localResources, LocalResourceType.ARCHIVE,
      LOCALIZED_CONF_DIR, statCache, appMasterOnly = false)
    // Clear the cache-related entries from the configuration to avoid them polluting the
    // UI's environment page. This works for client mode; for cluster mode, this is handled
    // by the AM.
    CACHE_CONFIGS.foreach(sparkConf.remove)
    localResources
  }
  /**
   * Create an archive with the config files for distribution.
   *
   * These will be used by AM and executors. The files are zipped and added to the job as an
   * archive, so that YARN will explode it when distributing to AM and executors. This directory
   * is then added to the classpath of AM and executor process, just to make sure that everybody
   * is using the same default config.
   *
   * This follows the order of precedence set by the startup scripts, in which HADOOP_CONF_DIR
   * shows up in the classpath before YARN_CONF_DIR.
   *
   * Currently this makes a shallow copy of the conf directory. If there are cases where a
   * Hadoop config directory contains subdirectories, this code will have to be fixed.
   *
   * The archive also contains some Spark configuration. Namely, it saves the contents of
   * SparkConf in a file to be loaded by the AM process.
   *
   * @return local temporary file holding the zipped configuration archive.
   */
  private def createConfArchive(): File = {
    val hadoopConfFiles = new HashMap[String, File]()
    // SPARK_CONF_DIR shows up in the classpath before HADOOP_CONF_DIR/YARN_CONF_DIR
    sys.env.get("SPARK_CONF_DIR").foreach { localConfDir =>
      val dir = new File(localConfDir)
      if (dir.isDirectory) {
        // Only *.xml files are picked up from SPARK_CONF_DIR.
        val files = dir.listFiles(new FileFilter {
          override def accept(pathname: File): Boolean = {
            pathname.isFile && pathname.getName.endsWith(".xml")
          }
        })
        files.foreach { f => hadoopConfFiles(f.getName) = f }
      }
    }
    // SPARK-23630: during testing, Spark scripts filter out hadoop conf dirs so that user's
    // environments do not interfere with tests. This allows a special env variable during
    // tests so that custom conf dirs can be used by unit tests.
    val confDirs = Seq("HADOOP_CONF_DIR", "YARN_CONF_DIR") ++
      (if (Utils.isTesting) Seq("SPARK_TEST_HADOOP_CONF_DIR") else Nil)
    confDirs.foreach { envKey =>
      sys.env.get(envKey).foreach { path =>
        val dir = new File(path)
        if (dir.isDirectory()) {
          val files = dir.listFiles()
          if (files == null) {
            logWarning("Failed to list files under directory " + dir)
          } else {
            files.foreach { file =>
              // First writer wins: names from earlier (higher-precedence) dirs are kept.
              if (file.isFile && !hadoopConfFiles.contains(file.getName())) {
                hadoopConfFiles(file.getName()) = file
              }
            }
          }
        }
      }
    }
    val confArchive = File.createTempFile(LOCALIZED_CONF_DIR, ".zip",
      new File(Utils.getLocalDir(sparkConf)))
    val confStream = new ZipOutputStream(new FileOutputStream(confArchive))
    try {
      // Compression level 0 (store only): these are small text files.
      confStream.setLevel(0)
      // Upload $SPARK_CONF_DIR/log4j.properties file to the distributed cache to make sure that
      // the executors will use the latest configurations instead of the default values. This is
      // required when user changes log4j.properties directly to set the log configurations. If
      // configuration file is provided through --files then executors will be taking configurations
      // from --files instead of $SPARK_CONF_DIR/log4j.properties.
      // Also upload metrics.properties to distributed cache if exists in classpath.
      // If user specify this file using --files then executors will use the one
      // from --files instead.
      for { prop <- Seq("log4j.properties", "metrics.properties")
            url <- Option(Utils.getContextOrSparkClassLoader.getResource(prop))
            if url.getProtocol == "file" } {
        val file = new File(url.getPath())
        confStream.putNextEntry(new ZipEntry(file.getName()))
        Files.copy(file, confStream)
        confStream.closeEntry()
      }
      // Save the Hadoop config files under a separate directory in the archive. This directory
      // is appended to the classpath so that the cluster-provided configuration takes precedence.
      confStream.putNextEntry(new ZipEntry(s"$LOCALIZED_HADOOP_CONF_DIR/"))
      confStream.closeEntry()
      hadoopConfFiles.foreach { case (name, file) =>
        if (file.canRead()) {
          confStream.putNextEntry(new ZipEntry(s"$LOCALIZED_HADOOP_CONF_DIR/$name"))
          Files.copy(file, confStream)
          confStream.closeEntry()
        }
      }
      // Save the YARN configuration into a separate file that will be overlayed on top of the
      // cluster's Hadoop conf.
      confStream.putNextEntry(new ZipEntry(SparkHadoopUtil.SPARK_HADOOP_CONF_FILE))
      hadoopConf.writeXml(confStream)
      confStream.closeEntry()
      // Save Spark configuration to a file in the archive, but filter out the app's secret.
      val props = new Properties()
      sparkConf.getAll.foreach { case (k, v) =>
        props.setProperty(k, v)
      }
      // Override spark.yarn.key to point to the location in distributed cache which will be used
      // by AM.
      Option(amKeytabFileName).foreach { k => props.setProperty(KEYTAB.key, k) }
      confStream.putNextEntry(new ZipEntry(SPARK_CONF_FILE))
      val writer = new OutputStreamWriter(confStream, StandardCharsets.UTF_8)
      props.store(writer, "Spark configuration.")
      // Flush the writer but do not close it: closing it would close the zip stream too.
      writer.flush()
      confStream.closeEntry()
    } finally {
      confStream.close()
    }
    confArchive
  }
  /**
   * Set up the environment for launching our ApplicationMaster container.
   *
   * @param stagingDirPath staging directory for this application on the remote FS.
   * @param pySparkArchives PySpark archives added to PYTHONPATH, if any.
   * @return environment variable map for the AM container.
   */
  private def setupLaunchEnv(
      stagingDirPath: Path,
      pySparkArchives: Seq[String]): HashMap[String, String] = {
    logInfo("Setting up the launch environment for our AM container")
    val env = new HashMap[String, String]()
    populateClasspath(args, hadoopConf, sparkConf, env, sparkConf.get(DRIVER_CLASS_PATH))
    env("SPARK_YARN_STAGING_DIR") = stagingDirPath.toString
    env("SPARK_USER") = UserGroupInformation.getCurrentUser().getShortUserName()
    // Pick up any environment variables for the AM provided through spark.yarn.appMasterEnv.*
    val amEnvPrefix = "spark.yarn.appMasterEnv."
    sparkConf.getAll
      .filter { case (k, v) => k.startsWith(amEnvPrefix) }
      .map { case (k, v) => (k.substring(amEnvPrefix.length), v) }
      .foreach { case (k, v) => YarnSparkHadoopUtil.addPathToEnvironment(env, k, v) }
    // If pyFiles contains any .py files, we need to add LOCALIZED_PYTHON_DIR to the PYTHONPATH
    // of the container processes too. Add all non-.py files directly to PYTHONPATH.
    //
    // NOTE: the code currently does not handle .py files defined with a "local:" scheme.
    val pythonPath = new ListBuffer[String]()
    val (pyFiles, pyArchives) = sparkConf.get(PY_FILES).partition(_.endsWith(".py"))
    if (pyFiles.nonEmpty) {
      pythonPath += buildPath(Environment.PWD.$$(), LOCALIZED_PYTHON_DIR)
    }
    (pySparkArchives ++ pyArchives).foreach { path =>
      val uri = Utils.resolveURI(path)
      if (uri.getScheme != LOCAL_SCHEME) {
        // Distributed archives are localized into the container's working directory.
        pythonPath += buildPath(Environment.PWD.$$(), new Path(uri).getName())
      } else {
        pythonPath += uri.getPath()
      }
    }
    // Finally, update the Spark config to propagate PYTHONPATH to the AM and executors.
    if (pythonPath.nonEmpty) {
      val pythonPathList = (sys.env.get("PYTHONPATH") ++ pythonPath)
      env("PYTHONPATH") = (env.get("PYTHONPATH") ++ pythonPathList)
        .mkString(ApplicationConstants.CLASS_PATH_SEPARATOR)
      val pythonPathExecutorEnv = (sparkConf.getExecutorEnv.toMap.get("PYTHONPATH") ++
        pythonPathList).mkString(ApplicationConstants.CLASS_PATH_SEPARATOR)
      sparkConf.setExecutorEnv("PYTHONPATH", pythonPathExecutorEnv)
    }
    if (isClusterMode) {
      // propagate PYSPARK_DRIVER_PYTHON and PYSPARK_PYTHON to driver in cluster mode
      // (explicit settings in env win over the submitter's environment)
      Seq("PYSPARK_DRIVER_PYTHON", "PYSPARK_PYTHON").foreach { envname =>
        if (!env.contains(envname)) {
          sys.env.get(envname).foreach(env(envname) = _)
        }
      }
      sys.env.get("PYTHONHASHSEED").foreach(env.put("PYTHONHASHSEED", _))
    }
    // Propagate any distribution-provided extra classpath to the AM's environment.
    sys.env.get(ENV_DIST_CLASSPATH).foreach { dcp =>
      env(ENV_DIST_CLASSPATH) = dcp
    }
    env
  }
  /**
   * Set up a ContainerLaunchContext to launch our ApplicationMaster container.
   * This sets up the launch environment, java options, and the command for launching the AM.
   *
   * @param newAppResponse ResourceManager response carrying the new application's id.
   * @return fully populated launch context for the AM container.
   */
  private def createContainerLaunchContext(newAppResponse: GetNewApplicationResponse)
    : ContainerLaunchContext = {
    logInfo("Setting up container launch context for our AM")
    val appId = newAppResponse.getApplicationId
    val appStagingDirPath = new Path(appStagingBaseDir, getAppStagingDir(appId))
    val pySparkArchives =
      if (sparkConf.get(IS_PYTHON_APP)) {
        findPySparkArchives()
      } else {
        Nil
      }
    val launchEnv = setupLaunchEnv(appStagingDirPath, pySparkArchives)
    val localResources = prepareLocalResources(appStagingDirPath, pySparkArchives)
    val amContainer = Records.newRecord(classOf[ContainerLaunchContext])
    amContainer.setLocalResources(localResources.asJava)
    amContainer.setEnvironment(launchEnv.asJava)
    val javaOpts = ListBuffer[String]()
    // Set the environment variable through a command prefix
    // to append to the existing value of the variable
    var prefixEnv: Option[String] = None
    // Add Xmx for AM memory
    javaOpts += "-Xmx" + amMemory + "m"
    val tmpDir = new Path(Environment.PWD.$$(), YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR)
    javaOpts += "-Djava.io.tmpdir=" + tmpDir
    // TODO: Remove once cpuset version is pushed out.
    // The context is, default gc for server class machines ends up using all cores to do gc -
    // hence if there are multiple containers in same node, Spark GC affects all other containers'
    // performance (which can be that of other Spark containers)
    // Instead of using this, rely on cpusets by YARN to enforce "proper" Spark behavior in
    // multi-tenant environments. Not sure how default Java GC behaves if it is limited to subset
    // of cores on a node.
    val useConcurrentAndIncrementalGC = launchEnv.get("SPARK_USE_CONC_INCR_GC").exists(_.toBoolean)
    if (useConcurrentAndIncrementalGC) {
      // In our expts, using (default) throughput collector has severe perf ramifications in
      // multi-tenant machines
      javaOpts += "-XX:+UseConcMarkSweepGC"
      javaOpts += "-XX:MaxTenuringThreshold=31"
      javaOpts += "-XX:SurvivorRatio=8"
      javaOpts += "-XX:+CMSIncrementalMode"
      javaOpts += "-XX:+CMSIncrementalPacing"
      javaOpts += "-XX:CMSIncrementalDutyCycleMin=0"
      javaOpts += "-XX:CMSIncrementalDutyCycle=10"
    }
    // Include driver-specific java options if we are launching a driver
    if (isClusterMode) {
      sparkConf.get(DRIVER_JAVA_OPTIONS).foreach { opts =>
        javaOpts ++= Utils.splitCommandString(opts)
          .map(Utils.substituteAppId(_, appId.toString))
          .map(YarnSparkHadoopUtil.escapeForShell)
      }
      val libraryPaths = Seq(sparkConf.get(DRIVER_LIBRARY_PATH),
        sys.props.get("spark.driver.libraryPath")).flatten
      if (libraryPaths.nonEmpty) {
        prefixEnv = Some(createLibraryPathPrefix(libraryPaths.mkString(File.pathSeparator),
          sparkConf))
      }
      if (sparkConf.get(AM_JAVA_OPTIONS).isDefined) {
        logWarning(s"${AM_JAVA_OPTIONS.key} will not take effect in cluster mode")
      }
    } else {
      // Validate and include yarn am specific java options in yarn-client mode.
      sparkConf.get(AM_JAVA_OPTIONS).foreach { opts =>
        if (opts.contains("-Dspark")) {
          val msg = s"${AM_JAVA_OPTIONS.key} is not allowed to set Spark options (was '$opts')."
          throw new SparkException(msg)
        }
        if (opts.contains("-Xmx")) {
          val msg = s"${AM_JAVA_OPTIONS.key} is not allowed to specify max heap memory settings " +
            s"(was '$opts'). Use spark.yarn.am.memory instead."
          throw new SparkException(msg)
        }
        javaOpts ++= Utils.splitCommandString(opts)
          .map(Utils.substituteAppId(_, appId.toString))
          .map(YarnSparkHadoopUtil.escapeForShell)
      }
      sparkConf.get(AM_LIBRARY_PATH).foreach { paths =>
        prefixEnv = Some(createLibraryPathPrefix(paths, sparkConf))
      }
    }
    // For log4j configuration to reference
    javaOpts += ("-Dspark.yarn.app.container.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR)
    val userClass =
      if (isClusterMode) {
        Seq("--class", YarnSparkHadoopUtil.escapeForShell(args.userClass))
      } else {
        Nil
      }
    val userJar =
      if (args.userJar != null) {
        Seq("--jar", args.userJar)
      } else {
        Nil
      }
    val primaryPyFile =
      if (isClusterMode && args.primaryPyFile != null) {
        Seq("--primary-py-file", new Path(args.primaryPyFile).getName())
      } else {
        Nil
      }
    val primaryRFile =
      if (args.primaryRFile != null) {
        Seq("--primary-r-file", args.primaryRFile)
      } else {
        Nil
      }
    // In cluster mode the AM runs the user class; in client mode it only launches executors.
    val amClass =
      if (isClusterMode) {
        Utils.classForName("org.apache.spark.deploy.yarn.ApplicationMaster").getName
      } else {
        Utils.classForName("org.apache.spark.deploy.yarn.ExecutorLauncher").getName
      }
    if (args.primaryRFile != null && args.primaryRFile.endsWith(".R")) {
      args.userArgs = ArrayBuffer(args.primaryRFile) ++ args.userArgs
    }
    val userArgs = args.userArgs.flatMap { arg =>
      Seq("--arg", YarnSparkHadoopUtil.escapeForShell(arg))
    }
    val amArgs =
      Seq(amClass) ++ userClass ++ userJar ++ primaryPyFile ++ primaryRFile ++ userArgs ++
      Seq("--properties-file", buildPath(Environment.PWD.$$(), LOCALIZED_CONF_DIR, SPARK_CONF_FILE))
    // Command for the ApplicationMaster
    val commands = prefixEnv ++
      Seq(Environment.JAVA_HOME.$$() + "/bin/java", "-server") ++
      javaOpts ++ amArgs ++
      Seq(
        "1>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout",
        "2>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr")
    // TODO: it would be nicer to just make sure there are no null commands here
    val printableCommands = commands.map(s => if (s == null) "null" else s).toList
    amContainer.setCommands(printableCommands.asJava)
    logDebug("===============================================================================")
    logDebug("YARN AM launch context:")
    logDebug(s" user class: ${Option(args.userClass).getOrElse("N/A")}")
    logDebug(" env:")
    if (log.isDebugEnabled) {
      // Redact secrets (e.g. credentials) before logging the environment.
      Utils.redact(sparkConf, launchEnv.toSeq).foreach { case (k, v) =>
        logDebug(s" $k -> $v")
      }
    }
    logDebug(" resources:")
    localResources.foreach { case (k, v) => logDebug(s" $k -> $v")}
    logDebug(" command:")
    logDebug(s" ${printableCommands.mkString(" ")}")
    logDebug("===============================================================================")
    // send the acl settings into YARN to control who has access via YARN interfaces
    val securityManager = new SecurityManager(sparkConf)
    amContainer.setApplicationACLs(
      YarnSparkHadoopUtil.getApplicationAclsForYarn(securityManager).asJava)
    setupSecurityToken(amContainer)
    amContainer
  }
  /**
   * Report the state of an application until it has exited, either successfully or
   * due to some failure, then return a pair of the yarn application state (FINISHED, FAILED,
   * KILLED, or RUNNING) and the final application state (UNDEFINED, SUCCEEDED, FAILED,
   * or KILLED).
   *
   * @param appId ID of the application to monitor.
   * @param returnOnRunning Whether to also return the application state when it is RUNNING.
   * @param logApplicationReport Whether to log details of the application report every iteration.
   * @param interval How often to poll the YARN RM for application status (in ms).
   * @return A pair of the yarn application state and the final application state.
   */
  def monitorApplication(
      appId: ApplicationId,
      returnOnRunning: Boolean = false,
      logApplicationReport: Boolean = true,
      interval: Long = sparkConf.get(REPORT_INTERVAL)): YarnAppReport = {
    var lastState: YarnApplicationState = null
    while (true) {
      // Poll at the configured interval; note the sleep also precedes the first query.
      Thread.sleep(interval)
      val report: ApplicationReport =
        try {
          getApplicationReport(appId)
        } catch {
          case e: ApplicationNotFoundException =>
            logError(s"Application $appId not found.")
            cleanupStagingDir(appId)
            return YarnAppReport(YarnApplicationState.KILLED, FinalApplicationStatus.KILLED, None)
          case NonFatal(e) =>
            val msg = s"Failed to contact YARN for application $appId."
            logError(msg, e)
            // Don't necessarily clean up staging dir because status is unknown
            return YarnAppReport(YarnApplicationState.FAILED, FinalApplicationStatus.FAILED,
              Some(msg))
        }
      val state = report.getYarnApplicationState
      if (logApplicationReport) {
        logInfo(s"Application report for $appId (state: $state)")
        // If DEBUG is enabled, log report details every iteration
        // Otherwise, log them every time the application changes state
        if (log.isDebugEnabled) {
          logDebug(formatReportDetails(report))
        } else if (lastState != state) {
          logInfo(formatReportDetails(report))
        }
      }
      // Forward each state transition to the launcher backend exactly once.
      if (lastState != state) {
        state match {
          case YarnApplicationState.RUNNING =>
            reportLauncherState(SparkAppHandle.State.RUNNING)
          case YarnApplicationState.FINISHED =>
            report.getFinalApplicationStatus match {
              case FinalApplicationStatus.FAILED =>
                reportLauncherState(SparkAppHandle.State.FAILED)
              case FinalApplicationStatus.KILLED =>
                reportLauncherState(SparkAppHandle.State.KILLED)
              case _ =>
                reportLauncherState(SparkAppHandle.State.FINISHED)
            }
          case YarnApplicationState.FAILED =>
            reportLauncherState(SparkAppHandle.State.FAILED)
          case YarnApplicationState.KILLED =>
            reportLauncherState(SparkAppHandle.State.KILLED)
          case _ =>
        }
      }
      // Terminal states: remove the staging dir and report back.
      if (state == YarnApplicationState.FINISHED ||
        state == YarnApplicationState.FAILED ||
        state == YarnApplicationState.KILLED) {
        cleanupStagingDir(appId)
        return createAppReport(report)
      }
      if (returnOnRunning && state == YarnApplicationState.RUNNING) {
        return createAppReport(report)
      }
      lastState = state
    }
    // Never reached, but keeps compiler happy
    throw new SparkException("While loop is depleted! This should never happen...")
  }
private def formatReportDetails(report: ApplicationReport): String = {
val details = Seq[(String, String)](
("client token", getClientToken(report)),
("diagnostics", report.getDiagnostics),
("ApplicationMaster host", report.getHost),
("ApplicationMaster RPC port", report.getRpcPort.toString),
("queue", report.getQueue),
("start time", report.getStartTime.toString),
("final status", report.getFinalApplicationStatus.toString),
("tracking URL", report.getTrackingUrl),
("user", report.getUser)
)
// Use more loggable format if value is null or empty
details.map { case (k, v) =>
val newValue = Option(v).filter(_.nonEmpty).getOrElse("N/A")
s"\\n\\t $k: $newValue"
}.mkString("")
}
  /**
   * Submit an application to the ResourceManager.
   * If set spark.yarn.submit.waitAppCompletion to true, it will stay alive
   * reporting the application's status until the application has exited for any reason.
   * Otherwise, the client process will exit after submission.
   * If the application finishes with a failed, killed, or undefined status,
   * throw an appropriate SparkException.
   */
  def run(): Unit = {
    this.appId = submitApplication()
    // "Fire and forget" only applies when no launcher backend is connected; a connected
    // launcher expects state updates, so we must monitor the app until completion.
    if (!launcherBackend.isConnected() && fireAndForget) {
      val report = getApplicationReport(appId)
      val state = report.getYarnApplicationState
      logInfo(s"Application report for $appId (state: $state)")
      logInfo(formatReportDetails(report))
      if (state == YarnApplicationState.FAILED || state == YarnApplicationState.KILLED) {
        throw new SparkException(s"Application $appId finished with status: $state")
      }
    } else {
      val YarnAppReport(appState, finalState, diags) = monitorApplication(appId)
      if (appState == YarnApplicationState.FAILED || finalState == FinalApplicationStatus.FAILED) {
        // Surface YARN's diagnostics (if any) before failing.
        diags.foreach { err =>
          logError(s"Application diagnostics message: $err")
        }
        throw new SparkException(s"Application $appId finished with failed status")
      }
      if (appState == YarnApplicationState.KILLED || finalState == FinalApplicationStatus.KILLED) {
        throw new SparkException(s"Application $appId is killed")
      }
      if (finalState == FinalApplicationStatus.UNDEFINED) {
        throw new SparkException(s"The final status of application $appId is undefined")
      }
    }
  }
private def findPySparkArchives(): Seq[String] = {
sys.env.get("PYSPARK_ARCHIVES_PATH")
.map(_.split(",").toSeq)
.getOrElse {
val pyLibPath = Seq(sys.env("SPARK_HOME"), "python", "lib").mkString(File.separator)
val pyArchivesFile = new File(pyLibPath, "pyspark.zip")
require(pyArchivesFile.exists(),
s"$pyArchivesFile not found; cannot run pyspark application in YARN mode.")
val py4jFile = new File(pyLibPath, "py4j-0.10.7-src.zip")
require(py4jFile.exists(),
s"$py4jFile not found; cannot run pyspark application in YARN mode.")
Seq(pyArchivesFile.getAbsolutePath(), py4jFile.getAbsolutePath())
}
}
}
private object Client extends Logging {
  // Alias for the user jar
  val APP_JAR_NAME: String = "__app__.jar"
  // URI scheme that identifies local resources
  val LOCAL_SCHEME = "local"
  // Staging directory for any temporary jars or files
  val SPARK_STAGING: String = ".sparkStaging"
  // Staging directory is private! -> rwx--------
  // ("700" is parsed with radix 8, i.e. an octal permission mask)
  val STAGING_DIR_PERMISSION: FsPermission =
    FsPermission.createImmutable(Integer.parseInt("700", 8).toShort)
  // App files are world-wide readable and owner writable -> rw-r--r--
  val APP_FILE_PERMISSION: FsPermission =
    FsPermission.createImmutable(Integer.parseInt("644", 8).toShort)
  // Distribution-defined classpath to add to processes
  val ENV_DIST_CLASSPATH = "SPARK_DIST_CLASSPATH"
  // Subdirectory where the user's Spark and Hadoop config files will be placed.
  val LOCALIZED_CONF_DIR = "__spark_conf__"
  // Subdirectory in the conf directory containing Hadoop config files.
  val LOCALIZED_HADOOP_CONF_DIR = "__hadoop_conf__"
  // File containing the conf archive in the AM. See prepareLocalResources().
  val LOCALIZED_CONF_ARCHIVE = LOCALIZED_CONF_DIR + ".zip"
  // Name of the file in the conf archive containing Spark configuration.
  val SPARK_CONF_FILE = "__spark_conf__.properties"
  // Subdirectory where the user's python files (not archives) will be placed.
  val LOCALIZED_PYTHON_DIR = "__pyfiles__"
  // Subdirectory where Spark libraries will be placed.
  val LOCALIZED_LIB_DIR = "__spark_libs__"
/**
* Return the path to the given application's staging directory.
*/
private def getAppStagingDir(appId: ApplicationId): String = {
buildPath(SPARK_STAGING, appId.toString())
}
/**
* Populate the classpath entry in the given environment map with any application
* classpath specified through the Hadoop and Yarn configurations.
*/
private[yarn] def populateHadoopClasspath(conf: Configuration, env: HashMap[String, String])
: Unit = {
val classPathElementsToAdd = getYarnAppClasspath(conf) ++ getMRAppClasspath(conf)
classPathElementsToAdd.foreach { c =>
YarnSparkHadoopUtil.addPathToEnvironment(env, Environment.CLASSPATH.name, c.trim)
}
}
private def getYarnAppClasspath(conf: Configuration): Seq[String] =
Option(conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH)) match {
case Some(s) => s.toSeq
case None => getDefaultYarnApplicationClasspath
}
private def getMRAppClasspath(conf: Configuration): Seq[String] =
Option(conf.getStrings("mapreduce.application.classpath")) match {
case Some(s) => s.toSeq
case None => getDefaultMRApplicationClasspath
}
  // YARN's built-in default application classpath entries.
  private[yarn] def getDefaultYarnApplicationClasspath: Seq[String] =
    YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH.toSeq
  // MapReduce's built-in default application classpath entries (comma-separated in the
  // constant, split here via StringUtils.getStrings).
  private[yarn] def getDefaultMRApplicationClasspath: Seq[String] =
    StringUtils.getStrings(MRJobConfig.DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH).toSeq
  /**
   * Populate the classpath entry in the given environment map.
   *
   * User jars are generally not added to the JVM's system classpath; those are handled by the AM
   * and executor backend. When the deprecated `spark.yarn.user.classpath.first` is used, user jars
   * are included in the system classpath, though. The extra class path and other uploaded files are
   * always made available through the system class path.
   *
   * @param args Client arguments (when starting the AM) or null (when starting executors).
   * @param conf Hadoop configuration.
   * @param sparkConf Spark configuration.
   * @param env environment map whose CLASSPATH entry is populated.
   * @param extraClassPath user-provided extra classpath, added before all other entries.
   */
  private[yarn] def populateClasspath(
      args: ClientArguments,
      conf: Configuration,
      sparkConf: SparkConf,
      env: HashMap[String, String],
      extraClassPath: Option[String] = None): Unit = {
    extraClassPath.foreach { cp =>
      addClasspathEntry(getClusterPath(sparkConf, cp), env)
    }
    // The container's working directory and the localized conf dir come next.
    addClasspathEntry(Environment.PWD.$$(), env)
    addClasspathEntry(Environment.PWD.$$() + Path.SEPARATOR + LOCALIZED_CONF_DIR, env)
    if (sparkConf.get(USER_CLASS_PATH_FIRST)) {
      // in order to properly add the app jar when user classpath is first
      // we have to do the mainJar separate in order to send the right thing
      // into addFileToClasspath
      val mainJar =
        if (args != null) {
          getMainJarUri(Option(args.userJar))
        } else {
          getMainJarUri(sparkConf.get(APP_JAR))
        }
      mainJar.foreach(addFileToClasspath(sparkConf, conf, _, APP_JAR_NAME, env))
      val secondaryJars =
        if (args != null) {
          getSecondaryJarUris(Option(sparkConf.get(JARS_TO_DISTRIBUTE)))
        } else {
          getSecondaryJarUris(sparkConf.get(SECONDARY_JARS))
        }
      secondaryJars.foreach { x =>
        addFileToClasspath(sparkConf, conf, x, null, env)
      }
    }
    // Add the Spark jars to the classpath, depending on how they were distributed.
    addClasspathEntry(buildPath(Environment.PWD.$$(), LOCALIZED_LIB_DIR, "*"), env)
    if (sparkConf.get(SPARK_ARCHIVE).isEmpty) {
      sparkConf.get(SPARK_JARS).foreach { jars =>
        // "local:" jars are not distributed, so reference them by their cluster-side path.
        jars.filter(isLocalUri).foreach { jar =>
          val uri = new URI(jar)
          addClasspathEntry(getClusterPath(sparkConf, uri.getPath()), env)
        }
      }
    }
    populateHadoopClasspath(conf, env)
    sys.env.get(ENV_DIST_CLASSPATH).foreach { cp =>
      addClasspathEntry(getClusterPath(sparkConf, cp), env)
    }
    // Add the localized Hadoop config at the end of the classpath, in case it contains other
    // files (such as configuration files for different services) that are not part of the
    // YARN cluster's config.
    addClasspathEntry(
      buildPath(Environment.PWD.$$(), LOCALIZED_CONF_DIR, LOCALIZED_HADOOP_CONF_DIR), env)
  }
/**
 * Returns a list of URIs representing the user classpath.
 *
 * @param conf Spark configuration.
 * @return The main application jar URI followed by any secondary jar URIs.
 */
def getUserClasspath(conf: SparkConf): Array[URI] = {
  val mainUri = getMainJarUri(conf.get(APP_JAR))
  val secondaryUris = getSecondaryJarUris(conf.get(SECONDARY_JARS))
  (mainUri ++ secondaryUris).toArray
}
// Resolves the main application jar URI: a "local:" URI is kept as-is (it is already
// present on every node); anything else falls back to the well-known localized name
// APP_JAR_NAME under the container's working directory.
private def getMainJarUri(mainJar: Option[String]): Option[URI] = {
  mainJar.flatMap { path =>
    val uri = Utils.resolveURI(path)
    if (uri.getScheme == LOCAL_SCHEME) Some(uri) else None
  }.orElse(Some(new URI(APP_JAR_NAME)))
}
// Expands the optional list of secondary (extra) jars into URIs; an unset config
// yields an empty sequence.
private def getSecondaryJarUris(secondaryJars: Option[Seq[String]]): Seq[URI] =
  secondaryJars.toSeq.flatten.map(jar => new URI(jar))
/**
 * Adds the given path to the classpath, handling "local:" URIs correctly.
 *
 * If an alternate name for the file is given, and it's not a "local:" file, the alternate
 * name will be added to the classpath (relative to the job's work directory).
 *
 * If not a "local:" file and no alternate name, the linkName will be added to the classpath.
 *
 * @param conf Spark configuration.
 * @param hadoopConf Hadoop configuration.
 * @param uri URI to add to classpath (optional).
 * @param fileName Alternate name for the file (optional).
 * @param env Map holding the environment variables.
 */
private def addFileToClasspath(
    conf: SparkConf,
    hadoopConf: Configuration,
    uri: URI,
    fileName: String,
    env: HashMap[String, String]): Unit = {
  if (uri != null && uri.getScheme == LOCAL_SCHEME) {
    // "local:" files already exist on every node; use their cluster-side path directly.
    addClasspathEntry(getClusterPath(conf, uri.getPath), env)
  } else if (fileName != null) {
    // Distributed file localized under an alternate name in the container's working dir.
    addClasspathEntry(buildPath(Environment.PWD.$$(), fileName), env)
  } else if (uri != null) {
    // Distributed file localized under its link name: the URI fragment when present,
    // otherwise the file's own name.
    val localPath = getQualifiedLocalPath(uri, hadoopConf)
    val linkName = Option(uri.getFragment()).getOrElse(localPath.getName())
    addClasspathEntry(buildPath(Environment.PWD.$$(), linkName), env)
  }
}
/**
 * Add the given path to the classpath entry of the given environment map.
 * If the classpath is already set, this appends the new path to the existing classpath.
 *
 * @param path Classpath entry to append.
 * @param env Environment map, mutated in place.
 */
private def addClasspathEntry(path: String, env: HashMap[String, String]): Unit =
  YarnSparkHadoopUtil.addPathToEnvironment(env, Environment.CLASSPATH.name, path)
/**
 * Returns the path to be sent to the NM for a path that is valid on the gateway.
 *
 * This method uses two configuration values:
 *
 *  - spark.yarn.config.gatewayPath: a string that identifies a portion of the input path that may
 *    only be valid in the gateway node.
 *  - spark.yarn.config.replacementPath: a string with which to replace the gateway path. This may
 *    contain, for example, env variable references, which will be expanded by the NMs when
 *    starting containers.
 *
 * If either config is not available, the input path is returned.
 */
def getClusterPath(conf: SparkConf, path: String): String = {
  val localPath = conf.get(GATEWAY_ROOT_PATH)
  val clusterPath = conf.get(REPLACEMENT_ROOT_PATH)
  if (localPath != null && clusterPath != null) {
    // Replace every occurrence of the gateway prefix with the cluster-side value.
    path.replace(localPath, clusterPath)
  } else {
    // At least one of the settings is unset (null): return the path untouched.
    path
  }
}
/**
 * Return whether the two URIs refer to the same file system.
 */
private[spark] def compareUri(srcUri: URI, dstUri: URI): Boolean = {
  // Missing or differing schemes can never match.
  if (srcUri.getScheme() == null || srcUri.getScheme() != dstUri.getScheme()) {
    return false
  }

  val srcAuthority = srcUri.getAuthority()
  val dstAuthority = dstUri.getAuthority()
  // Authority comparison is case-insensitive; note that a null source authority is
  // deliberately not compared here (the host/port checks below still apply).
  if (srcAuthority != null && !srcAuthority.equalsIgnoreCase(dstAuthority)) {
    return false
  }

  var srcHost = srcUri.getHost()
  var dstHost = dstUri.getHost()

  // In HA or when using viewfs, the host part of the URI may not actually be a host, but the
  // name of the HDFS namespace. Those names won't resolve, so avoid even trying if they
  // match.
  if (srcHost != null && dstHost != null && srcHost != dstHost) {
    try {
      // Canonicalize both hosts so a short name and its FQDN still compare equal.
      srcHost = InetAddress.getByName(srcHost).getCanonicalHostName()
      dstHost = InetAddress.getByName(dstHost).getCanonicalHostName()
    } catch {
      case e: UnknownHostException =>
        return false
    }
  }

  Objects.equal(srcHost, dstHost) && srcUri.getPort() == dstUri.getPort()
}
/**
 * Return whether the two file systems are the same, by comparing their URIs
 * (scheme, authority, host and port — see [[compareUri]]).
 */
protected def compareFs(srcFs: FileSystem, destFs: FileSystem): Boolean =
  compareUri(srcFs.getUri(), destFs.getUri())
/**
 * Given a local URI, resolve it and return a qualified local path that corresponds to the URI.
 * This is used for preparing local resources to be included in the container launch context.
 *
 * @param localURI URI of the local resource.
 * @param hadoopConf Hadoop configuration used to access the local file system.
 */
private def getQualifiedLocalPath(localURI: URI, hadoopConf: Configuration): Path = {
  val qualifiedURI =
    if (localURI.getScheme == null) {
      // If not specified, assume this is in the local filesystem to keep the behavior
      // consistent with that of Hadoop
      new URI(FileSystem.getLocal(hadoopConf).makeQualified(new Path(localURI)).toString)
    } else {
      localURI
    }
  new Path(qualifiedURI)
}
/**
 * Whether to consider jars provided by the user to have precedence over the Spark jars when
 * loading user classes.
 */
def isUserClassPathFirst(conf: SparkConf, isDriver: Boolean): Boolean = {
  // Driver and executors are controlled by separate configuration entries.
  val entry = if (isDriver) DRIVER_USER_CLASS_PATH_FIRST else EXECUTOR_USER_CLASS_PATH_FIRST
  conf.get(entry)
}
/**
 * Joins all the path components using Path.SEPARATOR.
 *
 * @param components Path segments to join; no normalization is performed.
 */
def buildPath(components: String*): String = {
  components.mkString(Path.SEPARATOR)
}
/** Returns whether the URI is a "local:" URI. */
def isLocalUri(uri: String): Boolean = {
  // Only the scheme prefix is checked; the remainder of the URI is not validated.
  uri.startsWith(s"$LOCAL_SCHEME:")
}
/** Converts a YARN [[ApplicationReport]] into a [[YarnAppReport]], keeping only non-empty diagnostics. */
def createAppReport(report: ApplicationReport): YarnAppReport = {
  val diagnostics = Option(report.getDiagnostics()).filter(_.nonEmpty)
  YarnAppReport(report.getYarnApplicationState(), report.getFinalApplicationStatus(), diagnostics)
}
/**
 * Create a properly quoted and escaped library path string to be added as a prefix to the command
 * executed by YARN. This is different from normal quoting / escaping due to YARN executing the
 * command through "bash -c".
 *
 * @param libpath Library path to prepend.
 * @param conf Spark configuration, used to translate gateway paths to cluster paths.
 */
def createLibraryPathPrefix(libpath: String, conf: SparkConf): String = {
  val cmdPrefix = if (Utils.isWindows) {
    Utils.libraryPathEnvPrefix(Seq(libpath))
  } else {
    val envName = Utils.libraryPathEnvName
    // For quotes, escape both the quote and the escape character when encoding in the command
    // string.
    val quoted = libpath.replace("\\"", "\\\\\\\\\\\\\\"")
    envName + "=\\\\\\"" + quoted + File.pathSeparator + "$" + envName + "\\\\\\""
  }
  getClusterPath(conf, cmdPrefix)
}
}
/**
 * Entry point used in YARN cluster mode: hands the parsed arguments to [[Client]],
 * which submits the application to the cluster.
 */
private[spark] class YarnClusterApplication extends SparkApplication {

  override def start(args: Array[String], conf: SparkConf): Unit = {
    // SparkSubmit would use yarn cache to distribute files & jars in yarn mode,
    // so remove them from sparkConf here for yarn mode.
    conf.remove("spark.jars")
    conf.remove("spark.files")

    new Client(new ClientArguments(args), conf).run()
  }
}
/**
 * Snapshot of a YARN application's status.
 *
 * @param appState Current YARN application state.
 * @param finalState Final application status reported for the application.
 * @param diagnostics Diagnostics message, if a non-empty one was reported.
 */
private[spark] case class YarnAppReport(
  appState: YarnApplicationState,
  finalState: FinalApplicationStatus,
  diagnostics: Option[String])
| tejasapatil/spark | resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala | Scala | apache-2.0 | 63,187 |
package poly.io
import scala.collection.mutable
/**
* @author Tongfei Chen
*/
private[poly] object Util {

  /**
   * Length of the longest common prefix of two string arrays.
   *
   * Fixed: the previous implementation counted *all* matching positions within the
   * common length rather than the leading run, so e.g. ("a","b","c") vs ("x","b","c")
   * yielded 2 instead of 0, which produced wrong results in [[relativize]].
   */
  private[poly] def lcpLength(xs: Array[String], ys: Array[String]) = {
    val n = math.min(xs.length, ys.length)
    var i = 0
    while (i < n && xs(i) == ys(i)) i += 1
    i
  }

  /**
   * Path components navigating from `xs` to `ys`: one ".." for each segment of `xs`
   * beyond the common prefix, followed by the remainder of `ys`.
   */
  private[poly] def relativize(xs: Array[String], ys: Array[String]) = {
    val a = mutable.ArrayBuffer[String]()
    val lcp = lcpLength(xs, ys)
    for (x ← xs.drop(lcp)) a += ".."
    for (x ← ys.drop(lcp)) a += x
    a.toArray
  }

  /**
   * Resolves the relative components `ys` against the base components `xs`,
   * interpreting ".." (pop last segment) and "." (no-op).
   * Note: a ".." with no remaining base segments will throw (index out of bounds).
   */
  private[poly] def resolve(xs: Array[String], ys: Array[String]) = {
    val a = mutable.ArrayBuffer[String](xs: _*)
    for (x ← ys) {
      x match {
        case ".." => a.remove(a.size - 1)
        case "." => /* do nothing */
        case _ => a += x
      }
    }
    a.toArray
  }

  /**
   * A lazy depth-first tree searcher.
   *
   * Note: children are pushed onto the stack in the order `t` yields them, so
   * siblings are visited in reverse of that order.
   * @author Tongfei Chen
   */
  class DepthFirstTreeSearcher[A](s0: A)(t: A => Traversable[A]) extends Iterator[A] {
    private[this] val stack = mutable.Stack(s0)
    private[this] var curr: A = _
    def hasNext = stack.nonEmpty
    def next() = {
      curr = stack.pop()
      t(curr) foreach stack.push
      curr
    }
  }

}
| ctongfei/poly-io | core/src/main/scala/poly/io/Util.scala | Scala | mit | 1,239 |
package com.datastax.spark.connector.rdd.partitioner
import java.net.InetAddress
import com.datastax.spark.connector.rdd.partitioner.dht.{CassandraNode, BigIntToken, TokenFactory}
import org.junit.Assert._
import org.junit.Test
/** Unit tests for splitting RandomPartitioner token ranges into roughly equal-sized chunks. */
class RandomPartitionerTokenRangeSplitterTest {

  type TokenRange = com.datastax.spark.connector.rdd.partitioner.dht.TokenRange[BigInt, BigIntToken]

  /** Asserts consecutive ranges are contiguous: each range ends where the next starts. */
  private def assertNoHoles(tokenRanges: Seq[TokenRange]) {
    for (Seq(range1, range2) <- tokenRanges.sliding(2))
      assertEquals(range1.end, range2.start)
  }

  @Test
  def testSplit() {
    val node = CassandraNode(InetAddress.getLocalHost, InetAddress.getLocalHost)
    val splitter = new RandomPartitionerTokenRangeSplitter(2.0)
    val rangeLeft = BigInt("0")
    val rangeRight = BigInt("100")
    val range = new TokenRange(
      new BigIntToken(rangeLeft),
      new BigIntToken(rangeRight), Set(node), None)
    val out = splitter.split(range, 20)

    // 2 rows per token on average, so 10 tokens = 20 rows; therefore 10 splits
    assertEquals(10, out.size)
    assertEquals(rangeLeft, out.head.start.value)
    assertEquals(rangeRight, out.last.end.value)
    // Endpoints must be preserved on every split.
    assertTrue(out.forall(_.endpoints == Set(node)))
    assertNoHoles(out)
  }

  @Test
  def testNoSplit() {
    val splitter = new RandomPartitionerTokenRangeSplitter(2.0)
    val rangeLeft = BigInt("0")
    val rangeRight = BigInt("100")
    val range = new TokenRange(
      new BigIntToken(rangeLeft),
      new BigIntToken(rangeRight), Set.empty, None)
    val out = splitter.split(range, 500)

    // range is too small to contain 500 rows
    assertEquals(1, out.size)
    assertEquals(rangeLeft, out.head.start.value)
    assertEquals(rangeRight, out.last.end.value)
  }

  @Test
  def testZeroRows() {
    // A rows-per-token estimate of zero must still yield a single (unsplit) range.
    val splitter = new RandomPartitionerTokenRangeSplitter(0.0)
    val rangeLeft = BigInt("0")
    val rangeRight = BigInt("100")
    val range = new TokenRange(
      new BigIntToken(rangeLeft),
      new BigIntToken(rangeRight), Set.empty, None)
    val out = splitter.split(range, 500)
    assertEquals(1, out.size)
    assertEquals(rangeLeft, out.head.start.value)
    assertEquals(rangeRight, out.last.end.value)
  }

  @Test
  def testWrapAround() {
    // Range wraps past the maximum token back to the start of the ring.
    val splitter = new RandomPartitionerTokenRangeSplitter(2.0)
    val rangeLeft = TokenFactory.RandomPartitionerTokenFactory.maxToken.value - 100
    val rangeRight = BigInt("100")
    val range = new TokenRange(
      new BigIntToken(rangeLeft),
      new BigIntToken(rangeRight), Set.empty, None)
    val out = splitter.split(range, 20)
    assertEquals(20, out.size)
    assertEquals(rangeLeft, out.head.start.value)
    assertEquals(rangeRight, out.last.end.value)
    assertNoHoles(out)
  }
}
| brkyvz/spark-cassandra-connector | spark-cassandra-connector/src/test/scala/com/datastax/spark/connector/rdd/partitioner/RandomPartitionerTokenRangeSplitterTest.scala | Scala | apache-2.0 | 2,732 |
package org.pdfextractor.algorithm.finder.it
import java.util.Locale
import org.apache.commons.lang3.StringUtils
import org.pdfextractor.algorithm.candidate.{CandidateFeatureType, MetaPhraseType}
import org.pdfextractor.algorithm.finder._
import org.pdfextractor.algorithm.finder.it.ItalianRegexPatterns._
import org.pdfextractor.algorithm.parser.{ParseResult, Phrase}
import org.pdfextractor.algorithm.phrase.PhraseTypesRefreshedEvent
import org.pdfextractor.algorithm.regex._
import org.pdfextractor.db.domain.dictionary.PaymentFieldType.NAME
import org.pdfextractor.db.domain.dictionary.SupportedLocales
import org.springframework.stereotype.Service
/**
 * Finder for the name field on Italian invoices. Search phrases come from the
 * phrase-type store and are rebuilt whenever a [[PhraseTypesRefreshedEvent]] fires.
 */
@Service
class ItalianNameFinder extends AbstractFinder(SupportedLocales.ITALY, NAME, None, None, false) {

  /** Rebuilds the search and value regexes from the latest phrase-type definitions. */
  @org.springframework.context.event.EventListener(
    Array(classOf[PhraseTypesRefreshedEvent]))
  def refreshed(): Unit = {
    searchPattern = Some(
      ("^(?ims)" + phraseTypesStore.buildAllPhrases(SupportedLocales.ITALY,
        NAME) + "$").r)
    // Value pattern accepts any text; filtering happens in isValueAllowed.
    valuePattern = Some(("^(?ims)(.*)$").r)
  }

  /** Candidate is allowed unless void, containing a forbidden word, or failing the minimum-name pattern. */
  override def isValueAllowed(value: Any): Boolean = {
    !isVoidText(value.asInstanceOf[String]) &&
      ItNameForbiddenWordsR.findFirstIn(value.asInstanceOf[String]).isEmpty &&
      ItNameMinR.findFirstIn(value.asInstanceOf[String]).nonEmpty
  }

  /** Normalizes whitespace and keeps only the part before the first comma; None for null input. */
  override def parseValue(raw: String): Any = {
    if (Option(raw).isEmpty) None
    else StringUtils.normalizeSpace(raw).split(",")(0)
  }

  /** Attaches the matched phrase's type as candidate metadata. */
  override def buildProperties(phrase: Phrase, parseResult: ParseResult, params: Seq[Any]): Map[CandidateFeatureType, Any] = {
    val phraseType = phraseTypesStore.findType(SupportedLocales.ITALY, NAME, phrase.text)
    Map(MetaPhraseType -> phraseType)
  }
}
| kveskimae/pdfalg | src/main/scala/org/pdfextractor/algorithm/finder/it/ItalianNameFinder.scala | Scala | mit | 1,722 |
package edu.scalanus.compiler
import edu.scalanus.ir.IrTreePrettyPrinter
import edu.scalanus.{EngineTest, FileFixtureTest}
import org.scalatest.{FunSuite, Matchers}
import scala.io.Source
/**
 * Golden-file tests for the compiler: each fixture under compiler/fixtures is compiled
 * and its pretty-printed IR is compared against stored expected results.
 */
class ScalanusCompilerTest extends FunSuite with Matchers with FileFixtureTest with EngineTest {

  override protected def basePath: String = "compiler/fixtures"

  // One test per fixture file.
  fixtures foreach { f =>
    test(getFixtureName(f)) {
      val engine = createEngine
      // NOTE(review): the reader from Source.fromFile is never closed — possible
      // file-handle accumulation across many fixtures; consider closing after compile.
      val compiled = engine.compile(Source.fromFile(f).reader()).asInstanceOf[ScalanusCompiledScript]
      val actual = IrTreePrettyPrinter(compiled.ir)
      // Presumably returns the stored expected output, seeding it from `actual` on
      // the first run — verify against FileFixtureTest.
      val results = getOrCreateResults(f, actual)
      actual shouldEqual results
    }
  }
}
| mkaput/scalanus | src/test/scala/edu/scalanus/compiler/ScalanusCompilerTest.scala | Scala | mit | 692 |
/*
Copyright 2017-2020 Erik Erlandson
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package coulomb
import singleton.ops._
import spire.math._
import coulomb.define._
import coulomb.si._
/** Accepted non-SI metric units (Gram, Liter, Percent, etc) */
package object accepted {
  // Each unit pairs a phantom trait with an implicit DerivedUnit definition giving its
  // conversion coefficient relative to the unit it derives from.

  // Dimensionless ratio.
  trait Percent
  implicit val defineUnitPercent = DerivedUnit[Percent, Unitless](Rational(1, 100), abbv = "%")

  // Angles.
  trait Degree
  implicit val defineUnitDegree = DerivedUnit[Degree, Unitless](scala.math.Pi / 180.0, abbv = "°")

  trait ArcMinute
  implicit val defineUnitArcMinute = DerivedUnit[ArcMinute, Degree](Rational(1, 60), abbv = "'")

  trait ArcSecond
  implicit val defineUnitArcSecond = DerivedUnit[ArcSecond, Degree](Rational(1, 3600), abbv = "\\"")

  // Area and volume.
  trait Hectare
  implicit val defineUnitHectare = DerivedUnit[Hectare, Meter %^ 2](10000, abbv = "ha")

  trait Liter
  implicit val defineUnitLiter = DerivedUnit[Liter, Meter %^ 3](Rational(1, 1000))

  trait Milliliter
  implicit val defineUnitMilliliter = DerivedUnit[Milliliter, Liter](Rational(1, 1000), abbv = "ml")

  // Mass.
  trait Tonne
  implicit val defineUnitTonne = DerivedUnit[Tonne, Kilogram](1000)

  // Pressure.
  trait Millibar
  implicit val defineUnitMillibar = DerivedUnit[Millibar, Kilogram %/ (Meter %* (Second %^ 2))](100, abbv = "mbar")

  // Lengths.
  trait Kilometer
  implicit val defineUnitKilometer = DerivedUnit[Kilometer, Meter](1000, abbv = "km")

  trait Millimeter
  implicit val defineUnitMillimeter = DerivedUnit[Millimeter, Meter](Rational(1, 1000), abbv = "mm")

  trait Centimeter
  implicit val defineUnitCentimeter = DerivedUnit[Centimeter, Meter](Rational(1, 100), abbv = "cm")

  trait Gram
  implicit val defineUnitGram = DerivedUnit[Gram, Kilogram](Rational(1, 1000), abbv = "g")
}
| erikerlandson/coulomb | coulomb-accepted-units/src/main/scala/coulomb/accepted/package.scala | Scala | apache-2.0 | 2,217 |
/*
* Copyright 2016 Alexey Kuzin <amkuzink@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package choiceroulette.gui.menubar
import choiceroulette.gui.{GuiApplication, ViewType}
import scalafx.Includes.handle
import scalafx.geometry.Insets
import scalafx.scene.control.{Menu, MenuBar, MenuItem}
import scalafx.scene.input.{KeyCode, KeyCodeCombination, KeyCombination}
/** Application menu bar.
 *
 * @author Alexey Kuzin <amkuzink@gmail.com>
 */
class AppMenuBar(menuBarController: MenuBarController) extends MenuBar {

  // Menus whose item labels depend on controller state implement this so the bar
  // can refresh them via resetMenu().
  private trait ResetableMenu {
    def resetItems(): Unit
  }

  private val mHelpMenu = new Menu("Help") {
    private val mAboutItem = new MenuItem("About") {
      onAction = handle(mAboutStage.show())
    }
    // Lazy so the stage is only created the first time "About" is opened.
    private lazy val mAboutStage = new AboutStage(GuiApplication.stage)

    items = List(mAboutItem)
  }

  private val mFileMenu = new Menu("File") {
    private val mApplyCss = new MenuItem("Apply Theme") {
      accelerator = new KeyCodeCombination(KeyCode.A, KeyCombination.ShortcutDown, KeyCombination.ShiftDown)
      onAction = handle(menuBarController.openCssFile())
    }
    private val mGrabFromFile = new MenuItem("Grab Values From...") {
      accelerator = new KeyCodeCombination(KeyCode.G, KeyCombination.ShortcutDown)
      onAction = handle(menuBarController.chooseFileToGrab())
    }
    private val mSaveResult = new MenuItem("Save Result To...") {
      accelerator = new KeyCodeCombination(KeyCode.S, KeyCombination.ShortcutDown)
      onAction = handle(menuBarController.chooseSaveFile())
    }

    items = List(mApplyCss, mGrabFromFile, mSaveResult)
  }

  private val mViewMenu = new Menu("View") with ResetableMenu {
    // The item label shows the view the user would switch TO (opposite of current).
    private def changeViewTitle: String = neededViewType.toString + " View"
    private def neededViewType: ViewType = {
      menuBarController.viewType match {
        case ViewType.Normal => ViewType.Compact
        case ViewType.Compact => ViewType.Normal
      }
    }

    private val mChangeView = new MenuItem(changeViewTitle) {
      accelerator = new KeyCodeCombination(KeyCode.C, KeyCombination.ShortcutDown, KeyCombination.ShiftDown)
      onAction = handle {
        menuBarController.viewType = neededViewType
        // Refresh the label after toggling the view.
        text = changeViewTitle
      }
    }

    items = List(mChangeView)

    override def resetItems(): Unit = {
      mChangeView.text = changeViewTitle
    }
  }

  private val mRunMenu = new Menu("Run") with ResetableMenu {
    // Label reflects the action that would be taken given the current grabbing state.
    private def grabbingTitle: String = {
      if (menuBarController.isGrabbingEnabled)
        "Stop Grabbing"
      else
        "Start Grabbing"
    }

    private val mGrabbing = new MenuItem(grabbingTitle) {
      accelerator = new KeyCodeCombination(KeyCode.R, KeyCombination.ShortcutDown)
      onAction = handle {
        menuBarController.setGrabbing(!menuBarController.isGrabbingEnabled)
        text = grabbingTitle
      }
    }

    items = List(mGrabbing)

    override def resetItems(): Unit = {
      mGrabbing.text = grabbingTitle
    }
  }

  /** Re-syncs state-dependent menu labels with the controller. */
  def resetMenu(): Unit = {
    mRunMenu.resetItems()
    mViewMenu.resetItems()
  }

  useSystemMenuBar = true
  padding = Insets(0)
  style = "-fx-border-style: solid;" +
    "-fx-border-color: grey;" +
    "-fx-border-width: 0 1px 1px 0;"

  menus = List(mFileMenu, mViewMenu, mRunMenu, mHelpMenu)
}
| leviathan941/choiceroulette | guiapp/src/main/scala/choiceroulette/gui/menubar/AppMenuBar.scala | Scala | apache-2.0 | 3,834 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.common.data
import slamdata.Predef._
import quasar.Qspec
import quasar.contrib.iota.copkTraverse
import quasar.ejson.EJson
import qdata.time.DateTimeInterval
import matryoshka.implicits._
import matryoshka.patterns._
import scalaz._, Scalaz._
import java.time.{LocalDate, LocalTime}
/** Round-trip properties for the Data <=> EJson conversion. */
class DataSpec extends Qspec with DataGenerators {

  // Folds Data into EJson and rebuilds it. Data.NA is short-circuited to itself
  // because it does not survive the round trip.
  def roundtrip(data: Data): Option[Data] =
    data.hyloM[Option, CoEnv[Data, EJson, ?], Data](
      interpretM[Option, EJson, Data, Data]({
        case d @ Data.NA => d.some // Data.NA does not roundtrip
        case _ => None
      },
      Data.fromEJson >>> (_.some)),
      Data.toEJson[EJson].apply(_).some)

  "round trip a date" >> {
    val data: Data = Data.LocalDate(LocalDate.of(1992, 6, 30))
    roundtrip(data) must_=== data.some
  }

  "round trip a time" >> {
    val data: Data = Data.LocalTime(LocalTime.of(7, 16, 30, 17))
    roundtrip(data) must_=== data.some
  }

  "round trip an interval" >> {
    val data: Data = Data.Interval(DateTimeInterval.make(1982, 4, 12, 18, 123456789))
    roundtrip(data) must_=== data.some
  }

  "round trip an interval to nano precision" >> {
    val data: Data = Data.Interval(DateTimeInterval.ofNanos(1811451749862000000L))
    roundtrip(data) must_=== data.some
  }

  // Property check over arbitrary generated Data values.
  "round trip Data => EJson => Data" >> prop { data: Data =>
    roundtrip(data) must_=== data.some
  }.set(minTestsOk = 1000)
}
| slamdata/quasar | common/src/test/scala/quasar/common/data/DataSpec.scala | Scala | apache-2.0 | 2,018 |
package com.github.vitalsoftware.scalaredox.models
import com.github.vitalsoftware.macros._
import com.github.vitalsoftware.util.RobustPrimitives
/**
 * Provider responsible for a Document
 *
 * @param ID ID of the Provider responsible for the document. This ID is required for Inpatient Visits
 * @param IDType ID type of the ID for the Provider responsible for the document
 * @param FirstName First name of the Provider responsible for the document
 * @param LastName Last name of the Provider responsible for the document
 * @param Type The type of provider for this referral. One of the following: "Referring Provider", "Referred To Provider", "Other", "Patient PCP"
 * @param Credentials List of credentials for the Provider responsible for the document. e.g. MD, PhD
 * @param Address Provider's address
 * @param Location Care location associated with the Provider (presumed — confirm against Redox docs)
 * @param PhoneNumber Provider's phone number
 * @param EmailAddresses Provider's email addresses
 * @param Role Coded role of the Provider (presumed — confirm against Redox docs)
 */
@jsonDefaults case class Provider(
  ID: Option[String] = None,
  IDType: Option[String] = None,
  FirstName: Option[String] = None,
  LastName: Option[String] = None,
  Type: Option[String] = None,
  Credentials: Seq[String] = Seq.empty,
  Address: Option[Address] = None,
  Location: Option[CareLocation] = None,
  PhoneNumber: Option[PhoneNumber] = None,
  EmailAddresses: Seq[String] = Seq.empty,
  Role: Option[BasicCode] = None
) extends ProviderLike

// Companion mixes in RobustPrimitives (presumably lenient primitive JSON parsing).
object Provider extends RobustPrimitives
/** Common read-only interface shared by provider-shaped models. */
trait ProviderLike {
  def ID: Option[String]
  def IDType: Option[String]
  def FirstName: Option[String]
  def LastName: Option[String]
  def Type: Option[String]
  def Credentials: Seq[String]
  def Address: Option[Address]
  def Location: Option[CareLocation]
}
/** Mixin exposing an optional address. */
trait WithAddress { def Address: Option[Address] }
/** Mixin exposing an optional phone number. */
trait WithPhoneNumber { def PhoneNumber: Option[PhoneNumber] }
/** Mixin exposing a list of email addresses. */
trait WithEmails { def EmailAddresses: Seq[String] }
/** Convenience aggregate of the address, phone and email mixins. */
trait WithContactDetails extends WithAddress with WithPhoneNumber with WithEmails
/** Minimal person record with contact details (name, address, phone, emails, credentials). */
@jsonDefaults case class BasicPerson(
  FirstName: Option[String] = None,
  LastName: Option[String] = None,
  Address: Option[Address] = None,
  PhoneNumber: Option[PhoneNumber] = None,
  EmailAddresses: Seq[String] = Seq.empty,
  Credentials: Option[String] = None
) extends WithContactDetails

// Companion mixes in RobustPrimitives (presumably lenient primitive JSON parsing).
object BasicPerson extends RobustPrimitives
| vital-software/scala-redox | src/main/scala/com/github/vitalsoftware/scalaredox/models/Provider.scala | Scala | mit | 2,175 |
/*
* Shadowsocks - A shadowsocks client for Android
* Copyright (C) 2014 <max.c.lv@gmail.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
* ___====-_ _-====___
* _--^^^#####// \\\\#####^^^--_
* _-^##########// ( ) \\\\##########^-_
* -############// |\\^^/| \\\\############-
* _/############// (@::@) \\\\############\\_
* /#############(( \\\\// ))#############\\
* -###############\\\\ (oo) //###############-
* -#################\\\\ / VV \\ //#################-
* -###################\\\\/ \\//###################-
* _#/|##########/\\######( /\\ )######/\\##########|\\#_
* |/ |#/\\#/\\#/\\/ \\#/\\##\\ | | /##/\\#/ \\/\\#/\\#/\\#| \\|
* ` |/ V V ` V \\#\\| | | |/#/ V ' V V \\| '
* ` ` ` ` / | | | | \\ ' ' ' '
* ( | | | | )
* __\\ | | | | /__
* (vvv(VVV)(VVV)vvv)
*
* HERE BE DRAGONS
*
*/
package com.github.shadowsocks.utils
import java.net._
import java.security.MessageDigest
import android.animation.{Animator, AnimatorListenerAdapter}
import android.content.pm.PackageManager
import android.content.{Context, Intent}
import android.graphics._
import android.os.Build
import android.provider.Settings
import android.util.{Base64, DisplayMetrics, Log}
import android.view.View.MeasureSpec
import android.view.{Gravity, View, Window}
import android.widget.Toast
import com.github.shadowsocks.ShadowsocksApplication.app
import com.github.shadowsocks.{BuildConfig, ShadowsocksRunnerService}
import eu.chainfire.libsuperuser.Shell
import org.xbill.DNS._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Failure, Try}
/** Miscellaneous helpers: UI maths, DNS resolution, service control, root-shell tricks. */
object Utils {
  private val TAG = "Shadowsocks"

  def isLollipopOrAbove: Boolean = Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP

  /** Base64-encoded SHA-1 digest of the app's first package signature. */
  def getSignature(context: Context): String = {
    val info = context
      .getPackageManager
      .getPackageInfo(context.getPackageName, PackageManager.GET_SIGNATURES)
    val mdg = MessageDigest.getInstance("SHA-1")
    mdg.update(info.signatures(0).toByteArray)
    new String(Base64.encode(mdg.digest, 0))
  }

  /** Converts density-independent pixels to physical pixels using the display's xdpi. */
  def dpToPx(context: Context, dp: Int): Int =
    Math.round(dp * (context.getResources.getDisplayMetrics.xdpi / DisplayMetrics.DENSITY_DEFAULT))

  /*
   * round or floor depending on whether you are using offsets(floor) or
   * widths(round)
   */

  // Based on: http://stackoverflow.com/a/21026866/2245107
  /** Positions a toast centered horizontally under `view`, shifted by (offsetX, offsetY). Returns the toast. */
  def positionToast(toast: Toast, view: View, window: Window, offsetX: Int = 0, offsetY: Int = 0) = {
    val rect = new Rect
    window.getDecorView.getWindowVisibleDisplayFrame(rect)
    val viewLocation = new Array[Int](2)
    view.getLocationInWindow(viewLocation)
    val metrics = new DisplayMetrics
    window.getWindowManager.getDefaultDisplay.getMetrics(metrics)
    val toastView = toast.getView
    // Measure first so getMeasuredWidth below is meaningful.
    toastView.measure(MeasureSpec.makeMeasureSpec(metrics.widthPixels, MeasureSpec.UNSPECIFIED),
      MeasureSpec.makeMeasureSpec(metrics.heightPixels, MeasureSpec.UNSPECIFIED))
    toast.setGravity(Gravity.LEFT | Gravity.TOP,
      viewLocation(0) - rect.left + (view.getWidth - toast.getView.getMeasuredWidth) / 2 + offsetX,
      viewLocation(1) - rect.top + view.getHeight + offsetY)
    toast
  }

  /** Cross-fades `from` out and `to` in using the platform's short animation time. */
  def crossFade(context: Context, from: View, to: View) {
    def shortAnimTime = context.getResources.getInteger(android.R.integer.config_shortAnimTime)
    to.setAlpha(0)
    to.setVisibility(View.VISIBLE)
    to.animate().alpha(1).setDuration(shortAnimTime)
    from.animate().alpha(0).setDuration(shortAnimTime).setListener(new AnimatorListenerAdapter {
      override def onAnimationEnd(animation: Animator) = from.setVisibility(View.GONE)
    })
  }

  /** Runs `op` with a PrintWriter on `f`, always closing the writer afterwards. */
  def printToFile(f: java.io.File)(op: java.io.PrintWriter => Unit) {
    val p = new java.io.PrintWriter(f)
    try {
      op(p)
    } finally {
      p.close()
    }
  }

  // Because /sys/class/net/* isn't accessible since API level 24
  // Shell script flushing the DNS resolver cache of every interface that is not down.
  final val FLUSH_DNS = "for if in /sys/class/net/*; do " +
    "if [ \\"down\\" != $(cat $if/operstate) ]; then " + // up or unknown
    "ndc resolver flushif ${if##*/}; " +
    "fi " +
    "done; echo done"

  // Blocked > 3 seconds
  /**
   * Tries to flush DNS via a root shell; if that yields no output and the API level
   * allows it (< 17), falls back to toggling airplane mode. Returns whether either
   * approach was taken.
   */
  def toggleAirplaneMode(context: Context) = {
    val result = Shell.SU.run(FLUSH_DNS)
    if (result != null && !result.isEmpty) true else if (Build.VERSION.SDK_INT < 17) {
      toggleBelowApiLevel17(context)
      true
    } else false
  }

  //noinspection ScalaDeprecation
  // Toggles airplane mode on then off; blocks the calling thread for ~3 seconds.
  private def toggleBelowApiLevel17(context: Context) {
    // Android 4.2 below
    Settings.System.putInt(context.getContentResolver, Settings.System.AIRPLANE_MODE_ON, 1)
    val enableIntent = new Intent(Intent.ACTION_AIRPLANE_MODE_CHANGED)
    enableIntent.putExtra("state", true)
    context.sendBroadcast(enableIntent)
    Thread.sleep(3000)
    Settings.System.putInt(context.getContentResolver, Settings.System.AIRPLANE_MODE_ON, 0)
    val disableIntent = new Intent(Intent.ACTION_AIRPLANE_MODE_CHANGED)
    disableIntent.putExtra("state", false)
    context.sendBroadcast(disableIntent)
  }

  /**
   * Resolves `host` to a record of the given DNS type (Type.A / Type.AAAA) using
   * 114.114.114.114 with a 5s timeout, picking a random record. None on any failure.
   */
  def resolve(host: String, addrType: Int): Option[String] = {
    try {
      val lookup = new Lookup(host, addrType)
      val resolver = new SimpleResolver("114.114.114.114")
      resolver.setTimeout(5)
      lookup.setResolver(resolver)
      val result = lookup.run()
      if (result == null) return None
      // Shuffle for rudimentary load balancing between returned records.
      val records = scala.util.Random.shuffle(result.toList)
      for (r <- records) {
        addrType match {
          case Type.A =>
            return Some(r.asInstanceOf[ARecord].getAddress.getHostAddress)
          case Type.AAAA =>
            return Some(r.asInstanceOf[AAAARecord].getAddress.getHostAddress)
        }
      }
    } catch {
      case e: Exception =>
    }
    None
  }

  /** Resolves `host` via the system resolver; None if unknown. */
  def resolve(host: String): Option[String] = {
    try {
      val addr = InetAddress.getByName(host)
      Some(addr.getHostAddress)
    } catch {
      case e: UnknownHostException => None
    }
  }

  /** Resolves `host`, preferring AAAA when IPv6 is requested and supported, then A, then the system resolver. */
  def resolve(host: String, enableIPv6: Boolean): Option[String] = {
    if (enableIPv6 && Utils.isIPv6Support) {
      resolve(host, Type.AAAA) match {
        case Some(addr) =>
          return Some(addr)
        case None =>
      }
    }
    resolve(host, Type.A) match {
      case Some(addr) =>
        return Some(addr)
      case None =>
    }
    resolve(host) match {
      case Some(addr) =>
        return Some(addr)
      case None =>
    }
    None
  }

  // Looked up reflectively — presumably a hidden Android InetAddress API; verify.
  private lazy val isNumericMethod = classOf[InetAddress].getMethod("isNumeric", classOf[String])

  def isNumeric(address: String): Boolean = isNumericMethod.invoke(null, address).asInstanceOf[Boolean]

  /**
   * If there exists a valid IPv6 interface
   */
  def isIPv6Support: Boolean = {
    try {
      val interfaces = NetworkInterface.getNetworkInterfaces
      while (interfaces.hasMoreElements) {
        val intf = interfaces.nextElement()
        val addrs = intf.getInetAddresses
        while (addrs.hasMoreElements) {
          val addr = addrs.nextElement()
          // Only non-loopback, non-link-local IPv6 addresses count as "support".
          if (!addr.isLoopbackAddress && !addr.isLinkLocalAddress) {
            if (addr.isInstanceOf[Inet6Address]) {
              if (BuildConfig.DEBUG) Log.d(TAG, "IPv6 address detected")
              return true
            }
          }
        }
      }
    } catch {
      case ex: Exception =>
        Log.e(TAG, "Failed to get interfaces' addresses.", ex)
    }
    false
  }

  /** Starts the runner service, but only if settings mark this app version as installed. */
  def startSsService(context: Context) {
    val isInstalled: Boolean = app.settings.getBoolean(app.getVersionName, false)
    if (!isInstalled) return
    val intent = new Intent(context, classOf[ShadowsocksRunnerService])
    context.startService(intent)
  }

  /** Broadcasts the CLOSE action so the running service shuts itself down. */
  def stopSsService(context: Context) {
    val intent = new Intent(Action.CLOSE)
    context.sendBroadcast(intent)
  }

  // Logs failures of fire-and-forget futures instead of silently dropping them.
  private val handleFailure: Try[_] => Unit = {
    case Failure(e) => e.printStackTrace()
    case _ =>
  }

  /** Runs `f` on the global execution context, printing (not rethrowing) any failure. */
  def ThrowableFuture[T](f: => T) = Future(f) onComplete handleFailure
}
| otoil/shadowsocks-android | src/main/scala/com/github/shadowsocks/utils/Utils.scala | Scala | gpl-3.0 | 8,879 |
package obj
/** Enumeration of the materials an object can be made of. */
object Material extends Enumeration {
  /** Alias so `Material.Material` can be used as the value type. */
  type Material = Value

  // Declared one per line; ids are assigned in declaration order, names from the vals.
  val PLASTIC = Value
  val GLASS = Value
  val METAL = Value
  val PAPER = Value
}
package geek.lawsof.physics.lib.equip
import java.util
import cpw.mods.fml.common.registry.GameRegistry
import geek.lawsof.physics.Reference
import geek.lawsof.physics.lib.CreativeTabBase
import geek.lawsof.physics.lib.equip.types.ArmorTypeBase
import net.minecraft.entity.Entity
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.item.{ItemArmor, ItemStack}
/**
* Created by anshuman on 28-05-2014.
*/
// Registry-name suffix for each armor slot, indexed by armor part
// (0 = helmet, 1 = chestplate, 2 = leggings, 3 = boots).
object ArmorBase {
  val suffixes = Array("helmet", "chestplate", "leggings", "boots")
}
/** A single armor piece built from an [[ArmorTypeBase]]; registers itself with
  * the game registry at construction time. `armorPart` selects the slot
  * (see [[ArmorBase.suffixes]]). */
class ArmorBase(val armorMaterial: ArmorTypeBase, val armorPart: Int, ctab: CreativeTabBase) extends ItemArmor(armorMaterial.armorType, 0, armorPart) {
  /** Internal registry name, e.g. "<material>_helmet". */
  val intName = s"${armorMaterial.armorName}_${ArmorBase.suffixes(armorPart)}"
  // Configure the item and register it as part of construction.
  setTextureName(s"${Reference.MOD_ID}:$intName")
  setUnlocalizedName(intName)
  setCreativeTab(ctab)
  GameRegistry.registerItem(this, intName)
  /** Appends remaining-durability and protection lines to the item tooltip. */
  override def addInformation(stack: ItemStack, par2EntityPlayer: EntityPlayer, par3List: util.List[_], par4: Boolean) = {
    val tooltip = par3List.asInstanceOf[util.List[String]]
    tooltip.add(s"HP : ${stack.getMaxDamage - stack.getItemDamage}/${stack.getMaxDamage}")
    tooltip.add(s"Protection : ${armorMaterial.reductionAmounts(armorPart)}")
  }
  /** Resolves the worn-armor model texture; slot 2 uses texture layer 2,
    * every other slot uses layer 1. */
  override def getArmorTexture(stack: ItemStack, entity: Entity, slot: Int, `type`: String): String = {
    val layer = if (slot == 2) "_2" else "_1"
    val armorName = stack.getItem.asInstanceOf[ArmorBase].getArmorMaterial.name()
    s"${Reference.MOD_ID}:/textures/models/armor/$armorName$layer.png"
  }
  /** Convenience factory for an ItemStack of this armor piece. */
  def newItemStack(amt: Int = 1) = new ItemStack(this, amt)
}
| GeckoTheGeek42/TheLawsOfPhysics | src/main/scala/geek/lawsof/physics/lib/equip/ArmorBase.scala | Scala | mit | 1,823 |
/*
* Copyright 2014 Renaud Bruneliere
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalaopt.stdapps.learning.nnet
import com.github.bruneli.scalaopt.core._
import com.github.bruneli.scalaopt.stdapps.learning.nnet.{FFNeuralNetwork, LossType}
import com.github.bruneli.scalaopt.stdapps.learning.nnet.activation.{LinearFunction, LogisticFunction}
import org.scalatest._
import org.scalatest.Matchers._
import LossType._
import com.github.bruneli.scalaopt.core.linalg.DenseVector
import com.github.bruneli.scalaopt.core.variable._
/**
* @author bruneli
*/
// Unit tests for FFNeuralNetwork: weight bookkeeping (countWeights/splitWeights),
// forward propagation, backward propagation and analytic-vs-numeric gradients.
class FFNeuralNetworkSpec extends FlatSpec with Matchers {
  "countWeights" should "count (24,15) weights for a network with (5,4,3) neurons" in {
    // Each layer adds (inputs + 1 bias) * outputs weights.
    val weightsPerLayer = FFNeuralNetwork.countWeights(Vector(5, 4, 3))
    weightsPerLayer shouldBe Vector((5 + 1) * 4, (4 + 1) * 3)
  }
  "splitWeights" should "split 39 weights into 24 + 15 weights" in {
    val weights1 = DenseVector.fill[UnconstrainedVariable](24)(1.0)
    val weights2 = DenseVector.fill[UnconstrainedVariable](15)(2.0)
    val weightsPerLayer = FFNeuralNetwork.splitWeights(Vector(24, 15), weights1 ++ weights2)
    weightsPerLayer should have size 2
    weightsPerLayer(0).length shouldBe 24
    weightsPerLayer(0) shouldBe weights1
    weightsPerLayer(1).length shouldBe 15
    weightsPerLayer(1) shouldBe weights2
  }
  it should "split 40 weights into equal parts of 10" in {
    // Weight value encodes the chunk it should land in: 0,0,...,1,1,...,3.
    val weights = new UnconstrainedVariables((0 until 40).map(i => (i / 10).toDouble).toArray)
    val weightsPerNeuron = FFNeuralNetwork.splitWeights(10, weights)
    weightsPerNeuron should have size 4
    for ((weights, index) <- weightsPerNeuron.zipWithIndex) {
      weights shouldBe (1 to 10).map(i => index.toDouble).toVector
    }
  }
  "forward" should "propagate inputs up to the output layer" in {
    val neuronsPerLayer = Vector(3, 5, 1)
    val network = FFNeuralNetwork(neuronsPerLayer, 0.5, MeanSquaredError, LogisticFunction, LinearFunction)
    // Recover the per-neuron weights so each excitation can be recomputed by hand.
    val weightsPerLayer = FFNeuralNetwork.countWeights(neuronsPerLayer)
    val weights = FFNeuralNetwork.splitWeights(weightsPerLayer, network.weights)
      .zipWithIndex
      .map { case (w, i) => FFNeuralNetwork.splitWeights(neuronsPerLayer(i) + 1, w) }
    val inputs = Inputs(1.0, 0.5, 1.5)
    val activatedNetwork = network.forward(inputs)
    // Hidden layer with sigmoid response
    val weights1 = weights(0)
    val outputs1 =
      for ((neuron, i) <- activatedNetwork.layers(0).zipWithIndex) yield {
        // First weight is the bias, the rest dot the layer inputs.
        val excitation = weights1(i).force.head + (inputs dot weights1(i).force.tail)
        val output = LogisticFunction(excitation)
        neuron.excitation shouldBe excitation +- 1.0e-8
        neuron.output shouldBe output +- 1.0e-8
        (excitation, output)
      }
    // Output layer with linear response
    val weights2 = weights(1)
    for ((neuron, i) <- activatedNetwork.layers(1).zipWithIndex) {
      val outputsLayer1 = new Outputs(outputs1.map(_._2).toArray)
      val excitation = weights2(i).force.head + (outputsLayer1 dot weights2(i).force.tail)
      val output = LinearFunction(excitation)
      neuron.excitation shouldBe excitation +- 1.0e-8
      neuron.output shouldBe output +- 1.0e-8
    }
  }
  "backward" should "propagate target responses down to the inner layer" in {
    val neuronsPerLayer = Vector(3, 5, 1)
    val network = FFNeuralNetwork(neuronsPerLayer, 0.5, MeanSquaredError, LogisticFunction, LinearFunction)
    val weightsPerLayer = FFNeuralNetwork.countWeights(neuronsPerLayer)
    val weights = FFNeuralNetwork.splitWeights(weightsPerLayer, network.weights)
      .zipWithIndex
      .map { case (w, i) => FFNeuralNetwork.splitWeights(neuronsPerLayer(i) + 1, w) }
    val inputs = Inputs(1.0, 0.5, 1.5)
    val targets = Outputs(1.0)
    val finalNetwork = network.forward(inputs).backward(targets)
    // Propagate inputs in hidden layer
    val weights1 = weights(0)
    val outputs1 =
      for ((neuron, i) <- finalNetwork.layers(0).zipWithIndex) yield {
        val excitation = weights1(i).force.head + (inputs dot weights1(i).force.tail)
        LogisticFunction(excitation)
      }
    // Check gradient in output layer
    val weights2 = weights(1)
    val deltas = for ((neuron, i) <- finalNetwork.layers(1).zipWithIndex) yield {
      val outputsLayer1 = new Outputs(outputs1.toArray)
      val excitation = weights2(i).force.head + (outputsLayer1 dot weights2(i).force.tail)
      val output = LinearFunction(excitation)
      // For MSE with a linear output, delta = output - target; gradient wrt the
      // bias is delta, wrt each weight it is (previous-layer output) * delta.
      val delta = output - targets(0)
      for ((derivative, j) <- neuron.gradient.force.zipWithIndex) {
        val expDerivative = if (j == 0) delta else outputs1(j - 1) * delta
        derivative shouldBe expDerivative +- 1.0e-8
      }
      delta
    }
    // Propagate errors to the inner layer and check gradient
    for ((neuron, i) <- finalNetwork.layers(0).zipWithIndex) yield {
      val error = deltas(0) * weights2(0)(i + 1) * LogisticFunction.derivative(neuron.output)
      for ((derivative, j) <- neuron.gradient.force.zipWithIndex) {
        val expDerivative = if (j == 0) error else inputs(j - 1) * error
        derivative shouldBe expDerivative +- 1.0e-8
      }
    }
  }
  "gradient" should "be output - target when working with cross-entropy" in {
    val trueWeights = UnconstrainedVariables(0.1, -0.25, 0.5)
    val network = FFNeuralNetwork(
      Vector(2, 1),
      trueWeights,
      CrossEntropy,
      LogisticFunction,
      LogisticFunction)
    val inputs = Inputs(0.5, 1.5)
    val targets = Outputs(1.0)
    val eps = 1.0e-8
    val finalNetwork = network.forward(inputs).backward(targets)
    // Closed-form single-neuron logistic output (bias prepended as Input(1.0)).
    def output(x: InputsType) = {
      val net = (Input(1.0) +: x.force) dot trueWeights
      1.0 / (1.0 + Math.exp(-net))
    }
    // Cross-entropy loss for a Bernoulli target y.
    def f(x: InputsType, y: Output, weights: UnconstrainedVariablesType = trueWeights) = {
      val net = (Input(1.0) +: x.force) dot weights
      val output = 1.0 / (1.0 + Math.exp(-net))
      val entropy0 = if (y > 0.0) -y * Math.log(output / y) else 0.0
      val entropy1 = if (y < 1.0) (1.0 - y) * Math.log((1.0 - output) / (1.0 - y)) else 0.0
      entropy0 + entropy1
    }
    // Analytic cross-entropy gradient: (output - target) * augmented input.
    def df(x: InputsType, y: Output) = {
      val error = output(x) - y
      (Input(1.0) +: x.force) * error
    }
    val gradient1 = finalNetwork.gradient
    val gradient2 = df(inputs, targets.head)
    val loss = f(inputs, targets.head)
    finalNetwork.outputs.coordinate(0) shouldBe output(inputs) +- 1.0e-5
    finalNetwork.loss shouldBe loss +- 1.0e-5
    // Cross-check each analytic partial derivative against a finite difference.
    for {(dx, i) <- gradient1.force.zip(gradient2.force).zipWithIndex
         (dx1, dx2) = dx} {
      val dw = trueWeights.updated(i, trueWeights(i) + eps)
      val dloss = f(inputs, targets.head, dw)
      val dx3 = (dloss - loss) / eps
      dx1.x shouldBe dx2.x +- 1.0e-5
      dx1.x shouldBe dx3 +- 1.0e-5
    }
  }
}
| bruneli/scalaopt | std-apps/src/test/scala/com.github.bruneli.scalaopt.stdapps.learning.nnet/FFNeuralNetworkSpec.scala | Scala | apache-2.0 | 7,322 |
/** Entry point: simply expands and runs the `mcr()` macro. */
object Test {
  def main(args: Array[String]): Unit = mcr()
}
| som-snytt/dotty | tests/run-macros/i7048/Test_2.scala | Scala | apache-2.0 | 72 |
/*
* Copyright (C) 2012 Romain Reuillon
* Copyright (C) 2014 Jonathan Passerat-Palmbach
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.plugin.environment.slurm
import fr.iscpif.gridscale.ssh.{ SSHConnectionCache, SSHAuthentication, SSHJobService, SSHHost }
import fr.iscpif.gridscale.slurm.{ SLURMJobService ⇒ GSSLURMJobService, SLURMJobDescription }
import java.net.URI
import org.openmole.core.batch.control._
import org.openmole.core.batch.environment._
import org.openmole.core.batch.jobservice.{ BatchJob, BatchJobId }
import org.openmole.core.tools.service.Logger
import org.openmole.core.workspace.Workspace
import org.openmole.plugin.environment.ssh.{ SharedStorage, SSHService }
import org.openmole.core.batch.storage.SimpleStorage
import org.openmole.plugin.environment.gridscale._
import concurrent.duration._
// Companion providing the `Log` member used by the trait below for submission logging.
object SLURMJobService extends Logger
import SLURMJobService._
// Submits OpenMOLE batch jobs to a SLURM cluster over SSH via GridScale.
trait SLURMJobService extends GridScaleJobService with SSHHost with SharedStorage { js ⇒
  def environment: SLURMEnvironment
  // GridScale SLURM service bound to this host's SSH credentials; connections are cached.
  val jobService = new GSSLURMJobService with SSHConnectionCache {
    def host = js.host
    def user = js.user
    def credential = js.credential
    override def port = js.port
    override def timeout = Workspace.preferenceAsDuration(SSHService.timeout)
  }
  // Uploads the job script, builds the SLURM description from the environment's
  // settings, submits it, and wraps the handle in a BatchJob.
  protected def _submit(serializedJob: SerializedJob) = {
    val (remoteScript, result) = buildScript(serializedJob)
    val jobDescription = new SLURMJobDescription {
      val executable = "/bin/bash"
      val arguments = remoteScript
      override val queue = environment.queue
      val workDirectory = serializedJob.path
      override val wallTime = environment.wallTime
      override val memory = Some(environment.requiredMemory)
      override val nodes = environment.nodes
      override val coresByNode = environment.coresByNode orElse environment.threads
      override val qos = environment.qos
      override val gres = environment.gres
      override val constraints = environment.constraints
      // TODO nodes and coreByNode not supported (yet) by the SLURM plugin in GridScale
      /// @see threads
      //    override val nodes = environment.nodes orElse environment.threads
      //    override val coreByNode = environment.coreByNode orElse environment.threads
    }
    val job = js.jobService.submit(jobDescription)
    Log.logger.fine(s"SLURM job [${job.slurmId}], description: \\n ${jobDescription.toSLURM}")
    new BatchJob with BatchJobId {
      val jobService = js
      val id = job
      val resultPath = result
    }
  }
}
| ISCPIF/PSEExperiments | openmole-src/openmole/plugins/org.openmole.plugin.environment.slurm/src/main/scala/org/openmole/plugin/environment/slurm/SLURMJobService.scala | Scala | agpl-3.0 | 3,181 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.convert.text
import java.io._
import com.typesafe.config.Config
import org.apache.commons.csv.{CSVFormat, QuoteMode}
import org.locationtech.geomesa.convert.Transformers.{EvaluationContext, Expr}
import org.locationtech.geomesa.convert.{AbstractSimpleFeatureConverterFactory, Field, LinesToSimpleFeatureConverter}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import scala.collection.immutable.IndexedSeq
// Builds DelimitedTextConverter instances from a typesafe Config: resolves the
// CSV format by name, applies optional quote/escape overrides, and reads
// skip-lines / pipe-size options.
class DelimitedTextConverterFactory extends AbstractSimpleFeatureConverterFactory[String] {
  override protected val typeToProcess = "delimited-text"
  // Custom format variants not provided by commons-csv out of the box.
  val QUOTED = CSVFormat.DEFAULT.withQuoteMode(QuoteMode.ALL)
  val QUOTE_ESCAPE = CSVFormat.DEFAULT.withEscape('"')
  val QUOTED_WITH_QUOTE_ESCAPE = QUOTE_ESCAPE.withQuoteMode(QuoteMode.ALL)
  override protected def buildConverter(sft: SimpleFeatureType,
                                        conf: Config,
                                        idBuilder: Expr,
                                        fields: IndexedSeq[Field],
                                        userDataBuilder: Map[String, Expr],
                                        validating: Boolean): DelimitedTextConverter = {
    // Base format selected by (case-insensitive) name; unknown names fail fast.
    var baseFmt = conf.getString("format").toUpperCase match {
      case "CSV" | "DEFAULT" => CSVFormat.DEFAULT
      case "EXCEL" => CSVFormat.EXCEL
      case "MYSQL" => CSVFormat.MYSQL
      case "TDF" | "TSV" | "TAB" => CSVFormat.TDF
      case "RFC4180" => CSVFormat.RFC4180
      case "QUOTED" => QUOTED
      case "QUOTE_ESCAPE" => QUOTE_ESCAPE
      case "QUOTED_WITH_QUOTE_ESCAPE" => QUOTED_WITH_QUOTE_ESCAPE
      case _ => throw new IllegalArgumentException("Unknown delimited text format")
    }
    import org.locationtech.geomesa.utils.conf.ConfConversions._
    val opts = {
      val o = "options"
      val dOpts = new DelimitedOptions()
      conf.getIntOpt(s"$o.skip-lines").foreach(s => dOpts.skipLines = s)
      conf.getIntOpt(s"$o.pipe-size").foreach(p => dOpts.pipeSize = p)
      dOpts
    }
    // Optional single-character quote/escape overrides applied on top of the base format.
    conf.getStringOpt("options.quote").foreach { q =>
      require(q.length == 1, "Quote must be a single character")
      baseFmt = baseFmt.withQuote(q.toCharArray()(0))
    }
    conf.getStringOpt("options.escape").foreach { q =>
      require(q.length == 1, "Escape must be a single character")
      baseFmt = baseFmt.withEscape(q.toCharArray()(0))
    }
    new DelimitedTextConverter(baseFmt, sft, idBuilder, fields, userDataBuilder, opts, validating)
  }
}
// Mutable parsing options: header lines to skip, and internal pipe buffer size (bytes).
class DelimitedOptions(var skipLines: Int = 0, var pipeSize: Int = 16 * 1024)
/**
 * Converts delimited text lines into simple features. Each input line is
 * parsed with `format`; the resulting field array places the raw line at
 * index 0 and the parsed columns at indices 1..n.
 */
class DelimitedTextConverter(format: CSVFormat,
                             val targetSFT: SimpleFeatureType,
                             val idBuilder: Expr,
                             val inputFields: IndexedSeq[Field],
                             val userDataBuilder: Map[String, Expr],
                             val options: DelimitedOptions,
                             val validating: Boolean)
  extends LinesToSimpleFeatureConverter {

  /** Skips the configured number of header lines (still counting them) before delegating. */
  override def processInput(is: Iterator[String], ec: EvaluationContext): Iterator[SimpleFeature] = {
    ec.counter.incLineCount(options.skipLines)
    super.processInput(is.drop(options.skipLines), ec)
  }

  /**
   * Parses a single delimited line.
   *
   * @throws IllegalArgumentException if the line is null or empty
   * @return a single array with the raw line at index 0 and parsed fields after it
   */
  override def fromInputType(string: String): Seq[Array[Any]] = {
    if (string == null || string.isEmpty) {
      throw new IllegalArgumentException("Invalid input (empty)")
    }
    // CSVParser is Closeable: close it after reading the single record so its
    // internal resources are released even if parsing throws.
    val parser = format.parse(new StringReader(string))
    try {
      val rec = parser.iterator().next()
      val len = rec.size()
      val ret = Array.ofDim[Any](len + 1)
      ret(0) = string
      var i = 0
      while (i < len) {
        ret(i + 1) = rec.get(i)
        i += 1
      }
      Seq(ret)
    } finally {
      parser.close()
    }
  }
}
| tkunicki/geomesa | geomesa-convert/geomesa-convert-text/src/main/scala/org/locationtech/geomesa/convert/text/DelimitedTextConverter.scala | Scala | apache-2.0 | 4,277 |
package org.jetbrains.plugins.scala.codeInspection.collections
import com.intellij.codeInspection.ProblemsHolder
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.codeInspection.{ChangeReferenceNameQuickFix, InspectionBundle}
import org.jetbrains.plugins.scala.extensions.ExpressionType
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScBlock, ScExpression, ScFunctionExpr}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScEarlyDefinitions
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody
import org.jetbrains.plugins.scala.lang.psi.types.{ScFunctionType, Unit}
/**
* @author Nikolay.Tropin
*/
// Flags `.map` calls on collection-like receivers whose lambda returns Unit
// (the mapped values are discarded), offering a quick-fix to `.foreach` when
// the call result is itself unused (statement position).
class UnitInMapInspection extends OperationOnCollectionInspection {
  override def possibleSimplificationTypes: Array[SimplificationType] = Array()
  override def actionFor(holder: ProblemsHolder): PartialFunction[PsiElement, Any] = {
    case MethodRepr(call, _, Some(ref), Seq(arg @ lambdaWithBody(body)))
      if ref.refName == "map" && checkResolve(ref, getLikeCollectionClasses) =>
      // The foreach quick-fix is only safe when the map result is discarded,
      // i.e. the call sits directly in a block/template/file.
      val isInBlock = call.getParent match {
        case _: ScBlock | _: ScTemplateBody | _: ScEarlyDefinitions | _: ScalaFile => true
        case _ => false
      }
      val fixes =
        if (isInBlock) Seq(new ChangeReferenceNameQuickFix(InspectionBundle.message("use.foreach.instead.of.map"), ref, "foreach"))
        else Seq.empty
      // Collect return expressions of the lambda body whose type is Unit (or a
      // function type to Unit matching the argument's own type).
      val unitTypeReturns = body.calculateReturns().collect {
        case expr @ ExpressionType(ft @ ScFunctionType(Unit, _)) if arg.getType().getOrAny.equiv(ft) => expr
        case expr @ ExpressionType(Unit) => expr
      }.filter(_.getTextLength > 0)
      unitTypeReturns.foreach { e =>
        if (e.isPhysical)
          holder.registerProblem(e, InspectionBundle.message("expression.unit.return.in.map"), highlightType, fixes: _*)
      }
  }
  // Extracts the body of a lambda argument, unwrapping an enclosing block;
  // a plain expression is returned as-is.
  object lambdaWithBody {
    def unapply(expr: ScExpression): Option[ScExpression] = {
      expr match {
        case ScBlock(ScFunctionExpr(_, res)) => res
        case ScFunctionExpr(_, res) => res
        case e => Some(e)
      }
    }
  }
} | LPTK/intellij-scala | src/org/jetbrains/plugins/scala/codeInspection/collections/UnitInMapInspection.scala | Scala | apache-2.0 | 2,157 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.common.json
import com.twitter.zipkin.common.Endpoint
// JSON-serializable view of a span annotation placed on a timeline: when it
// happened, its value, the emitting host, and the span it belongs to
// (parentId is absent for root spans).
case class JsonTimelineAnnotation(timestamp: Long, value: String, host: Endpoint, spanId: String, parentId: Option[String],
                                  serviceName: String, spanName: String)
| netconstructor/zipkin | zipkin-finatra/src/main/scala/com/twitter/zipkin/common/json/JsonTimelineAnnotation.scala | Scala | apache-2.0 | 877 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.commons.validation
import scala.util.control.NoStackTrace
import io.gatling.BaseSpec
/** Verifies that `safely` passes successful Validations through untouched and
  * converts thrown exceptions into failures (optionally transforming the message). */
class SafelySpec extends BaseSpec {
  "safely" should "returned the provided Validation if it didn't throw exceptions" in {
    safely()(1.success) shouldBe 1.success
  }
  it should "return a failure if the provided Validation threw exceptions" in {
    // Always throws before reaching the success value.
    def exceptionThrower = {
      def boom = throw new Exception("Woops") with NoStackTrace
      boom
      Success(1)
    }
    safely()(exceptionThrower) shouldBe "j.l.Exception: Woops".failure
    safely(_ + "y")(exceptionThrower) shouldBe "j.l.Exception: Woopsy".failure
  }
}
| gatling/gatling | gatling-commons/src/test/scala/io/gatling/commons/validation/SafelySpec.scala | Scala | apache-2.0 | 1,270 |
package com.persist.dst
import org.apache.spark.sql.Column
import scala.reflect.runtime.universe._
/**
 * Typed wrappers around Spark SQL [[Column]]s that carry a human-readable
 * `name` alongside the underlying column expression. Each operator builds a
 * new wrapper whose `name` describes the expression.
 *
 * Fix: the two-column operator variants previously built names with the
 * literal character `n` (e.g. `s"n == ..."`) instead of interpolating the
 * left-hand column's name (`$n`), and used labels inconsistent with the
 * single-argument variants; both are corrected below.
 */
object DstColumns {

  // TODO desc only available for sort
  // TODO add String column type (with compare ops)
  // TODO allow ops other than desc on sort???

  abstract class DstTransform

  // Captures the WeakTypeTag for TCOL so toString can report the column's type.
  private def typeTag[T](implicit tag: WeakTypeTag[T]) = {
    tag
  }

  /** A named column bound to a transform phantom type. */
  trait DstColumn[TRANSFORM <: DstTransform] {
    val name: String
    val col: Column
    private def c = col
    private def n = name
    /** Descending sort order for this column (intended for sort specs). */
    def desc = new DstColumn[TRANSFORM] {
      val col = c.desc
      val name = n + ".desc"
    }
    override def toString() = {
      s"Column($name)"
    }
  }

  abstract class AggTypedColumn[TRANSFORM <: DstTransform, TCOL: TypeTag]

  /** A column with a known Scala element type TCOL; provides comparison ops. */
  abstract class DstTypedColumn[TRANSFORM <: DstTransform, TCOL: TypeTag]
    extends AggTypedColumn[TRANSFORM, TCOL] with DstColumn[TRANSFORM] {
    def ===(other: DstTypedColumn[TRANSFORM, TCOL]) = {
      val c = col
      val n = name
      new DstBooleanColumn[TRANSFORM] {
        val name = s"$n === ${other.name}"
        val col = c === other.col
      }
    }
    def !==(other: DstTypedColumn[TRANSFORM, TCOL]) = {
      val c = col
      val n = name
      new DstBooleanColumn[TRANSFORM] {
        val name = s"$n !== ${other.name}"
        val col = c !== other.col
      }
    }
    def <(other: DstIntColumn[TRANSFORM]) = {
      val c = col
      val n = name
      new DstBooleanColumn[TRANSFORM] {
        val name = s"$n < ${other.name}"
        val col = c < other.col
      }
    }
    def >(other: DstIntColumn[TRANSFORM]) = {
      val c = col
      val n = name
      new DstBooleanColumn[TRANSFORM] {
        val name = s"$n > ${other.name}"
        val col = c > other.col
      }
    }
    def <=(other: DstIntColumn[TRANSFORM]) = {
      val c = col
      val n = name
      new DstBooleanColumn[TRANSFORM] {
        val name = s"$n <= ${other.name}"
        val col = c <= other.col
      }
    }
    def >=(other: DstIntColumn[TRANSFORM]) = {
      val c = col
      val n = name
      new DstBooleanColumn[TRANSFORM] {
        val name = s"$n >= ${other.name}"
        val col = c >= other.col
      }
    }
    override def toString() =
      s"Column($name:${typeTag[TCOL].tpe.toString})"
  }

  /** Integer column with arithmetic, comparison and aggregation operations. */
  abstract class DstIntColumn[TRANSFORM <: DstTransform] extends DstTypedColumn[TRANSFORM, Int] {
    def ===(i: Int) = {
      val n = name
      val c = col
      new DstBooleanColumn[TRANSFORM] {
        val name = s"$n === $i"
        val col = c === i
      }
    }
    def !==(i: Int) = {
      val n = name
      val c = col
      new DstBooleanColumn[TRANSFORM] {
        val name = s"$n !== $i"
        val col = c !== i
      }
    }
    def +(i: Int) = {
      val n = name
      val c = col
      new DstIntColumn[TRANSFORM] {
        val name = s"$n + $i"
        val col = c + i
      }
    }
    def +(other: DstIntColumn[TRANSFORM]) = {
      val c = col
      val n = name
      new DstIntColumn[TRANSFORM] {
        val name = s"$n + ${other.name}"
        val col = c + other.col
      }
    }
    def *(i: Int) = {
      val n = name
      val c = col
      new DstIntColumn[TRANSFORM] {
        val name = s"$n * $i"
        val col = c * i
      }
    }
    def *(other: DstIntColumn[TRANSFORM]) = {
      val c = col
      val n = name
      new DstIntColumn[TRANSFORM] {
        val name = s"$n * ${other.name}"
        val col = c * other.col
      }
    }
    def <(i: Int) = {
      val n = name
      val c = col
      new DstBooleanColumn[TRANSFORM] {
        val name = s"$n < $i"
        val col = c < i
      }
    }
    def >(i: Int) = {
      val n = name
      val c = col
      new DstBooleanColumn[TRANSFORM] {
        val name = s"$n > $i"
        val col = c > i
      }
    }
    def <=(i: Int) = {
      val n = name
      val c = col
      new DstBooleanColumn[TRANSFORM] {
        val name = s"$n <= $i"
        val col = c <= i
      }
    }
    def >=(i: Int) = {
      val n = name
      val c = col
      new DstBooleanColumn[TRANSFORM] {
        val name = s"$n >= $i"
        val col = c >= i
      }
    }
    /** Sum aggregation over this column. */
    def sum = {
      new AggIntColumn[TRANSFORM](this, "sum")
    }
    /** Count aggregation over this column. */
    def count = {
      new AggIntColumn[TRANSFORM](this, "count")
    }
    /** Max aggregation over this column. */
    def max = {
      new AggIntColumn[TRANSFORM](this, "max")
    }
  }

  /** An aggregation (`kind`: sum/count/max) applied to an integer column. */
  class AggIntColumn[TRANSFORM <: DstTransform](val col: DstIntColumn[TRANSFORM], val kind: String) extends AggTypedColumn[TRANSFORM, Int] {
    override def toString() = s"$col.$kind"
  }

  /** Boolean column with logical operations. */
  abstract class DstBooleanColumn[TRANSFORM <: DstTransform] extends DstTypedColumn[TRANSFORM, Boolean] {
    def &&(b: Boolean) = {
      val n = name
      val c = col
      new DstBooleanColumn[TRANSFORM] {
        val name = s"$n && $b"
        val col = c && b
      }
    }
    def &&(other: DstBooleanColumn[TRANSFORM]) = {
      val c = col
      val n = name
      new DstBooleanColumn[TRANSFORM] {
        val name = s"$n && ${other.name}"
        val col = c && other.col
      }
    }
    def ||(b: Boolean) = {
      val n = name
      val c = col
      new DstBooleanColumn[TRANSFORM] {
        val name = s"$n || $b"
        val col = c || b
      }
    }
    def ||(other: DstBooleanColumn[TRANSFORM]) = {
      val c = col
      val n = name
      new DstBooleanColumn[TRANSFORM] {
        val name = s"$n || ${other.name}"
        val col = c || other.col
      }
    }
    def !() = {
      val c = col
      val n = name
      new DstBooleanColumn[TRANSFORM] {
        val name = s"! $n"
        val col = ! c
      }
    }
  }
}
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.extra
import java.util.UUID
import com.spotify.scio.ScioContext
import com.spotify.scio.annotations.experimental
import com.spotify.scio.values.{SCollection, SideInput}
import org.apache.beam.sdk.transforms.{DoFn, View}
import org.apache.beam.sdk.values.PCollectionView
import org.slf4j.LoggerFactory
import scala.jdk.CollectionConverters._
/**
* Main package for Annoy side input APIs. Import all.
*
* {{{
* import com.spotify.scio.extra.annoy._
* }}}
*
* Two metrics are available, Angular and Euclidean.
*
* To save an `SCollection[(Int, Array[Float])]` to an Annoy file:
*
* {{{
* val s = sc.parallelize(Seq( 1-> Array(1.2f, 3.4f), 2 -> Array(2.2f, 1.2f)))
* }}}
*
* Save to a temporary location:
* {{{
* val s1 = s.asAnnoy(Angular, 40, 10)
* }}}
*
* Save to a specific location:
* {{{
* val s1 = s.asAnnoy(Angular, 40, 10, "gs://<bucket>/<path>")
* }}}
*
* `SCollection[AnnoyUri]` can be converted into a side input:
* {{{
* val s = sc.parallelize(Seq( 1-> Array(1.2f, 3.4f), 2 -> Array(2.2f, 1.2f)))
* val side = s.asAnnoySideInput(metric, dimension, numTrees)
* }}}
*
* There's syntactic sugar for saving an SCollection and converting it to a side input:
* {{{
* val s = sc
* .parallelize(Seq( 1-> Array(1.2f, 3.4f), 2 -> Array(2.2f, 1.2f)))
* .asAnnoySideInput(metric, dimension, numTrees)
* }}}
*
* An existing Annoy file can be converted to a side input directly:
* {{{
* sc.annoySideInput(metric, dimension, numTrees, "gs://<bucket>/<path>")
* }}}
*
* `AnnoyReader` provides nearest neighbor lookups by vector as well as item lookups:
* {{{
* val data = (0 until 1000).map(x => (x, Array.fill(40)(r.nextFloat())))
* val main = sc.parallelize(data)
* val side = main.asAnnoySideInput(metric, dimension, numTrees)
*
* main.keys.withSideInput(side)
* .map { (i, s) =>
* val annoyReader = s(side)
*
* // get vector by item id, allocating a new Array[Float] each time
* val v1 = annoyReader.getItemVector(i)
*
* // get vector by item id, copy vector into pre-allocated Array[Float]
* val v2 = Array.fill(dim)(-1.0f)
* annoyReader.getItemVector(i, v2)
*
* // get 10 nearest neighbors by vector
* val results = annoyReader.getNearest(v2, 10)
* }
* }}}
*/
package object annoy {
  // Distance metric used to build and query an Annoy index.
  sealed abstract class AnnoyMetric
  // Cosine (angular) distance.
  case object Angular extends AnnoyMetric
  // Euclidean distance.
  case object Euclidean extends AnnoyMetric
  private val logger = LoggerFactory.getLogger(this.getClass)
/**
* AnnoyReader class for approximate nearest neighbor lookups.
* Supports vector lookup by item as well as nearest neighbor lookup by vector.
*
* @param path Can be either a local file or a GCS location e.g. gs://<bucket>/<path>
* @param metric One of Angular (cosine distance) or Euclidean
* @param dim Number of dimensions in vectors
*/
  class AnnoyReader private[annoy] (path: String, metric: AnnoyMetric, dim: Int) {
    require(dim > 0, "Vector dimension should be > 0")
    import com.spotify.annoy._
    // Maps our metric ADT onto the annoy-java index type and opens the index at `path`.
    private val index = {
      val indexType = metric match {
        case Angular => IndexType.ANGULAR
        case Euclidean => IndexType.EUCLIDEAN
      }
      new ANNIndex(dim, path, indexType)
    }
    /** Gets vector associated with item i. */
    def getItemVector(i: Int): Array[Float] = index.getItemVector(i)
    /** Copies vector associated with item i into vector v. */
    def getItemVector(i: Int, v: Array[Float]): Unit = index.getItemVector(i, v)
    /** Gets maxNumResults nearest neighbors for vector v. */
    // NOTE(review): the asInstanceOf relies on erasure to view the boxed Integer
    // sequence as Seq[Int]; elements are unboxed on access. Confirm against the
    // annoy-java return type before changing.
    def getNearest(v: Array[Float], maxNumResults: Int): Seq[Int] =
      index.getNearest(v, maxNumResults).asScala.toSeq.asInstanceOf[Seq[Int]]
  }
/** Enhanced version of [[ScioContext]] with Annoy methods. */
  /** Enhanced version of [[ScioContext]] with Annoy methods. */
  implicit class AnnoyScioContext(private val self: ScioContext) extends AnyVal {
    /**
     * Create a SideInput of [[AnnoyReader]] from an [[AnnoyUri]] base path, to be used with
     * [[com.spotify.scio.values.SCollection.withSideInputs SCollection.withSideInputs]]
     *
     * @param metric Metric (Angular, Euclidean) used to build the Annoy index
     * @param dim Number of dimensions in vectors used to build the Annoy index
     */
    @experimental
    def annoySideInput(path: String, metric: AnnoyMetric, dim: Int): SideInput[AnnoyReader] = {
      // Wrap the single URI in a singleton view; the reader is opened lazily per worker.
      val uri = AnnoyUri(path, self.options)
      val view = self.parallelize(Seq(uri)).applyInternal(View.asSingleton())
      new AnnoySideInput(view, metric, dim)
    }
  }
  // Enhanced version of SCollection[(Int, Array[Float])] with Annoy write/side-input methods.
  implicit class AnnoyPairSCollection(@transient private val self: SCollection[(Int, Array[Float])])
    extends AnyVal {
    /**
     * Write the key-value pairs of this SCollection as an Annoy file to a specific location,
     * building the trees in the index according to the parameters provided.
     *
     * @param path Can be either a local file or a GCS location e.g. gs://<bucket>/<path>
     * @param metric One of Angular (cosine distance) or Euclidean
     * @param dim Number of dimensions in vectors
     * @param nTrees Number of trees to build. More trees means more precision & bigger indices.
     *               If nTrees is set to -1, the trees will automatically be built in such a way
     *               that they will take at most 2x the memory of the vectors.
     * @return A singleton SCollection containing the [[AnnoyUri]] of the saved files
     */
    @experimental
    def asAnnoy(path: String, metric: AnnoyMetric, dim: Int, nTrees: Int): SCollection[AnnoyUri] = {
      val uri = AnnoyUri(path, self.context.options)
      require(!uri.exists, s"Annoy URI ${uri.path} already exists")
      self.transform { in =>
        // groupBy(()) funnels every element onto a single worker so one index file is built.
        in.groupBy(_ => ())
          .map { case (_, xs) =>
            logger.info(s"Saving as Annoy: $uri")
            val startTime = System.nanoTime()
            val annoyWriter = new AnnoyWriter(metric, dim, nTrees)
            try {
              val it = xs.iterator
              while (it.hasNext) {
                val (k, v) = it.next()
                annoyWriter.addItem(k, v)
              }
              val size = annoyWriter.size
              uri.saveAndClose(annoyWriter)
              val elapsedTime = (System.nanoTime() - startTime) / 1000000000.0
              logger.info(s"Built index with $size items in $elapsedTime seconds")
            } catch {
              // Free native resources before rethrowing; the writer is not reusable after this.
              case e: Throwable =>
                annoyWriter.free()
                throw e
            }
            uri
          }
      }
    }
    /**
     * Write the key-value pairs of this SCollection as an Annoy file to a temporary location,
     * building the trees in the index according to the parameters provided.
     *
     * @param nTrees Number of trees to build. More trees means more precision & bigger indices.
     *               If nTrees is set to -1, the trees will automatically be built in such a way
     *               that they will take at most 2x the memory of the vectors.
     * @return A singleton SCollection containing the [[AnnoyUri]] of the saved files
     */
    @experimental
    def asAnnoy(metric: AnnoyMetric, dim: Int, nTrees: Int): SCollection[AnnoyUri] = {
      // Derive a unique path under the pipeline's --tempLocation.
      val uuid = UUID.randomUUID()
      val tempLocation = self.context.options.getTempLocation
      require(tempLocation != null, s"--tempLocation arg is required")
      val path = s"$tempLocation/annoy-build-$uuid"
      this.asAnnoy(path, metric, dim, nTrees)
    }
    /**
     * Write the key-value pairs of this SCollection as an Annoy file to a temporary location,
     * building the trees in the index according to the parameters provided, then load the
     * trees as a side input.
     *
     * @param metric One of Angular (cosine distance) or Euclidean
     * @param dim Number of dimensions in vectors
     * @param nTrees Number of trees to build. More trees means more precision & bigger indices.
     *               If nTrees is set to -1, the trees will automatically be built in such a way
     *               that they will take at most 2x the memory of the vectors.
     * @return SideInput[AnnoyReader]
     */
    @experimental
    def asAnnoySideInput(metric: AnnoyMetric, dim: Int, nTrees: Int): SideInput[AnnoyReader] =
      self.asAnnoy(metric, dim, nTrees).asAnnoySideInput(metric, dim)
  }
/**
 * Enhanced version of [[com.spotify.scio.values.SCollection SCollection]] with Annoy methods.
 */
implicit class AnnoySCollection(@transient private val self: SCollection[AnnoyUri])
  extends AnyVal {
  /**
   * Load the Annoy index stored at the [[AnnoyUri]] held by this singleton
   * [[com.spotify.scio.values.SCollection SCollection]] and expose it as a side input.
   *
   * @param metric Metric (Angular, Euclidean) used to build the Annoy index
   * @param dim Number of dimensions in vectors used to build the Annoy index
   * @return SideInput[AnnoyReader]
   */
  @experimental
  def asAnnoySideInput(metric: AnnoyMetric, dim: Int): SideInput[AnnoyReader] =
    new AnnoySideInput(self.applyInternal(View.asSingleton()), metric, dim)
}
/**
 * Side input that opens an [[AnnoyReader]] for the singleton [[AnnoyUri]] held
 * by the underlying view, using the given metric and dimensionality.
 */
private class AnnoySideInput(val view: PCollectionView[AnnoyUri], metric: AnnoyMetric, dim: Int)
  extends SideInput[AnnoyReader] {
  // Resolve the AnnoyUri from the side input view and open a reader for it.
  override def get[I, O](context: DoFn[I, O]#ProcessContext): AnnoyReader =
    context.sideInput(view).getReader(metric, dim)
}
}
| regadas/scio | scio-extra/src/main/scala/com/spotify/scio/extra/annoy/package.scala | Scala | apache-2.0 | 9,924 |
package com.v_standard.vsp.script
import com.v_standard.utils.HtmlUtil
import java.io.ByteArrayOutputStream
import java.text.DecimalFormat
import sun.org.mozilla.javascript.internal.NativeArray
import sun.org.mozilla.javascript.internal.NativeObject
import scala.collection.JavaConverters._
/**
 * HTML helper functions for rendering form tags from templates.
 *
 * Tags are written to `out`; when `isXhtml` is true, XHTML-style markup is
 * emitted (self-closing tags, `checked="checked"` / `selected="selected"`).
 */
class HtmlFunction(val out: ByteArrayOutputStream, val isXhtml: Boolean) {
  /** Parameter key: value */
  val PARAM_VALUE = "value"
  /** Parameter key: default */
  val PARAM_DEFAULT = "_default"
  /** Parameter key: option list */
  val PARAM_LIST = "_list"

  /**
   * Writes a checkbox tag to the output stream.
   *
   * @param obj current value object
   * @param param parameters (required: value)
   */
  def checkbox(obj: Any, param: NativeObject): Unit = out.write(checkboxTag(obj, param).getBytes)

  /**
   * Writes a radio button tag to the output stream.
   *
   * @param obj current value object
   * @param param parameters (required: value, optional: _default)
   */
  def radio(obj: Any, param: NativeObject): Unit = out.write(radioTag(obj, param).getBytes)

  /**
   * Writes a select box tag to the output stream.
   *
   * @param obj current value object
   * @param param parameters (required: _list, optional: _default)
   */
  def select(obj: Any, param: NativeObject): Unit = out.write(selectTag(obj, param).getBytes)

  /**
   * Escapes a string and converts line breaks to <br /> tags.
   *
   * @param str string
   * @return Raw object
   */
  def br(str: String): Raw = {
    Raw(HtmlUtil.crlf2br(HtmlUtil.escape(str)))
  }

  /**
   * Converts line breaks in already-rendered output to <br /> tags.
   *
   * @param oc output converter whose rendered string is converted
   * @return Raw object
   */
  def br(oc: OutputConverter): Raw = {
    Raw(HtmlUtil.crlf2br(oc.mkString))
  }

  /**
   * Builds a checkbox tag string.
   *
   * @param obj current value object
   * @param param parameters
   * @return tag string
   */
  protected def checkboxTag(obj: Any, param: NativeObject): String = {
    checkRequiredParam(param, List(PARAM_VALUE))
    val tag = new StringBuilder("<input type=\\"checkbox\\"").append(createAttrByParam(param))
    val xhtml = isXhtml
    val currentValue = convertValue(obj)
    // Checked when the declared value equals the current value.
    if (convertValue(param.get(PARAM_VALUE)) == currentValue) {
      if (xhtml) tag.append(" checked=\\"checked\\"")
      else tag.append(" checked")
    }
    if (xhtml) tag.append(" />")
    else tag.append(">")
    tag.toString
  }

  /**
   * Builds a radio button tag string.
   *
   * @param obj current value object
   * @param param parameters
   * @return tag string
   */
  protected def radioTag(obj: Any, param: NativeObject): String = {
    checkRequiredParam(param, List(PARAM_VALUE))
    val tag = new StringBuilder("<input type=\\"radio\\"").append(createAttrByParam(param))
    val xhtml = isXhtml
    val currentValue = convertValue(obj)
    // Checked when the value matches, or when there is no current value and
    // this radio is flagged as the default via _default.
    if ((currentValue == null || currentValue == "") && getBoolean(param, PARAM_DEFAULT, false) ||
      convertValue(param.get(PARAM_VALUE)) == currentValue) {
      if (xhtml) tag.append(" checked=\\"checked\\"")
      else tag.append(" checked")
    }
    if (xhtml) tag.append(" />")
    else tag.append(">")
    tag.toString
  }

  /**
   * Builds a select tag string.
   *
   * @param obj current value object
   * @param param parameters
   * @return tag string
   */
  protected def selectTag(obj: Any, param: NativeObject): String = {
    checkRequiredParam(param, List(PARAM_LIST))
    val tag = new StringBuilder("<select").append(createAttrByParam(param)).append(">")
    val xhtml = isXhtml
    val currentValue = convertValue(obj)
    if (param.containsKey(PARAM_DEFAULT)) {
      val entries = param.get(PARAM_DEFAULT).asInstanceOf[NativeObject].entrySet
      val it = entries.iterator
      // Only the first entry of _default is rendered, as the leading option.
      if (it.hasNext) {
        val e = it.next
        tag.append(optionTag(e.getKey.toString, e.getValue.toString, currentValue, xhtml))
      }
    }
    convertOptionList(param).foreach(lv => tag.append(optionTag(lv._1, lv._2, currentValue, xhtml)))
    tag.append("</select>")
    tag.toString
  }

  /**
   * Checks that all required parameters are present.
   *
   * @param param parameters
   * @param names required parameter names
   */
  private def checkRequiredParam(param: NativeObject, names: Iterable[String]) {
    names.foreach(n => if (!param.containsKey(n)) throw new IllegalArgumentException(s""""$n" is required."""))
  }

  /**
   * Builds an attribute string from the parameters.
   * Keys starting with "_" are treated as directives and skipped;
   * attribute values are HTML-escaped.
   *
   * @param param parameters
   * @return attribute string
   */
  private def createAttrByParam(param: NativeObject) = {
    val attr = new StringBuilder
    param.entrySet.asScala.foreach { e =>
      val key = e.getKey.toString
      if (!key.startsWith("_"))
        attr.append(" "). append(key).append("=\\"").append(HtmlUtil.escape(convertValue(e.getValue))).append("\\"")
    }
    attr.toString
  }

  /**
   * Reads a boolean parameter.
   *
   * @param param parameters
   * @param name parameter name
   * @param default value returned when the key is absent
   * @return boolean value
   */
  private def getBoolean(param: NativeObject, name: String, default: Boolean) = {
    if (!param.containsKey(name)) default
    // NOTE(review): unchecked cast; a non-Boolean value throws ClassCastException.
    else param.get(name).asInstanceOf[Boolean]
  }

  /**
   * Converts a value to its string form.
   * Doubles are formatted without a trailing fractional part where possible;
   * Options are unwrapped (None becomes null).
   *
   * @param obj value
   * @return converted string (may be null)
   */
  private def convertValue(obj: Any): String = obj match {
    case null => null
    case v: Option[_] => v.map(x => convertValue(x)).getOrElse(null)
    case v: Double => new DecimalFormat("0.############").format(v)
    case v => v.toString
  }

  /**
   * Converts the _list parameter into (value, label) pairs.
   *
   * NOTE(review): the match is non-exhaustive — a _list of any other type
   * throws a MatchError; confirm callers only pass these shapes.
   *
   * @param param parameters
   * @return converted list
   */
  private def convertOptionList(param: NativeObject): Seq[(String, String)] = {
    param.get(PARAM_LIST) match {
      case lst: NativeArray => lst.asScala.map { l =>
        val e = l.asInstanceOf[NativeObject].entrySet.asScala.head
        (e.getKey.toString, e.getValue.toString)
      }.toSeq
      case lst: Seq[_] => lst.map {
        case (v, l) => (v.toString, l.toString)
      }
      case lst: Array[_] => lst.map {
        case (v, l) => (v.toString, l.toString)
      }
    }
  }

  /**
   * Builds an <option> tag.
   *
   * @param value option value
   * @param label option label
   * @param currentValue current value
   * @param xhtml true when emitting XHTML
   * @return tag string
   */
  private def optionTag(value: String, label: String, currentValue: String, xhtml: Boolean): String = {
    val tag = new StringBuilder(s"""<option value="$value"""")
    if (value == currentValue) {
      if (xhtml) tag.append(" selected=\\"selected\\"")
      else tag.append(" selected")
    }
    tag.append(s">$label</option>")
    tag.toString
  }
}
| VanishStandard/vsp | src/main/scala/com/v_standard/vsp/script/HtmlFunction.scala | Scala | bsd-3-clause | 6,493 |
package chapter3
import chapter3.Exercise3_10.foldLeft
/**
 * Exercise 3.11: implement sum, product and length for lists in terms of foldLeft.
 */
object Exercise3_11 {

  /** Product of all elements; the empty list yields 1 (multiplicative identity). */
  def product(xs: List[Int]): Int = foldLeft(xs, 1)((acc, x) => acc * x)

  /** Sum of all elements; the empty list yields 0 (additive identity). */
  def sum(xs: List[Int]): Int = foldLeft(xs, 0)((acc, x) => acc + x)

  /** Number of elements, counted by incrementing an accumulator once per element. */
  def length[A](a: List[A]): Int = foldLeft(a, 0)((acc, _) => acc + 1)

  /** Smoke tests for the fold-based implementations. */
  def main(args: Array[String]): Unit = {
    val sample = List(1, 2, 3, 4)
    assert(product(sample) == 24)
    assert(sum(sample) == 10)
    assert(length(sample) == 4)
    // Identity/empty-list cases.
    assert(product(Nil) == 1)
    assert(sum(Nil) == 0)
    assert(length(Nil) == 0)
    println("All Tests successful")
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.feature
import breeze.linalg.{DenseVector => BDV}
import org.apache.spark.annotation.Experimental
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.mllib.linalg.{DenseVector, SparseVector, Vector, Vectors}
import org.apache.spark.rdd.RDD
/**
 * :: Experimental ::
 * Inverse document frequency (IDF).
 * The standard formulation is used: `idf = log((m + 1) / (d(t) + 1))`, where `m` is the total
 * number of documents and `d(t)` is the number of documents that contain term `t`.
 *
 * This implementation supports filtering out terms which do not appear in a minimum number
 * of documents (controlled by the variable `minDocFreq`). For terms that are not in
 * at least `minDocFreq` documents, the IDF is found as 0, resulting in TF-IDFs of 0.
 *
 * @param minDocFreq minimum of documents in which a term
 *                   should appear for filtering
 */
@Experimental
class IDF(val minDocFreq: Int) {

  /** No filtering by default: every term is kept. */
  def this() = this(0)

  // TODO: Allow different IDF formulations.

  /**
   * Computes the inverse document frequency.
   * @param dataset an RDD of term frequency vectors
   */
  def fit(dataset: RDD[Vector]): IDFModel = {
    val zero = new IDF.DocumentFrequencyAggregator(minDocFreq = minDocFreq)
    // Fold each partition's vectors into an aggregator, then merge aggregators tree-wise.
    val aggregated = dataset.treeAggregate(zero)(
      (agg, v) => agg.add(v),
      (a, b) => a.merge(b))
    new IDFModel(aggregated.idf())
  }

  /**
   * Computes the inverse document frequency.
   * @param dataset a JavaRDD of term frequency vectors
   */
  def fit(dataset: JavaRDD[Vector]): IDFModel = fit(dataset.rdd)
}
private object IDF {

  /**
   * Document frequency aggregator.
   *
   * Accumulates, over a stream of term-frequency vectors, the number of
   * documents seen (`m`) and the per-term count of documents in which the
   * term occurs with a positive value (`df`).
   */
  class DocumentFrequencyAggregator(val minDocFreq: Int) extends Serializable {

    /** number of documents */
    private var m = 0L
    /** document frequency vector */
    private var df: BDV[Long] = _

    def this() = this(0)

    /** Adds a new document. */
    def add(doc: Vector): this.type = {
      if (isEmpty) {
        // Lazily sized from the first document's dimensionality.
        df = BDV.zeros(doc.size)
      }
      doc match {
        case SparseVector(size, indices, values) =>
          val nnz = indices.size
          var k = 0
          while (k < nnz) {
            // Only strictly positive term frequencies count as an occurrence.
            if (values(k) > 0) {
              df(indices(k)) += 1L
            }
            k += 1
          }
        case DenseVector(values) =>
          val n = values.size
          var j = 0
          while (j < n) {
            if (values(j) > 0.0) {
              df(j) += 1L
            }
            j += 1
          }
        case other =>
          throw new UnsupportedOperationException(
            s"Only sparse and dense vectors are supported but got ${other.getClass}.")
      }
      m += 1L
      this
    }

    /** Merges another. */
    def merge(other: DocumentFrequencyAggregator): this.type = {
      // An empty partner contributes nothing; skip it to keep df possibly unallocated.
      if (!other.isEmpty) {
        m += other.m
        if (df == null) {
          df = other.df.copy
        } else {
          df += other.df
        }
      }
      this
    }

    private def isEmpty: Boolean = m == 0L

    /** Returns the current IDF vector: log((m + 1) / (df(t) + 1)), 0 below minDocFreq. */
    def idf(): Vector = {
      if (isEmpty) {
        throw new IllegalStateException("Haven't seen any document yet.")
      }
      val n = df.length
      val inv = new Array[Double](n)
      var j = 0
      while (j < n) {
        /*
         * If the term is not present in the minimum
         * number of documents, set IDF to 0. This
         * will cause multiplication in IDFModel to
         * set TF-IDF to 0.
         *
         * Since arrays are initialized to 0 by default,
         * we just omit changing those entries.
         */
        if(df(j) >= minDocFreq) {
          inv(j) = math.log((m + 1.0) / (df(j) + 1.0))
        }
        j += 1
      }
      Vectors.dense(inv)
    }
  }
}
/**
 * :: Experimental ::
 * Represents an IDF model that can transform term frequency vectors.
 */
@Experimental
class IDFModel private[mllib] (val idf: Vector) extends Serializable {

  /**
   * Transforms term frequency (TF) vectors to TF-IDF vectors.
   *
   * If `minDocFreq` was set for the IDF calculation,
   * the terms which occur in fewer than `minDocFreq`
   * documents will have an entry of 0.
   *
   * @param dataset an RDD of term frequency vectors
   * @return an RDD of TF-IDF vectors
   */
  def transform(dataset: RDD[Vector]): RDD[Vector] = {
    // Broadcast the IDF vector once instead of capturing it in every task closure.
    val broadcastIdf = dataset.context.broadcast(idf)
    dataset.mapPartitions { iter =>
      val localIdf = broadcastIdf.value
      iter.map(v => IDFModel.transform(localIdf, v))
    }
  }

  /**
   * Transforms a term frequency (TF) vector to a TF-IDF vector
   *
   * @param v a term frequency vector
   * @return a TF-IDF vector
   */
  def transform(v: Vector): Vector = IDFModel.transform(idf, v)

  /**
   * Transforms term frequency (TF) vectors to TF-IDF vectors (Java version).
   * @param dataset a JavaRDD of term frequency vectors
   * @return a JavaRDD of TF-IDF vectors
   */
  def transform(dataset: JavaRDD[Vector]): JavaRDD[Vector] =
    transform(dataset.rdd).toJavaRDD()
}
private object IDFModel {

  /**
   * Transforms a term frequency (TF) vector to a TF-IDF vector with a IDF vector
   *
   * Terms whose IDF entry is 0 (pruned by minDocFreq) yield 0 in the result.
   *
   * @param idf an IDF vector
   * @param v a term frequency vector
   * @return a TF-IDF vector
   */
  def transform(idf: Vector, v: Vector): Vector = {
    val n = v.size
    v match {
      case SparseVector(size, indices, values) =>
        // Multiply only the stored (non-zero) entries; sparsity pattern is preserved.
        val nnz = indices.size
        val newValues = new Array[Double](nnz)
        var k = 0
        while (k < nnz) {
          newValues(k) = values(k) * idf(indices(k))
          k += 1
        }
        Vectors.sparse(n, indices, newValues)
      case DenseVector(values) =>
        val newValues = new Array[Double](n)
        var j = 0
        while (j < n) {
          newValues(j) = values(j) * idf(j)
          j += 1
        }
        Vectors.dense(newValues)
      case other =>
        throw new UnsupportedOperationException(
          s"Only sparse and dense vectors are supported but got ${other.getClass}.")
    }
  }
}
| trueyao/spark-lever | mllib/src/main/scala/org/apache/spark/mllib/feature/IDF.scala | Scala | apache-2.0 | 6,799 |
package edu.berkeley.nlp.entity.coref
import scala.collection.JavaConverters._
import edu.berkeley.nlp.entity.GUtil
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashMap
import scala.util.Random
import edu.berkeley.nlp.entity.sem.SemClass._
import edu.berkeley.nlp.futile.util.Counter
import edu.berkeley.nlp.futile.util.Iterators
import edu.berkeley.nlp.futile.util.Logger
import edu.mit.jwi.item.Pointer
import edu.berkeley.nlp.entity.sem.SemClass
import edu.berkeley.nlp.entity.WordNetInterfacer
/**
 * Graph over the predicted mentions of one document, used for pairwise
 * coreference inference. Holds per-arc caches of indexed features and scores,
 * a pruning mask over antecedent arcs, and memoized semantic information
 * (cluster posteriors, semantic classes, WordNet relations) for featurizers.
 */
class DocumentGraph(val corefDoc: CorefDoc,
                    val addToFeaturizer: Boolean) {
  // addToFeaturizer should be true for train documents (if a feature is unseen on
  // these, we add it to the featurizer) and false for dev/test documents
  // By convention: a feature vector is empty if it has been pruned
  val emptyIntArray = Array[Int]();
  // cachedFeats(i)(j): indexed features for mention i with antecedent j (j <= i)
  var cachedFeats = new Array[Array[Array[Int]]](corefDoc.numPredMents);
  for (i <- 0 until corefDoc.numPredMents) {
    cachedFeats(i) = Array.fill(i+1)(emptyIntArray);
  }
  // These are just here so we don't have to reinstantiate them; they should
  // be overwritten every time the weights change (which is all the time)
  val cachedScoreMatrix = new Array[Array[Float]](corefDoc.numPredMents);
  val cachedMarginalMatrix = new Array[Array[Float]](corefDoc.numPredMents);
  for (i <- 0 until corefDoc.numPredMents) {
    cachedScoreMatrix(i) = Array.fill(i+1)(0.0F);
    cachedMarginalMatrix(i) = Array.fill(i+1)(0.0F);
  }
  // Only used for DocumentInferencerRahman
  val cachedMentClusterMapping = new MentClusterMapping(corefDoc.numPredMents);
  // Featurizer whose output currently populates cachedFeats; see featurizeIndexNonPrunedUseCache.
  var cachedFeaturizer: PairwiseIndexingFeaturizer = null;
  var cacheEmpty = true;
  // If an edge is pruned, it will never be featurized
  var prunedEdges = new Array[Array[Boolean]](corefDoc.numPredMents);
  for (i <- 0 until prunedEdges.size) {
    prunedEdges(i) = Array.fill(i+1)(false);
  }
  // Cached information for feature computation
  val storedClusterPosteriors = new ArrayBuffer[Array[Array[Float]]]();
  val storedDistributedLabels = new ArrayBuffer[Array[Array[Int]]]();
  val storedSemClass: Array[Option[SemClass]] = Array.tabulate(this.size)(i => None);
  val storedRelsBetter = new Array[HashMap[Seq[Pointer],Set[String]]](this.size);
  val storedRelsBetterCumulative = new Array[HashMap[Seq[Pointer],Set[String]]](this.size);
  val cachedMentionHeadMatchStatus: Array[Option[Boolean]] = Array.tabulate(this.size)(i => None);
  // WordNetInterfacer so the featurizer can find it if it needs to
  var cachedWni: WordNetInterfacer = null;

  /** Number of predicted mentions in the document. */
  def size() = corefDoc.numPredMents

  def getMention(idx: Int) = corefDoc.predMentions(idx);

  def getMentions() = corefDoc.predMentions;

  def getOraclePredClustering() = corefDoc.getOraclePredClustering;

  /** Renders mention idx in brackets with up to three words of context on each side. */
  def getMentionStrAndContext(idx: Int): String = {
    val ment = getMention(idx);
    val mentionStart = ment.startIdx;
    val mentionEnd = ment.endIdx;
    val sentence = corefDoc.rawDoc.words(ment.sentIdx);
    val contextStart = Math.max(0, mentionStart - 3);
    val contextEnd = Math.min(mentionEnd + 3, sentence.size);
    (sentence.slice(contextStart, mentionStart).foldLeft("")(_ + " " + _) + " [" + sentence.slice(mentionStart, mentionEnd).foldLeft("")(_ + " " + _) +
        "] " + sentence.slice(mentionEnd, contextEnd).foldLeft("")(_ + " " + _)).trim();
  }

  /** True if antecedentIdx is a gold antecedent of currIdx, ignoring pruning. */
  def isGoldNoPruning(currIdx: Int, antecedentIdx: Int) = getGoldAntecedentsNoPruning(currIdx).contains(antecedentIdx);

  /** True if antecedentIdx is a gold antecedent of currIdx under the current pruning mask. */
  def isGoldCurrentPruning(currIdx: Int, antecedentIdx: Int) = getGoldAntecedentsUnderCurrentPruning(currIdx).contains(antecedentIdx);

  def isPruned(currIdx: Int, antecedentIdx: Int): Boolean = prunedEdges(currIdx)(antecedentIdx);

  /** Unpruned antecedent indices for idx; if gold is true, restricted to gold antecedents. */
  def getPrunedDomain(idx: Int, gold: Boolean): Array[Int] = {
    val currAntecedents = getGoldAntecedentsUnderCurrentPruning(idx);
    val domainSeq = new ArrayBuffer[Int]();
    for (j <- 0 to idx) {
      if (!isPruned(idx, j) && (!gold || currAntecedents.contains(j))) {
        domainSeq += j;
      }
    }
    domainSeq.toArray;
  }

  /**
   * Prunes arcs farther back than maxBackptrMentDistance mentions, and pronoun
   * arcs spanning more than maxPronounSentDistance sentences.
   */
  def pruneEdgesMentDistanceSentDistance(maxBackptrMentDistance: Int, maxPronounSentDistance: Int) {
    for (i <- 0 until prunedEdges.size) {
      val iSentIdx = getMention(i).sentIdx;
      for (j <- 0 to i) {
        val jSentIdx = getMention(j).sentIdx;
        if (j < i - maxBackptrMentDistance || (getMention(i).mentionType == MentionType.PRONOMINAL && iSentIdx - jSentIdx > maxPronounSentDistance)) {
          prunedEdges(i)(j) = true;
          // Invalidate any cached features for the newly pruned arc.
          cachedFeats(i)(j) = emptyIntArray;
        }
      }
    }
  }

  /** Prunes arcs scoring below the best arc's score plus logPruningThreshold under model. */
  def pruneEdgesModel(model: PairwiseScorer, logPruningThreshold: Double) {
    for (i <- 0 until prunedEdges.size) {
      val scores = (0 to i).map(j => model.score(this, i, j, false));
      val bestIdx = GUtil.argMaxIdxFloat(scores);
      for (j <- 0 to i) {
        if (scores(j) < scores(bestIdx) + logPruningThreshold) {
          prunedEdges(i)(j) = true;
          cachedFeats(i)(j) = emptyIntArray;
        }
      }
    }
  }

  /** Tallies how much pruning removed, including how many gold backpointers were lost. */
  def computePruningStats(): PruningStats = {
    var totalMentions = 0;
    var totalAnaphoricMentions = 0;
    var totalEdges = 0;
    var edgesPruned = 0;
    var numGoldBackptrs = 0;
    var numGoldBackptrsPruned = 0;
    var numAllBackptrsPruned = 0;
    var numAnaphoricAllBackptrsPruned = 0;
    for (i <- 0 until this.size) {
      totalMentions += 1;
      val thisAntecedentsNoPruning = getGoldAntecedentsNoPruning(i);
      val thisAntecedentsWithPruning = getGoldAntecedentsUnderCurrentPruningOrEmptySet(i);
      totalEdges += (i+1);
      edgesPruned += prunedEdges(i).foldRight(0)((pruned: Boolean, value: Int) => if (pruned) value + 1 else value);
      // A mention whose only gold "antecedent" is itself is non-anaphoric.
      val goldAntecedentIsSelf = thisAntecedentsNoPruning.size == 1 && thisAntecedentsNoPruning(0) == i;
      val allAntecedentsPruned = thisAntecedentsWithPruning.size == 0;
      totalAnaphoricMentions += (if (goldAntecedentIsSelf) 0 else 1);
      numGoldBackptrs += thisAntecedentsNoPruning.size;
      val numAntecedentsPruned = thisAntecedentsNoPruning.size - thisAntecedentsWithPruning.size;
      numGoldBackptrsPruned += numAntecedentsPruned;
      numAllBackptrsPruned += (if (allAntecedentsPruned) 1 else 0);
      numAnaphoricAllBackptrsPruned += (if (!goldAntecedentIsSelf && allAntecedentsPruned) 1 else 0);
    }
    new PruningStats(totalMentions, totalAnaphoricMentions, totalEdges, edgesPruned, numGoldBackptrs, numGoldBackptrsPruned, numAllBackptrsPruned, numAnaphoricAllBackptrsPruned);
  }

  /** Gold clusters as sequences of Mention objects, ignoring pruning. */
  def getGoldClustersNoPruning(): Seq[Seq[Mention]] = {
    val allClusters = new ArrayBuffer[Seq[Mention]]();
    val oracleClustering = corefDoc.getOraclePredClustering
    for (cluster <- oracleClustering.clusters) {
//      val clusterIndices = cluster.asScala.map(_.mentionID).toSeq;
//      val clusterIndices2 = cluster.asScala.map(doc.predMentions.indexOf(_)).toSeq;
//      require(clusterIndices == clusterIndices2);
      allClusters += cluster.map(getMention(_));
    }
    allClusters;
  }

  /** All unpruned antecedent indices (including self) for idx. */
  def getAllAntecedentsCurrentPruning(idx: Int): Seq[Int] = {
    val antecedents = new ArrayBuffer[Int];
    for (i <- 0 to idx) {
      if (!prunedEdges(idx)(i)) {
        antecedents += i;
      }
    }
    antecedents;
  }

  def getGoldAntecedentsNoPruning(): Array[Seq[Int]] = {
    (0 until this.size).map(getGoldAntecedentsNoPruning(_)).toArray;
  }

  /** Gold antecedents of idx; Seq(idx) (self-link) when the mention is non-anaphoric. */
  def getGoldAntecedentsNoPruning(idx: Int): Seq[Int] = {
    val oracleClustering = corefDoc.getOraclePredClustering
    val antecedents = oracleClustering.getAllAntecedents(idx);
    if (antecedents.isEmpty) Seq(idx) else antecedents;
  }

  // This and the following return the set of allowed antecedents if all gold
  // antecedents have been pruned; effectively this ignores examples where
  // there is no gold. Always returns nonempty.
  def getGoldAntecedentsUnderCurrentPruning(): Array[Seq[Int]] = {
    (0 until this.size).map(getGoldAntecedentsUnderCurrentPruning(_)).toArray;
  }

  def getGoldAntecedentsUnderCurrentPruning(idx: Int): Seq[Int] = {
    val oracleClustering = corefDoc.getOraclePredClustering
    val antecedentsRaw = oracleClustering.getAllAntecedents(idx);
    val antecedents = if (antecedentsRaw.isEmpty) Seq(idx) else antecedentsRaw;
    val unprunedAntecedents = antecedents.filter(j => !prunedEdges(idx)(j))
    if (unprunedAntecedents.isEmpty) {
      // This is a little inefficient but this code isn't called that much (extremely rare in coarse pass
      // and generally not called for nonanaphoric guys, and most things are nonanaphoric)
      val allUnprunedBackptrs = prunedEdges(idx).zipWithIndex.filter((prunedAndIdx) => !prunedAndIdx._1).map(_._2).toSeq;
      allUnprunedBackptrs
    } else {
      unprunedAntecedents;
    }
  }

  // This and the following return the set of unpruned antecedents, possibly empty
  def getGoldAntecedentsUnderCurrentPruningOrEmptySet(): Array[Seq[Int]] = {
    (0 until this.size).map(getGoldAntecedentsUnderCurrentPruningOrEmptySet(_)).toArray;
  }

  def getGoldAntecedentsUnderCurrentPruningOrEmptySet(idx: Int): Seq[Int] = {
    val oracleClustering = corefDoc.getOraclePredClustering
    val antecedentsRaw = oracleClustering.getAllAntecedents(idx);
    val antecedents = if (antecedentsRaw.isEmpty) Seq(idx) else antecedentsRaw;
    val unprunedAntecedents = antecedents.filter(j => !prunedEdges(idx)(j))
    unprunedAntecedents;
  }

  // N.B. The matrices returned by this method are volatile. The feats one hangs around
  // unless you refeaturize, but the other one gets mutated every time you call this
  // method (though obviously it's only different if you prune or if the weights have changed).
  def featurizeIndexAndScoreNonPrunedUseCache(scorer: PairwiseScorer): (Array[Array[Array[Int]]], Array[Array[Float]]) = {
    val featsChart = featurizeIndexNonPrunedUseCache(scorer.featurizer);
    val scoreChart = cachedScoreMatrix;
    for (i <- 0 until corefDoc.numPredMents) {
      for (j <- 0 to i) {
        if (!prunedEdges(i)(j)) {
          scoreChart(i)(j) = GUtil.scoreIndexedFeats(featsChart(i)(j), scorer.weights);
        } else {
          // Pruned arcs are impossible under any weights.
          scoreChart(i)(j) = Float.NegativeInfinity;
        }
      }
    }
    (featsChart, scoreChart)
  }

  /** Features and scores for a single mention's antecedent row (volatile like the above). */
  def scoreUseCache(scorer: PairwiseScorer, mentIdx: Int): (Array[Array[Int]], Array[Float]) = {
    val featsChart = featurizeIndexNonPrunedUseCache(scorer.featurizer)(mentIdx)
    val scoreVec = cachedScoreMatrix(mentIdx);
    for (j <- 0 to mentIdx) {
      if (!prunedEdges(mentIdx)(j)) {
        scoreVec(j) = GUtil.scoreIndexedFeats(featsChart(j), scorer.weights);
      } else {
        scoreVec(j) = Float.NegativeInfinity;
      }
    }
    (featsChart, scoreVec)
  }

  // How does this know whether or not to add features? The private variable addToFeatures...
  // a bit of a hack...
  def featurizeIndexNonPrunedUseCache(featurizer: PairwiseIndexingFeaturizer): Array[Array[Array[Int]]] = {
    // Refeaturize only when the cache is cold or a different featurizer is supplied.
    if (cacheEmpty || featurizer != cachedFeaturizer) {
      cachedFeats = featurizeIndexNonPruned(featurizer);
      cachedFeaturizer = featurizer;
      cacheEmpty = false;
    }
    cachedFeats;
  }

  /** Featurizes every unpruned arc from scratch (no caching). */
  private def featurizeIndexNonPruned(featurizer: PairwiseIndexingFeaturizer): Array[Array[Array[Int]]] = {
    val featsChart = new Array[Array[Array[Int]]](corefDoc.numPredMents);
    for (i <- 0 until corefDoc.numPredMents) {
      featsChart(i) = new Array[Array[Int]](i+1);
      for (j <- 0 to i) {
        if (!prunedEdges(i)(j)) {
          featsChart(i)(j) = featurizer.featurizeIndex(this, i, j, addToFeaturizer);
        }
      }
//      Logger.logss(i + ": " + featsChart(i).map(_.head).toSeq);
    }
    featsChart;
  }

  /** Rescores the cached feature chart with new weights; requires a warm feature cache. */
  def scoreNonPrunedUseCache(weights: Array[Float]): Array[Array[Float]] = {
    val featsChart = cachedFeats;
    val scoreChart = cachedScoreMatrix;
    for (i <- 0 until corefDoc.numPredMents) {
      for (j <- 0 to i) {
        if (!prunedEdges(i)(j)) {
          require(featsChart(i)(j).size > 0);
          scoreChart(i)(j) = GUtil.scoreIndexedFeats(featsChart(i)(j), weights);
        } else {
          scoreChart(i)(j) = Float.NegativeInfinity;
        }
      }
    }
    scoreChart
  }

  /** Installs a new pruning mask and clears cached features for newly pruned arcs. */
  def setPrunedEdges(prunedEdges: Array[Array[Boolean]]) {
    this.prunedEdges = prunedEdges;
    for (i <- 0 until prunedEdges.size) {
      for (j <- 0 until prunedEdges(i).size) {
        if (prunedEdges(i)(j)) {
          cachedFeats(i)(j) = emptyIntArray;
        }
      }
    }
  }

  def clearFeatureCache() {
    for (i <- 0 until cachedFeats.size) {
      for (j <- 0 until cachedFeats(i).size) {
        cachedFeats(i)(j) = emptyIntArray;
      }
    }
  }

  /** Logs the mean feature-vector length for anaphoric (i != j) vs. self (i == j) arcs. */
  def printAverageFeatureCountInfo() {
    var numerAnaphoric = 0;
    var denomAnaphoric = 0;
    var numerNonanaphoric = 0;
    var denomNonanaphoric = 0;
    for (i <- 0 until cachedFeats.size) {
      for (j <-0 until cachedFeats(i).size) {
        if (!prunedEdges(i)(j)) {
          if (i != j) {
            numerAnaphoric += cachedFeats(i)(j).size;
            denomAnaphoric += 1;
          } else {
            numerNonanaphoric += cachedFeats(i)(j).size;
            denomNonanaphoric += 1;
          }
        }
      }
    }
    Logger.logss("Avg feature counts anaphoric: " + numerAnaphoric.toDouble/denomAnaphoric.toDouble);
    Logger.logss("Avg feature counts nonanaphoric: " + numerNonanaphoric.toDouble/denomNonanaphoric.toDouble);
  }

  // Caching various information that we might want to use later
//  def computeAndStoreClusterPosteriors(clusterer: Clusterer) {
//    val clusterPosteriors = new Array[Array[Float]](this.size);
//    for (i <- 0 until size) {
////      val currMent = doc.predMentions.get(i);
////      clusterPosteriors(i) = clusterer.computeClusterPosteriors(getMentionInContext(currMent.sentNum, currMent.startIndex, currMent.endIndex, currMent.headIndex));
//
//      val currMent = getMention(i);
//      clusterPosteriors(i) = clusterer.computeClusterPosteriors(getMentionInContext(currMent.sentIdx, currMent.startIdx, currMent.endIdx, currMent.headIdx));
//      // Do a little smoothing on the posteriors
//      (0 until clusterPosteriors(i).size).foreach(j => clusterPosteriors(i)(j) += 1e-10);
//    }
//    this.storedClusterPosteriors += clusterPosteriors;
//  }
//
//  def computeAndStoreDistributedLabels(clustererIdx: Int, lowerThresholds: Array[Float], upperThresholds: Array[Float]) {
//    val distributedLabels = new Array[Array[Int]](this.size);
//    for (i <- 0 until size) {
//      distributedLabels(i) = new Array[Int](this.storedClusterPosteriors(clustererIdx)(i).size);
//      for (j <- 0 until this.storedClusterPosteriors(clustererIdx)(i).size) {
//        val posterior = this.storedClusterPosteriors(clustererIdx)(i)(j);
//        distributedLabels(i)(j) = if (posterior > upperThresholds(j)) 1 else if (posterior < lowerThresholds(j)) 0 else -1;
//      }
//    }
//    this.storedDistributedLabels += distributedLabels;
//  }

  def numClusterers = storedClusterPosteriors.size;

  def numClusters(clustererIdx: Int) = storedClusterPosteriors(clustererIdx)(0).size;

  def getClusterPosteriors(clustererIdx: Int, mentIdx: Int): Array[Float] = {
    storedClusterPosteriors(clustererIdx)(mentIdx);
  }

  /** Argmax over the stored posterior for mentIdx under clusterer clustererIdx. */
  def getBestCluster(clustererIdx: Int, mentIdx: Int): Int = {
    var bestScore = Float.NegativeInfinity;
    var bestIdx = -1;
    for (i <- 0 until storedClusterPosteriors(clustererIdx)(mentIdx).length) {
      if (storedClusterPosteriors(clustererIdx)(mentIdx)(i) > bestScore) {
        bestScore = storedClusterPosteriors(clustererIdx)(mentIdx)(i);
        bestIdx = i;
      }
    }
    bestIdx;
  }

  /** Stores soft posteriors over number and/or gender attributes for every mention. */
  def computeAndStorePhiPosteriors(useNumber: Boolean, useGender: Boolean, useNert: Boolean) {
    if (useNumber) {
      computeAndStorePhiPosterior((ment: Mention) => ment.number.ordinal(), Number.values().size - 1, Number.UNKNOWN.ordinal())
    }
    if (useGender) {
      computeAndStorePhiPosterior((ment: Mention) => ment.gender.ordinal(), Gender.values().size - 1, Gender.UNKNOWN.ordinal())
    }
  }

  /**
   * Stores a posterior over an attribute domain per mention: uniform when the
   * attribute is unknown, otherwise peaked (EstimatorConfidence) on the observed value.
   */
  def computeAndStorePhiPosterior(fcn: (Mention => Int), domainSize: Int, unknown: Int) {
    val EstimatorConfidence = 0.75F;
    val posteriors = new Array[Array[Float]](this.size);
    for (i <- 0 until size) {
      val idx = fcn(getMention(i));
      if (idx == unknown || idx == -1) {
        posteriors(i) = Array.tabulate(domainSize)(j => 1.0F/domainSize);
      } else if (idx >= domainSize) {
        throw new RuntimeException("Bad idx: " + idx + " for domain size " + domainSize + " " + getMention(i).nerString);
      } else {
        posteriors(i) = Array.tabulate(domainSize)(j => (1.0F - EstimatorConfidence)/domainSize);
        posteriors(i)(idx) += EstimatorConfidence;
      }
    }
    this.storedClusterPosteriors += posteriors;
  }

  /** Memoized semantic class lookup for mention idx. */
  def getSemClassUseCache(wordNetInterfacer: WordNetInterfacer, idx: Int) = {
    if (!storedSemClass(idx).isDefined) {
      storedSemClass(idx) = Some(SemClass.getSemClass(getMention(idx).headStringLc, getMention(idx).nerString, wordNetInterfacer))
    }
    storedSemClass(idx).getOrElse(SemClass.Other);
  }

  /** Memoized WordNet words reachable from mention idx via the given relation chain. */
  def getWordNetRelsBetterUseCache(wordNetInterfacer: WordNetInterfacer, rels: Seq[Pointer], idx: Int) = {
    if (storedRelsBetter(idx) == null) {
      storedRelsBetter(idx) = new HashMap[Seq[Pointer],Set[String]];
    }
    if (!storedRelsBetter(idx).contains(rels)) {
      storedRelsBetter(idx).put(rels, wordNetInterfacer.getWordsOnSynsetRelation(getMention(idx), rels).toSet);
    }
    storedRelsBetter(idx)(rels);
  }

  /** Memoized WordNet words accumulated along the relation chain (cumulative variant). */
  def getWordNetRelsBetterCumulativeUseCache(wordNetInterfacer: WordNetInterfacer, rels: Seq[Pointer], idx: Int) = {
    if (storedRelsBetterCumulative(idx) == null) {
      storedRelsBetterCumulative(idx) = new HashMap[Seq[Pointer],Set[String]];
    }
    if (!storedRelsBetterCumulative(idx).contains(rels)) {
      storedRelsBetterCumulative(idx).put(rels, wordNetInterfacer.getWordsUpToSynsetRelation(getMention(idx), rels).toSet);
    }
    storedRelsBetterCumulative(idx)(rels);
  }

  /** Memoized: does any earlier mention share this mention's lowercase head string? */
  def getHeadMatchStatus(idx: Int) = {
    if (!cachedMentionHeadMatchStatus(idx).isDefined) {
      cachedMentionHeadMatchStatus(idx) = Some((0 until idx).map(i => (getMention(i).headStringLc == getMention(idx).headStringLc)).foldLeft(false)(_ || _));
    }
    cachedMentionHeadMatchStatus(idx).getOrElse(false);
  }

  def cacheWordNetInterfacer(wni: WordNetInterfacer) = {
    this.cachedWni = wni;
  }
}
/**
 * Accumulated counters describing the effect of pruning coreference arcs:
 * how many mentions/edges exist, how many edges were pruned, and how many
 * gold backpointers (and whole mentions' worth of backpointers) were lost.
 */
case class PruningStats(totalMentions: Int,
                        totalAnaphoricMentions: Int,
                        totalEdges: Int,
                        edgesPruned: Int,
                        numGoldBackptrs: Int,
                        numGoldBackptrsPruned: Int,
                        numAllBackptrsPruned: Int,
                        numAnaphoricAllBackptrsPruned: Int) {

  /** Componentwise sum of this and another set of counters. */
  def add(other: PruningStats): PruningStats = PruningStats(
    totalMentions + other.totalMentions,
    totalAnaphoricMentions + other.totalAnaphoricMentions,
    totalEdges + other.totalEdges,
    edgesPruned + other.edgesPruned,
    numGoldBackptrs + other.numGoldBackptrs,
    numGoldBackptrsPruned + other.numGoldBackptrsPruned,
    numAllBackptrsPruned + other.numAllBackptrsPruned,
    numAnaphoricAllBackptrsPruned + other.numAnaphoricAllBackptrsPruned)

  /** Human-readable "name: value" listing of every counter. */
  override def toString(): String =
    s"totalMentions: $totalMentions, totalAnaphoricMentions: $totalAnaphoricMentions, " +
      s"totalEdges: $totalEdges, edgesPruned: $edgesPruned, numGoldBackptrs: $numGoldBackptrs, " +
      s"numGoldBackptrsPruned: $numGoldBackptrsPruned, numAllBackptrsPruned: $numAllBackptrsPruned, " +
      s"numAnaphoricAllBackptrsPruned: $numAnaphoricAllBackptrsPruned"
}
object DocumentGraph {
  // NOTE: legacy pruning helpers retained below in commented-out form for
  // reference; the live pruning entry points are the pruneEdges* methods on
  // DocumentGraph itself.
//  def pruneEdgesAll(docGraphs: Seq[DocumentGraph], pruningStrategy: PruningStrategy, scorer: PairwiseScorer) {
//    if (pruningStrategy.strategy.startsWith("distance")) {
//      val args = pruningStrategy.getDistanceArgs();
//      pruneEdgesAll(docGraphs, (doc: DocumentGraph) => doc.pruneEdgesMentDistanceSentDistance(args._1, args._2));
//    } else if (pruningStrategy.strategy.startsWith("models")) {
//      val models = GUtil.load(pruningStrategy.getModelPath).asInstanceOf[(HashMap[UID,Int], ArrayBuffer[PairwiseScorer])];
//      val threshold = pruningStrategy.getModelLogRatio;
//      pruneEdgesAll(docGraphs, (doc: DocumentGraph) => {
//        if (models._1.contains(doc.corefDoc.rawDoc.uid)) {
//          doc.pruneEdgesModel(models._2(models._1(doc.corefDoc.rawDoc.uid)), threshold);
//        } else {
//          doc.pruneEdgesModel(models._2.head, threshold);
//        }
//      });
//    } else {
//      throw new RuntimeException("Unrecognized pruning strategy: " + pruningStrategy);
//    }
//  }
//
//  def pruneEdgesAll(docGraphs: Seq[DocumentGraph], docsToFolds: HashMap[UID,Int], foldModels: ArrayBuffer[PairwiseScorer], threshold: Float) {
//    pruneEdgesAll(docGraphs, (doc: DocumentGraph) => {
//      if (docsToFolds.contains(doc.corefDoc.rawDoc.uid)) {
//        doc.pruneEdgesModel(foldModels(docsToFolds(doc.corefDoc.rawDoc.uid)), threshold);
//      } else {
//        doc.pruneEdgesModel(foldModels.head, threshold);
//      }
//    });
//  }
//
//  private def pruneEdgesAll(docGraphs: Seq[DocumentGraph], pruningFcn: (DocumentGraph) => Unit) {
//    var pruningStats = new PruningStats(0, 0, 0, 0, 0, 0, 0, 0);
//    for (docGraph <- docGraphs) {
//      pruningFcn(docGraph);
//      pruningStats = pruningStats.add(docGraph.computePruningStats());
//    }
//    Logger.logss("Pruning result: " + pruningStats);
//  }
}
| malcolmgreaves/berkeley-entity | src/main/java/edu/berkeley/nlp/entity/coref/DocumentGraph.scala | Scala | gpl-3.0 | 22,170 |
package spinoco.protocol.http.header.value
import scodec.Codec
import scodec.codecs._
import spinoco.protocol.http.codec.helper._
/** An HTTP content-coding range: either the `*` wildcard or one named coding,
  * optionally carrying a quality (q) value. */
sealed trait HttpEncodingRange { self =>
  import HttpEncodingRange._

  /** Optional quality value (the `q` parameter) attached to this range. */
  def qValue: Option[Float]

  /** Returns a copy of this range with the q-value replaced.
    *
    * Note: the original matched `case _: Any`, which only worked because the
    * import above shadows `scala.Any` with `HttpEncodingRange.Any`; a
    * constructor pattern makes the intent explicit and keeps both cases
    * reachable even if the import changes. */
  def updateQValue(qValue:Option[Float]): HttpEncodingRange = self match {
    case Any(_) => Any(qValue)
    case one: One => one.copy(qValue = qValue)
  }
}
object HttpEncodingRange {

  /** Matches any content-coding (the `*` form). */
  sealed case class Any(qValue: Option[Float]) extends HttpEncodingRange
  /** Matches a single named content-coding. */
  sealed case class One(encoding: HttpEncoding, qValue: Option[Float]) extends HttpEncodingRange

  /** Codec for one encoding range with an optional semicolon-separated q-value. */
  val codec : Codec[HttpEncodingRange] = {
    // Try the `*` wildcard first, then fall back to a named encoding;
    // the q-value is filled in by the outer parametrized codec below.
    val encCodec:Codec[HttpEncodingRange] = {
      choice(
        starCodec.xmap[Any](_ => Any(None), _ => ()).upcast
        , HttpEncoding.codec.xmap[One](enc => One(enc, None), _.encoding).upcast
      )
    }

    // Decode: attach the parsed q-value; encode: split range and q-value apart.
    parametrized(semicolon, encCodec, qValueCodec).xmap(
      { case (enc, qv) => enc.updateQValue(qv) }
      , enc => enc -> enc.qValue
    )
  }
}
| Spinoco/protocol | http/src/main/scala/spinoco/protocol/http/header/value/HttpEncodingRange.scala | Scala | mit | 1,004 |
package ingraph.ire.messages
import ingraph.ire.datatypes.Tuple
/** Forwarder that hash-partitions tuples across two or more children. */
trait ForkingForwarder extends Forwarder {
  val children: Vector[ReteMessage => Unit]

  if (children.size < 2)
    throw new IllegalArgumentException("use base class for 1 child node")

  /** Hash function deciding which child receives a given tuple. */
  def forwardHashFunction(n: Tuple): Int

  // Math.floorMod is always in [0, children.size) for a positive divisor,
  // unlike Math.abs(h) % n, which is negative when h == Int.MinValue
  // (Math.abs(Int.MinValue) == Int.MinValue) and would index out of bounds.
  private def childIndex(node: Tuple): Int =
    Math.floorMod(forwardHashFunction(node), children.size)

  /** Splits the change set by target child and forwards the non-empty pieces. */
  def forward(cs: ChangeSet) = {
    cs.positive.groupBy(childIndex).foreach(
      kv => if (kv._2.nonEmpty) children(kv._1)(ChangeSet(positive = kv._2)))
    cs.negative.groupBy(childIndex).foreach(
      kv => if (kv._2.nonEmpty) children(kv._1)(ChangeSet(negative = kv._2)))
  }

  /** Terminators must reach every child, regardless of partitioning. */
  def forward(t: TerminatorMessage) = children.foreach(_ (t))
}
/** Forwarder with exactly one downstream consumer. */
trait SingleForwarder extends Forwarder {
  val next: ReteMessage => Unit

  /** Forwards the change set downstream, dropping entirely empty ones. */
  def forward(cs: ChangeSet) = {
    val isEmpty = cs.positive.isEmpty && cs.negative.isEmpty
    if (!isEmpty) next(cs)
  }

  /** Terminators are always propagated, even with no pending changes. */
  def forward(terminator: TerminatorMessage) = next(terminator)
}
}
/** Common interface for Rete nodes that push messages to downstream consumers. */
trait Forwarder {
  // Node name; used for identification by implementors.
  val name: String

  /** Propagates a change set downstream. */
  def forward(cs: ChangeSet)
  /** Propagates a terminator (end-of-transaction marker) downstream. */
  def forward(terminator: TerminatorMessage)
}
| FTSRG/ingraph | ire/src/main/scala/ingraph/ire/messages/Forwarder.scala | Scala | epl-1.0 | 1,122 |
/*
* Copyright 2013 Michael Krolikowski
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.mkroli.dns4s.section.resource
import com.github.mkroli.dns4s.MessageBuffer
import com.github.mkroli.dns4s.section.Resource
/** DNS CNAME resource record payload: the canonical (alias target) domain name. */
case class CNameResource(cname: String) extends Resource {
  // Serialization: write the domain name into the outgoing message buffer.
  def apply(buf: MessageBuffer) = buf.putDomainName(cname)
}

object CNameResource {
  // Deserialization: read the domain name from the incoming message buffer.
  def apply(buf: MessageBuffer) = new CNameResource(buf.getDomainName())
}
| mesosphere/dns4s | core/src/main/scala/com/github/mkroli/dns4s/section/resource/CNameResource.scala | Scala | apache-2.0 | 966 |
/*start*/(123: Int).toInt/*end*/
//Int | LPTK/intellij-scala | testdata/typeInference/bugs4/SCL2663.scala | Scala | apache-2.0 | 38 |
package io.github.facaiy.dag.parallel
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.Duration
import io.github.facaiy.dag.core.{DAGNode, InputNode, InternalNode, LazyCell}
import io.github.facaiy.dag.Result
/**
 * Implicit helpers for evaluating a DAG of computations in parallel by lifting
 * every node's result into a scala.concurrent.Future.
 * (Created by facai on 6/2/17.)
 */
object Implicits { self =>

  /** Lifts each node's computation into a Future so that independent nodes can
    * run concurrently on the given execution context. */
  private[dag] def toParallel[K, V](nodes: Seq[DAGNode[K, V]])
                                   (implicit executor: ExecutionContext
                                   ): Seq[DAGNode[K, Future[V]]] = {
    def toFutureCell(n: DAGNode[K, V]): DAGNode[K, Future[V]] =
      n match {
        case InputNode(k, f) =>
          // TODO(facai), use `compose` to combine `f` and `Future.apply`
          val g = () => Future(f())
          InputNode(k, g)
        case InternalNode(k, ds, f) =>
          // Wait for all dependency futures, then apply the node function.
          val g = (xs: Seq[Future[V]]) => Future.sequence(xs).map(f)
          InternalNode(k, ds, g)
      }

    nodes.map(toFutureCell)
  }

  /** Wires the Future-lifted nodes using the serial implementation's topology. */
  private[dag] def toLazyNetwork[K, V](nodes: Seq[DAGNode[K, V]])
                                      (executor: ExecutionContext): Map[K, LazyCell[Future[V]]] = {
    import io.github.facaiy.dag.serial
    serial.Implicits.toLazyNetwork(self.toParallel(nodes)(executor))
  }

  import scala.language.implicitConversions

  /** Syntax for building a lazy network from a node sequence.
    * The no-arg variant runs on the global ExecutionContext; prefer the
    * explicit-executor overload in library code. */
  implicit class Nodes[K, V](nodes: Seq[DAGNode[K, V]]) {
    import scala.concurrent.ExecutionContext.Implicits.global

    def toLazyNetwork: Map[K, LazyCell[Future[V]]] = this.toLazyNetwork(global)

    def toLazyNetwork(executor: ExecutionContext): Map[K, LazyCell[Future[V]]] =
      self.toLazyNetwork(nodes)(executor)
  }

  /** Blocking accessor for a lazily-computed Future result.
    * NOTE(review): getValue blocks the calling thread via Await (Duration.Inf
    * by default) — call only at the edge of the program or in tests. */
  implicit class ParResult[A](lc: LazyCell[Future[A]]) extends Result[A] {
    def getValue: A = getValue(Duration.Inf)

    def getValue(duration: Duration): A =
      Await.result(lc.get(), duration)
  }
}
| facaiy/DAG-lite | src/main/scala/io/github/facaiy/dag/parallel/Implicits.scala | Scala | mit | 1,821 |
package special.sigma
import java.util
import org.ergoplatform.SigmaConstants.ScriptCostLimit
import org.ergoplatform.dsl.{ContractSpec, SigmaContractSyntax, TestContractSpec}
import org.ergoplatform.validation.{SigmaValidationSettings, ValidationRules}
import org.ergoplatform._
import org.scalacheck.Arbitrary._
import org.scalacheck.Gen.frequency
import org.scalacheck.{Arbitrary, Gen}
import org.scalatest.prop.PropertyChecks
import org.scalatest.{Matchers, PropSpec}
import scalan.RType
import scalan.RType._
import scalan.util.BenchmarkUtil
import scalan.util.Extensions._
import scalan.util.CollectionUtil._
import sigmastate.SType.AnyOps
import sigmastate.Values.{ByteArrayConstant, Constant, ConstantNode, ErgoTree, IntConstant, SValue}
import sigmastate.basics.DLogProtocol.{DLogProverInput, ProveDlog}
import sigmastate.basics.{SigmaProtocol, SigmaProtocolCommonInput, SigmaProtocolPrivateInput}
import sigmastate.eval.Extensions._
import sigmastate.eval.{CompiletimeIRContext, CostingBox, CostingDataContext, Evaluation, IRContext, SigmaDsl}
import sigmastate.helpers.TestingHelpers._
import sigmastate.helpers.{ErgoLikeContextTesting, ErgoLikeTestInterpreter, SigmaPPrint}
import sigmastate.interpreter.EvalSettings.{AotEvaluationMode, EvaluationMode, JitEvaluationMode}
import sigmastate.interpreter.Interpreter.{ScriptEnv, VerificationResult}
import sigmastate.interpreter._
import sigmastate.lang.Terms.ValueOps
import sigmastate.serialization.ValueSerializer
import sigmastate.serialization.generators.ObjectGenerators
import sigmastate.utils.Helpers._
import sigmastate.utxo.{DeserializeContext, DeserializeRegister}
import sigmastate.{JitCost, SSigmaProp, SType, eval}
import special.collection.{Coll, CollType}
import spire.syntax.all.cfor
import scala.collection.mutable
import scala.math.Ordering
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}
class SigmaDslTesting extends PropSpec
with PropertyChecks
with Matchers
with SigmaTestingData with SigmaContractSyntax
with ObjectGenerators { suite =>
  // Expose the Coll factory from the contract-syntax base class to the tests.
  override def Coll[T](items: T*)(implicit cT: RType[T]): Coll[T] = super.Coll(items:_*)

  // Contract spec backed by the testing IR context; used to build test contracts.
  lazy val spec: ContractSpec = TestContractSpec(suite)(new TestingIRContext)

  // No predefined environment bindings for compiled scripts.
  override def contractEnv: ScriptEnv = Map()

  /** Creates a fresh IR context with per-operation time measurement enabled
    * and entry printing disabled. */
  def createIR(): IRContext = new TestingIRContext {
    override val okPrintEvaluatedEntries: Boolean = false
    override val okMeasureOperationTime: Boolean = true
  }
  /** Wraps a reference Scala function and a tested implementation into a single
    * checker: both are run on x under Try and must either succeed with equal
    * values or fail with the same root-cause exception class.
    * @return the tested implementation's result (value and cost details) */
  def checkEq[A,B](scalaFunc: A => B)(g: A => (B, CostDetails)): A => Try[(B, CostDetails)] = { x: A =>
    val b1 = Try(scalaFunc(x)); val b2 = Try(g(x))
    (b1, b2) match {
      case (Success(b1), res @ Success((b2, _))) =>
        assert(b1 == b2)
        res
      case (Failure(t1), res @ Failure(t2)) =>
        // Both failed: they must fail for the same reason (root-cause class).
        val c1 = rootCause(t1).getClass
        val c2 = rootCause(t2).getClass
        if (c1 != c2) {
          assert(c1 == c2,
            s"""Different errors:
              |First result: $t1
              |Second result: $t2
              |""".stripMargin)
        }
        res
      case _ =>
        // One side succeeded while the other failed: always an error;
        // report the failing side's root cause.
        val cause = if (b1.isFailure)
          rootCause(b1.asInstanceOf[Failure[_]].exception)
        else
          rootCause(b2.asInstanceOf[Failure[_]].exception)

        sys.error(
          s"""Should succeed with the same value or fail with the same exception, but was:
            |First result: $b1
            |Second result: $b2
            |Root cause: $cause
            |""".stripMargin)
    }
  }
def checkEq2[A,B,R](f: (A, B) => R)(g: (A, B) => R): (A,B) => Unit = { (x: A, y: B) =>
val r1 = f(x, y); val r2 = g(x, y)
assert(r1.getClass == r2.getClass)
assert(r1 == r2)
}
  /** Returns a random valid index into an array of the given length.
    * NOTE(review): relies on Gen.sample.get, which throws if the generator
    * fails to produce a value — acceptable in test code. */
  def getArrayIndex(len: Int): Int = {
    val index = Gen.choose(0, len - 1)
    index.sample.get
  }

  /** Generate indices for an array of a given length.
    * @return unordered array of indices with possibly repeated elements
    */
  def genIndices(arrLength: Int): Gen[Array[Int]] = for {
    nIndexes <- Gen.choose(0, arrLength)
    indices <- Gen.containerOfN[Array, Int](nIndexes, Gen.choose(0, arrLength - 1))
  } yield indices
  /** Prover with three fixed DLog key pairs. Only sk1 is registered as a usable
    * secret (see the comment on `secrets`), while all three public keys are
    * exposed via `pubKeys` for building test propositions. */
  class FeatureProvingInterpreter extends ErgoLikeTestInterpreter()(new TestingIRContext) with ProverInterpreter {

    /** Parses a base-10 decimal string into a DLog prover secret. */
    def decodeSecretInput(decimalStr: String): DLogProverInput = DLogProverInput(BigInt(decimalStr).bigInteger)

    // Deterministic secrets so test vectors are reproducible across runs.
    val sk1: DLogProverInput = decodeSecretInput("416167686186183758173232992934554728075978573242452195968805863126437865059")
    val sk2: DLogProverInput = decodeSecretInput("34648336872573478681093104997365775365807654884817677358848426648354905397359")
    val sk3: DLogProverInput = decodeSecretInput("50415569076448343263191022044468203756975150511337537963383000142821297891310")

    val secrets: Seq[SigmaProtocolPrivateInput[_ <: SigmaProtocol[_], _ <: SigmaProtocolCommonInput[_]]] = {
      // Note, not all secrets are used, which is required by checkVerify
      // This is to make AtLeast to be unproved and thus the verify is successfull
      // because of the other condition in SigmaOr (see checkVerify)
      val dlogs: IndexedSeq[DLogProverInput] = Vector(sk1)
      dlogs
    }

    val pubKeys: Seq[ProveDlog] = Vector(sk1, sk2, sk3)
        .collect { case in: DLogProverInput => in.publicImage }
  }
  // Default for whether checkEquality appends the script source to logged lines.
  val LogScriptDefault: Boolean = false

  // Dynamically-scoped flag toggling new-version behavior
  // (not referenced in this part of the file — confirm usage elsewhere).
  val isNewVersion = new scala.util.DynamicVariable(false)

  // Placeholder for predefined scripts shared across tests.
  val predefScripts = Seq[String]()
  /** Descriptor of the language feature. */
  trait Feature[A, B] { feature =>
    /** Script containing this feature. */
    def script: String

    /** Semantics of this feature in ErgoTree v1 given by scala code */
    def scalaFunc: A => B

    /** Semantics of this feature in ErgoTree v2 given by scala code */
    def scalaFuncNew: A => B = scalaFunc

    /** Expression which represents the test case code. */
    def expectedExpr: Option[SValue]

    /** Function that executes the feature using v4.x interpreter implementation. */
    def oldImpl: () => CompiledFunc[A, B]

    /** Function that executes the feature using v5.x interpreter implementation. */
    def newImpl: () => CompiledFunc[A, B]

    /** Evaluation settings shared by both interpreters when running this feature. */
    def evalSettings: EvalSettings

    /** When true, a suggested expectedExpr is printed if none was provided. */
    def printExpectedExpr: Boolean

    /** When true, the script source is appended to logged input/output lines. */
    def logScript: Boolean

    /** Called to print test case expression (when it is not given).
      * Can be used to create regression test cases. */
    def printSuggestion(cf: CompiledFunc[_,_]): Unit = {
      print(s"No expectedExpr for ")
      SigmaPPrint.pprintln(cf.script, height = 150)
      print("Use ")
      SigmaPPrint.pprintln(cf.expr, height = 150)
      println()
    }

    /** Checks the result of the front-end compiler against expectedExpr.
      * Used to catch regression errors in front-end compiler.
      */
    def checkExpectedExprIn(cf: CompiledFunc[_,_]): Boolean = {
      expectedExpr match {
        case Some(e) =>
          if (cf.expr != null && cf.expr != e) {
            printSuggestion(cf)
            cf.expr shouldBe e
          }
        case None if printExpectedExpr =>
          printSuggestion(cf)
      }
      true
    }

    /** v3 and v4 implementation*/
    private var _oldF: CompiledFunc[A, B] = _
    /** Memoized accessor: compiles via oldImpl on first access and validates
      * the compiled expression against expectedExpr.
      * (Lazy-init via var is not thread-safe; tests run single-threaded.) */
    def oldF: CompiledFunc[A, B] = {
      if (_oldF == null) {
        _oldF = oldImpl()
        checkExpectedExprIn(_oldF)
      }
      _oldF
    }

    /** v5 implementation*/
    private var _newF: CompiledFunc[A, B] = _
    /** Memoized accessor for the v5.x compiled function (same pattern as oldF). */
    def newF: CompiledFunc[A, B] = {
      if (_newF == null) {
        _newF = newImpl()
        checkExpectedExprIn(_newF)
      }
      _newF
    }

    /** Compares the old and new implementations against
      * semantic function (scalaFunc) on the given input.
      *
      * @param input data which is used to execute feature
      * @param logInputOutput if true, then pretty-print input and output values
      * @return result of feature execution */
    def checkEquality(input: A, logInputOutput: Boolean = false): Try[(B, CostDetails)]

    /** Depending on the featureType compares the old and new implementations against
      * semantic function (scalaFunc) on the given input, also checking the given expected result.
      */
    def checkExpected(input: A, expectedResult: Expected[B]): Unit

    /** Tests this feature on the given input.
      * @param input data value
      * @param expectedResult the result which is expected
      */
    def testCase(input: A, expectedResult: Try[B],
                 printTestCases: Boolean = PrintTestCasesDefault,
                 failOnTestVectors: Boolean = FailOnTestVectorsDefault): Unit

    /** Tests this feature by embedding it in the verification script.
      * @param input data value
      * @param expectedResult the result values which are expected
      * @see checkVerify
      */
    def verifyCase(input: A, expectedResult: Expected[B],
                   printTestCases: Boolean = PrintTestCasesDefault,
                   failOnTestVectors: Boolean = FailOnTestVectorsDefault): Unit

    /** Creates a new ErgoLikeContext using given [[CostingDataContext]] as template.
      * Copies most of the data from ctx and the missing data is taken from the args.
      * This is a helper method to be used in tests only.
      */
    def createErgoLikeContext(ctx: CostingDataContext,
                              validationSettings: SigmaValidationSettings,
                              costLimit: Long,
                              initCost: Long
                             ): ErgoLikeContext = {
      val treeData = SigmaDsl.toAvlTreeData(ctx.lastBlockUtxoRootHash)
      val dataBoxes = ctx.dataInputs.toArray.map(SigmaDsl.toErgoBox)
      val boxesToSpend = ctx.inputs.toArray.map(SigmaDsl.toErgoBox)
      val txInputs = boxesToSpend.map(b => Input(b.id, ProverResult.empty))
      val txDataInputs = dataBoxes.map(b => DataInput(b.id))
      val txOutputCandidates = ctx.outputs.toArray.map(SigmaDsl.toErgoBox)
      val tx = new ErgoLikeTransaction(
        txInputs, txDataInputs, txOutputCandidates.toIndexedSeq)
      // SELF is located by byte-wise id comparison among the inputs.
      val selfIndex = boxesToSpend.indexWhere(b => util.Arrays.equals(b.id, ctx.selfBox.id.toArray))
      // Only non-null context vars are carried over into the extension.
      val extension = ContextExtension(
        values = ctx.vars.toArray.zipWithIndex.collect {
          case (v, i) if v != null =>
            val tpe = Evaluation.rtypeToSType(v.tVal)
            i.toByte -> ConstantNode(v.value.asWrappedType, tpe)
        }.toMap
      )
      new ErgoLikeContext(
        treeData, ctx.headers, ctx.preHeader,
        dataBoxes, boxesToSpend, tx, selfIndex,
        extension, validationSettings, costLimit, initCost,
        activatedVersionInTests)
    }

    /** Executes the default feature verification wrapper script for the specific ErgoTree
      * version using both v4.x and v5.x interpreters.
      * @param input the given test case input data
      * @param expected the given expected results (values and costs)
      */
    def checkVerify(input: A, expected: Expected[B]): Unit = {
      val tpeA = Evaluation.rtypeToSType(oldF.tA)
      val tpeB = Evaluation.rtypeToSType(oldF.tB)

      // Create synthetic ErgoTree which uses all main capabilities of evaluation machinery.
      // 1) first-class functions (lambdas); 2) Context variables; 3) Registers; 4) Equality
      // for all types; 5) Embedding of boolean to SigmaProp; 6) Sigma propositions (&&, ||, AtLeast)
      // 7) Deserialization from SELF and Context
      // Every language Feature is tested as part of this wrapper script.
      // Inclusion of all the features influences the expected cost estimation values
      def compiledTree(prover: FeatureProvingInterpreter) = {
        val code =
          s"""{
            |  val func = ${oldF.script}
            |  val res1 = func(getVar[${oldF.tA.name}](1).get)
            |  val res2 = SELF.R4[${oldF.tB.name}].get
            |  sigmaProp(res1 == res2) && pkAlice
            |}
          """.stripMargin
        val IR = new CompiletimeIRContext
        val pkAlice = prover.pubKeys.head.toSigmaProp
        val env = Map("pkAlice" -> pkAlice)

        // Compile script the same way it is performed by applications (i.e. via Ergo Appkit)
        val prop = ErgoScriptPredef.compileWithCosting(
          env, code, ErgoAddressEncoder.MainnetNetworkPrefix)(IR).asSigmaProp

        // Add additional oparations which are not yet implemented in ErgoScript compiler
        val multisig = sigmastate.AtLeast(
          IntConstant(2),
          Array(
            pkAlice,
            DeserializeRegister(ErgoBox.R5, SSigmaProp),  // deserialize pkBob
            DeserializeContext(2, SSigmaProp)))           // deserialize pkCarol
        val header = ErgoTree.headerWithVersion(ergoTreeVersionInTests)
        ErgoTree.withSegregation(header, sigmastate.SigmaOr(prop, multisig))
      }

      // Builds an ErgoLikeContext carrying the input (as context var 1) and the
      // expected value (in register R4 of SELF) so the wrapper script can compare them.
      def ergoCtx(prover: FeatureProvingInterpreter, compiledTree: ErgoTree, expectedValue: B) = {
        val pkBobBytes = ValueSerializer.serialize(prover.pubKeys(1).toSigmaProp)
        val pkCarolBytes = ValueSerializer.serialize(prover.pubKeys(2).toSigmaProp)
        val newRegisters = Map(
          ErgoBox.R4 -> Constant[SType](expectedValue.asInstanceOf[SType#WrappedType], tpeB),
          ErgoBox.R5 -> ByteArrayConstant(pkBobBytes)
        )

        val ctx = input match {
          case ctx: CostingDataContext =>
            // the context is passed as function argument (see func in the script)
            // Since Context is singleton, we should use this instance as the basis
            // for execution of verify instead of a new dummy context.
            val self = ctx.selfBox.asInstanceOf[CostingBox]
            val newSelf = self.copy(
              ebox = updatedRegisters(self.ebox, newRegisters)
            )

            // We add ctx as it's own variable with id = 1
            val ctxVar = eval.Extensions.toAnyValue[special.sigma.Context](ctx)(special.sigma.ContextRType)
            val carolVar = eval.Extensions.toAnyValue[Coll[Byte]](pkCarolBytes.toColl)(RType[Coll[Byte]])
            val newCtx = ctx
                .withUpdatedVars(1 -> ctxVar, 2 -> carolVar)
                .copy(
                  selfBox = newSelf,
                  inputs = {
                    val selfIndex = ctx.inputs.indexWhere(b => b.id == ctx.selfBox.id, 0)
                    ctx.inputs.updated(selfIndex, newSelf)
                  })

            createErgoLikeContext(
              newCtx,
              ValidationRules.currentSettings,
              ScriptCostLimit.value,
              initCost = initialCostInTests.value
            )

          case _ =>
            ErgoLikeContextTesting.dummy(
              createBox(0, compiledTree, additionalRegisters = newRegisters),
              activatedVersionInTests)
                .withBindings(
                  1.toByte -> Constant[SType](input.asInstanceOf[SType#WrappedType], tpeA),
                  2.toByte -> ByteArrayConstant(pkCarolBytes))
                .withInitCost(initialCostInTests.value)
                .asInstanceOf[ErgoLikeContext]
        }
        ctx
      }

      if (expected.value.isSuccess) {
        // check v4.x interpreter
        val prover = new FeatureProvingInterpreter() {
          override val evalSettings: EvalSettings = feature.evalSettings.copy(
            evaluationMode = Some(AotEvaluationMode)
          )
        }
        val tree = compiledTree(prover)
        val ctx = ergoCtx(prover, tree, expected.value.get)
        val pr = prover.prove(tree, ctx, fakeMessage).getOrThrow
        val verificationCtx = ctx.withExtension(pr.extension)

        // run old v4.x interpreter
        val aotVerifier = new ErgoLikeTestInterpreter()(createIR()) {
          override val evalSettings: EvalSettings = feature.evalSettings.copy(
            evaluationMode = Some(AotEvaluationMode)
          )
        }
        val aotRes = aotVerifier.verify(tree, verificationCtx, pr, fakeMessage)
        checkExpectedResult(AotEvaluationMode, aotRes, expected.verificationCostOpt)
      }

      val newExpectedRes = expected.newResults(ergoTreeVersionInTests)
      val newExpectedValue = newExpectedRes._1.value
      if (newExpectedValue.isSuccess) {
        // check v5.0 interpreter
        val prover = new FeatureProvingInterpreter() {
          override val evalSettings: EvalSettings = feature.evalSettings.copy(
            evaluationMode = Some(JitEvaluationMode)
          )
        }
        val tree = compiledTree(prover)
        val ctx = ergoCtx(prover, tree, newExpectedValue.get)
        val pr = prover.prove(tree, ctx, fakeMessage).getOrThrow
        val verificationCtx = ctx.withExtension(pr.extension)

        // run new v5.0 interpreter
        val jitVerifier = new ErgoLikeTestInterpreter()(createIR()) {
          override val evalSettings: EvalSettings = feature.evalSettings.copy(
            evaluationMode = Some(JitEvaluationMode)
          )
        }
        val jitRes = jitVerifier.verify(tree, verificationCtx, pr, fakeMessage)
        val newCost = newExpectedRes._1.verificationCost
        checkExpectedResult(JitEvaluationMode, jitRes, newCost)
        if (newCost.isEmpty) {
          val res = jitRes.getOrThrow
          // new verification cost expectation is missing, print out actual cost results
          if (jitVerifier.evalSettings.printTestVectors) {
            printCostTestVector("Missing New Cost", input, res._2.toInt)
          }
        }
      }
    }

    /** Prints the actual cost test vector (when it is not defined). */
    private def printCostTestVector(title: String, input: Any, actualCost: Int) = {
      println(
        s"""-- $title ----------------------
          |ErgoTreeVersion: $ergoTreeVersionInTests
          |Input: $input
          |Script: $script
          |Actual New Verification Cost: $actualCost
          |""".stripMargin)
    }

    // NOTE(review): not referenced in this part of the file — possibly kept for
    // debugging; confirm usage before removing.
    private def checkEqualResults(res1: Try[VerificationResult], res2: Try[VerificationResult]): Unit = {
      (res1, res2) match {
        case (Success((v1, c1)), Success((v2, c2))) =>
          v1 shouldBe v2
        case (Failure(t1), Failure(t2)) =>
          rootCause(t1) shouldBe rootCause(t2)
        case _ =>
          res1 shouldBe res2
      }
    }

    /** Asserts that verification succeeded and (when given) that the reported
      * cost matches the expected cost for the named evaluation mode. */
    private def checkExpectedResult(
        evalMode: EvaluationMode,
        res: Try[VerificationResult], expectedCost: Option[Int]): Unit = {
      res match {
        case Success((ok, cost)) =>
          ok shouldBe true
          val verificationCost = cost.toIntExact
          if (expectedCost.isDefined) {
            assertResult(expectedCost.get,
              s"Evaluation Mode: ${evalMode.name}; Actual verify() cost $cost != expected ${expectedCost.get}")(verificationCost)
          }
        case Failure(t) => throw t
      }
    }
  }
  /** A number of times the newF function in each test feature is repeated.
    * In combination with [[sigmastate.eval.Profiler]] it allows to collect more accurate
    * timings for all operations.
    * @see SigmaDslSpecification */
  def nBenchmarkIters: Int = 0

  /** Warms up the JVM/JIT by executing `block` repeatedly before the tests run. */
  def warmUpBeforeAllTest(nTotalIters: Int)(block: => Unit) = {
    // each test case is executed nBenchmarkIters times in `check` method
    // so we account for that here
    val nIters = nTotalIters / (nBenchmarkIters + 1)
    repeatAndReturnLast(nIters)(block)
    System.gc()
    Thread.sleep(1000) // let GC to its job before running the tests
  }
  /** Feature that exists in both v4.x and v5.x interpreters with unchanged
    * semantics: the single scalaFunc models both versions. */
  case class ExistingFeature[A, B](
    script: String,
    scalaFunc: A => B,
    expectedExpr: Option[SValue],
    printExpectedExpr: Boolean = true,
    logScript: Boolean = LogScriptDefault,
    requireMCLowering: Boolean = false
  )(implicit IR: IRContext, tA: RType[A], tB: RType[B],
    override val evalSettings: EvalSettings) extends Feature[A, B] {

    implicit val cs = compilerSettingsInTests

    // v4.x (AOT) and v5.x (JIT) compiled implementations of the same script.
    val oldImpl = () => func[A, B](script)
    val newImpl = () => funcJit[A, B](script)

    def checkEquality(input: A, logInputOutput: Boolean = false): Try[(B, CostDetails)] = {
      // check the old implementation against Scala semantic function
      val oldRes = checkEq(scalaFunc)(oldF)(input)

      // check the new implementation against Scala semantic function;
      // newF is executed nBenchmarkIters + 1 times to improve timing accuracy
      val newRes = checkEq(scalaFunc)({ x =>
        var y: (B, CostDetails) = null
        val N = nBenchmarkIters + 1
        cfor(0)(_ < N, _ + 1) { _ =>
          y = newF(x)
        }
        y
      })(input)

      (oldRes, newRes) match {
        case (Success((oldRes, oldDetails)),
              Success((newRes, newDetails))) =>
          newRes shouldBe oldRes
          val oldCost = oldDetails.cost
          val newCost = newDetails.cost
          if (newDetails.cost != oldDetails.cost) {
            // NOTE(review): the message says "should not exceed" but the
            // predicate tolerates new cost up to 20x the old cost — confirm
            // the intended tolerance.
            assertResult(true,
              s"""
                |New cost should not exceed old cost: (new: $newCost, old:$oldCost)
                |ExistingFeature.checkEquality(
                |  script = "$script",
                |  compiledTree = "${SigmaPPrint(newF.compiledTree, height = 550, width = 150)}"
                |)
                |""".stripMargin
            )(oldCost >= newCost / 20)
            if (evalSettings.isLogEnabled) {
              println(
                s"""Different Costs (new: $newCost, old:$oldCost)
                  |  input = ${SigmaPPrint(input, height = 550, width = 150)}
                  |  script = "$script"
                  |
                  |""".stripMargin)
            }
          }
        case _ =>
          // at least one of the results is a failure: root causes must agree
          checkResult(rootCause(newRes), rootCause(oldRes), failOnTestVectors = true)
      }
      if (logInputOutput) {
        val scriptComment = if (logScript) " // " + script else ""
        val inputStr = SigmaPPrint(input, height = 550, width = 150)
        val oldResStr = SigmaPPrint(oldRes, height = 550, width = 150)
        println(s"($inputStr, $oldResStr),$scriptComment")
      }
      newRes
    }

    /** Depending on the featureType compares the old and new implementations against
      * semantic function (scalaFunc) on the given input, also checking the given expected result.
      */
    override def checkExpected(input: A, expected: Expected[B]): Unit = {
      // check the old implementation with Scala semantic
      val (oldRes, _) = checkEq(scalaFunc)(oldF)(input).get
      oldRes shouldBe expected.value.get

      if (!(newImpl eq oldImpl)) {
        // check the new implementation with Scala semantic
        val (newRes, _) = checkEq(scalaFunc)(newF)(input).get
        newRes shouldBe expected.value.get
      }
    }

    override def testCase(input: A,
                          expectedResult: Try[B],
                          printTestCases: Boolean,
                          failOnTestVectors: Boolean): Unit = {
      val res = checkEquality(input, printTestCases).map(_._1)
      checkResult(res, expectedResult, failOnTestVectors)
    }

    override def verifyCase(input: A,
                            expected: Expected[B],
                            printTestCases: Boolean,
                            failOnTestVectors: Boolean): Unit = {
      val funcRes = checkEquality(input, printTestCases)  // NOTE: funcRes comes from newImpl

      checkResult(funcRes.map(_._1), expected.value, failOnTestVectors)

      val newRes = expected.newResults(ergoTreeVersionInTests)
      val expectedTrace = newRes._2.fold(Seq.empty[CostItem])(_.trace)
      if (expectedTrace.isEmpty) {
        // new cost expectation is missing, print out actual cost results
        if (evalSettings.printTestVectors) {
          funcRes.foreach { case (_, newCost) =>
            printCostDetails(script, newCost)
          }
        }
      }
      else {
        // new cost expectation is specified, compare it with the actual result
        // TODO v5.0: uncomment to enable test vectors
        // funcRes.foreach { case (_, newCost) =>
        //   if (newCost.trace != expectedTrace) {
        //     printCostDetails(script, newCost)
        //     newCost.trace shouldBe expectedTrace
        //   }
        // }
      }

      checkVerify(input, expected)
    }
  }
  /** Feature whose semantics changed between v4.x and v5.x: scalaFunc models the
    * old behavior, scalaFuncNew the new one. */
  case class ChangedFeature[A: RType, B: RType](
    script: String,
    scalaFunc: A => B,
    override val scalaFuncNew: A => B,
    expectedExpr: Option[SValue],
    printExpectedExpr: Boolean = true,
    logScript: Boolean = LogScriptDefault,
    requireMCLowering: Boolean = false
  )(implicit IR: IRContext, override val evalSettings: EvalSettings)
      extends Feature[A, B] {

    implicit val cs = compilerSettingsInTests

    // v4.x (AOT) and v5.x (JIT) compiled implementations of the same script.
    val oldImpl = () => func[A, B](script)
    val newImpl = () => funcJit[A, B](script)

    def checkEquality(input: A, logInputOutput: Boolean = false): Try[(B, CostDetails)] = {
      // check the old implementation against Scala semantic function
      val oldRes = checkEq(scalaFunc)(oldF)(input)

      if (!(newImpl eq oldImpl)) {
        // check the new implementation against Scala semantic function
        // NOTE(review): evaluated only for its internal checks; the method
        // still returns the old implementation's result below.
        val newRes = checkEq(scalaFuncNew)(newF)(input)
      }
      if (logInputOutput) {
        val inputStr = SigmaPPrint(input, height = 550, width = 150)
        val oldResStr = SigmaPPrint(oldRes, height = 550, width = 150)
        val scriptComment = if (logScript) " // " + script else ""
        println(s"($inputStr, $oldResStr),$scriptComment")
      }
      oldRes
    }

    /** compares the old and new implementations against
      * semantic function (scalaFunc) on the given input, also checking the given expected result.
      */
    override def checkExpected(input: A, expected: Expected[B]): Unit = {
      // check the old implementation with Scala semantic
      val (oldRes, _) = checkEq(scalaFunc)(oldF)(input).get
      oldRes shouldBe expected.value.get

      if (!(newImpl eq oldImpl)) {
        // check the new implementation with Scala semantic
        val (newRes, _) = checkEq(scalaFuncNew)(newF)(input).get
        val newExpectedRes = expected.newResults(ergoTreeVersionInTests)
        newRes shouldBe newExpectedRes._1.value.get
      }
    }

    override def testCase(input: A,
                          expectedResult: Try[B],
                          printTestCases: Boolean,
                          failOnTestVectors: Boolean): Unit = {
      val res = checkEquality(input, printTestCases).map(_._1)
      checkResult(res, expectedResult, failOnTestVectors)
    }

    override def verifyCase(input: A,
                            expected: Expected[B],
                            printTestCases: Boolean,
                            failOnTestVectors: Boolean): Unit = {
      val funcRes = checkEquality(input, printTestCases)

      checkResult(funcRes.map(_._1), expected.value, failOnTestVectors)
      checkVerify(input, expected)
    }
  }
  /** Feature that does not exist in v4.x: the old implementation is expected to
    * fail, and only scalaFuncNew defines the semantics. */
  case class NewFeature[A: RType, B: RType](
    script: String,
    override val scalaFuncNew: A => B,
    expectedExpr: Option[SValue],
    printExpectedExpr: Boolean = true,
    logScript: Boolean = LogScriptDefault
  )(implicit IR: IRContext, override val evalSettings: EvalSettings)
      extends Feature[A, B] {
    // There is no old semantics for a new feature: always throws.
    override def scalaFunc: A => B = { x =>
      sys.error(s"Semantic Scala function is not defined for old implementation: $this")
    }
    implicit val cs = compilerSettingsInTests

    val oldImpl = () => func[A, B](script)
    val newImpl = oldImpl // funcJit[A, B](script) // TODO HF (16h): use actual new implementation here

    /** The old implementation must fail on any input for a new feature. */
    override def checkEquality(input: A, logInputOutput: Boolean = false): Try[(B, CostDetails)] = {
      val oldRes = Try(oldF(input))
      oldRes.isFailure shouldBe true
      if (!(newImpl eq oldImpl)) {
        val newRes = checkEq(scalaFuncNew)(newF)(input)
      }
      oldRes
    }

    override def checkExpected(input: A, expected: Expected[B]): Unit = {
      Try(oldF(input)).isFailure shouldBe true
      if (!(newImpl eq oldImpl)) {
        val (newRes, _) = checkEq(scalaFuncNew)(newF)(input).get
        val newExpectedRes = expected.newResults(ergoTreeVersionInTests)
        newRes shouldBe newExpectedRes._1.value.get
      }
    }

    override def testCase(input: A,
                          expectedResult: Try[B],
                          printTestCases: Boolean,
                          failOnTestVectors: Boolean): Unit = {
      val res = checkEquality(input, printTestCases).map(_._1)
      res.isFailure shouldBe true
      Try(scalaFuncNew(input)) shouldBe expectedResult
    }

    override def verifyCase(input: A,
                            expected: Expected[B],
                            printTestCases: Boolean,
                            failOnTestVectors: Boolean): Unit = {
      val funcRes = checkEquality(input, printTestCases)
      funcRes.isFailure shouldBe true
      // NOTE(review): testCase above checks scalaFuncNew, but this checks
      // scalaFunc, which always throws for NewFeature — confirm the asymmetry
      // is intended.
      Try(scalaFunc(input)) shouldBe expected.value
    }
  }
  /** Represents expected result, verification cost and costing trace for a single
    * interpreter run.
    * @param value expected results returned by feature function (and the corresponding Scala function)
    * @param verificationCost expected cost value of the verification execution
    */
  case class ExpectedResult[+A](value: Try[A], verificationCost: Option[Int])

  /** Represents expected results (aka test vectors) for a single feature test case.
    * @param oldResult expected results returned by v4.x interpreter
    * @see [[testCases]]
    */
  case class Expected[+A](oldResult: ExpectedResult[A]) {
    /** Expected results returned by v4.x interpreter on the feature function. */
    def value: Try[A] = oldResult.value

    /** Expected verification cost returned by v4.x interpreter on the feature function. */
    def verificationCostOpt: Option[Int] = oldResult.verificationCost

    /** One expected result for each supported ErgoTree version.
      * By default (and for most operations) the new values are equal to the old value for
      * all versions, which means there are no changes in operation semantics.
      * However, new verification costs are different for the old ones. To simplify
      * augmentation of test cases with new test vectors, the default value of None
      * signals that the test vectors should be defined. The test harness code can print
      * suggestions for new test vectors.
      */
    final def defaultNewResults: Seq[(ExpectedResult[A], Option[CostDetails])] = {
      // One slot per script version 0..MaxSupportedScriptVersion.
      val n = Interpreter.MaxSupportedScriptVersion + 1
      // NOTE: by default, tests vectors for both verification cost and cost details are not defined
      val res = ExpectedResult(oldResult.value, None)
      Array.fill(n)((res, None))
    }

    /** One expected result for each supported ErgoTree version.
      * This expectations are applied to v5.+ interpreter (i.e. new JITC based implementation).
      */
    val newResults: Seq[(ExpectedResult[A], Option[CostDetails])] = defaultNewResults
  }
object Expected {
/** Used when exception is expected.
* @param error expected during execution
*/
def apply[A](error: Throwable) = new Expected[A](ExpectedResult(Failure(error), None))
/** Used when the old and new value and costs are the same for all versions.
* @param value expected result of tested function
* @param cost expected verification cost
*/
def apply[A](value: Try[A], cost: Int): Expected[A] =
new Expected(ExpectedResult(value, Some(cost)))
/** Used when the old and new value and costs are the same for all versions.
* @param value expected result of tested function
* @param cost expected verification cost
* @param expectedDetails expected cost details for all versions
*/
def apply[A](value: Try[A], cost: Int, expectedDetails: CostDetails): Expected[A] =
new Expected(ExpectedResult(value, Some(cost))) {
override val newResults = defaultNewResults.map { case (r, _) =>
(r, Some(expectedDetails))
}
}
/** Used when the old and new value and costs are the same for all versions.
*
* @param value expected result of tested function
* @param cost expected verification cost
* @param expectedDetails expected cost details for all versions
* @param expectedNewCost expected new verification cost for all versions
*/
def apply[A](value: Try[A],
cost: Int,
expectedDetails: CostDetails,
expectedNewCost: Int): Expected[A] =
new Expected(ExpectedResult(value, Some(cost))) {
override val newResults = defaultNewResults.map {
case (ExpectedResult(v, _), _) =>
(ExpectedResult(v, Some(expectedNewCost)), Some(expectedDetails))
}
}
/** Used when operation semantics changes in new versions. For those versions expected
* test vectors can be specified.
*
* @param value value returned by feature function v4.x
* @param cost expected cost value of the verification execution (v4.x)
* @param newDetails expected cost details for all versions
* @param newCost expected new verification cost for all versions
* @param newVersionedResults new results returned by each changed feature function in
* v5.+ for each ErgoTree version.
*/
def apply[A](value: Try[A], cost: Int,
newDetails: CostDetails, newCost: Int,
newVersionedResults: Seq[(Int, (ExpectedResult[A], Option[CostDetails]))]): Expected[A] =
new Expected[A](ExpectedResult(value, Some(cost))) {
override val newResults = {
val commonNewResults = defaultNewResults.map {
case (res, _) =>
(ExpectedResult(res.value, Some(newCost)), Option(newDetails))
}
commonNewResults.updateMany(newVersionedResults)
}
}
}
  /** Describes existing language feature which should be equally supported in both
    * Script v1 (v3.x and v4.x releases) and Script v2 (v5.x) versions of the language.
    * A behavior of the given `script` is tested against semantic function.
    *
    * @param scalaFunc semantic function for both v1 and v2 script interpretations
    * @param script the script to be tested against semantic function
    * @param expectedExpr expected ErgoTree expression which corresponds to the given script
    * @param requireMCLowering NOTE(review): presumably forces MethodCall lowering during
    *                          compilation — confirm against ExistingFeature implementation
    * @return feature test descriptor object which can be used to execute this test case in
    *         various ways
    */
  def existingFeature[A: RType, B: RType]
      (scalaFunc: A => B, script: String,
       expectedExpr: SValue = null, requireMCLowering: Boolean = false)
      (implicit IR: IRContext, evalSettings: EvalSettings): Feature[A, B] = {
    ExistingFeature(
      script, scalaFunc, Option(expectedExpr),
      requireMCLowering = requireMCLowering)
  }
  /** Describes existing language feature which should be differently supported in both
    * Script v1 (v3.x and v4.x releases) and Script v2 (v5.x) versions of the language.
    * The behavior of the given `script` is tested against the given semantic functions.
    *
    * @param scalaFunc semantic function of v1 language version
    * @param scalaFuncNew semantic function of v2 language version
    * @param script the script to be tested against semantic functions
    * @param expectedExpr expected ErgoTree expression which corresponds to the given script
    *                     (null means "don't check the expression")
    * @return feature test descriptor object which can be used to execute this test case in
    *         various ways
    */
  def changedFeature[A: RType, B: RType]
      (scalaFunc: A => B, scalaFuncNew: A => B, script: String, expectedExpr: SValue = null)
      (implicit IR: IRContext, evalSettings: EvalSettings): Feature[A, B] = {
    ChangedFeature(script, scalaFunc, scalaFuncNew, Option(expectedExpr))
  }
  /** Describes a NEW language feature which must NOT be supported in v4 and
    * must BE supported in v5 of the language.
    *
    * @param scalaFunc semantic function which defines expected behavior of the given script
    * @param script the script to be tested against semantic function
    * @param expectedExpr expected ErgoTree expression which corresponds to the given script
    *                     (null means "don't check the expression")
    * @return feature test descriptor object which can be used to execute this test case in
    *         various ways
    */
  def newFeature[A: RType, B: RType]
      (scalaFunc: A => B, script: String, expectedExpr: SValue = null)
      (implicit IR: IRContext, es: EvalSettings): Feature[A, B] = {
    NewFeature(script, scalaFunc, Option(expectedExpr))
  }
  // Generator of Sigma Context instances derived from ErgoLikeContext samples.
  val contextGen: Gen[Context] = ergoLikeContextGen.map(c => c.toSigmaContext(isCost = false))
  implicit val arbContext: Arbitrary[Context] = Arbitrary(contextGen)

  /** NOTE, this should be `def` to allow overriding of generatorDrivenConfig in derived Spec classes. */
  def DefaultMinSuccessful: MinSuccessful = MinSuccessful(generatorDrivenConfig.minSuccessful)

  // Flip to `true` locally to log each executed test case.
  val PrintTestCasesDefault: Boolean = false // true
  // When true, mismatches against test vectors fail the suite (instead of printing suggestions).
  val FailOnTestVectorsDefault: Boolean = true
private def checkResult[B](res: Try[B], expectedRes: Try[B], failOnTestVectors: Boolean): Unit = {
(res, expectedRes) match {
case (Failure(exception), Failure(expectedException)) =>
rootCause(exception).getClass shouldBe expectedException.getClass
case _ =>
if (failOnTestVectors) {
val actual = rootCause(res)
if (expectedRes != actual) {
val actualPrinted = SigmaPPrint(actual, height = 150).plainText
assert(false, s"Actual: $actualPrinted")
}
}
else {
if (expectedRes != res) {
print("\nSuggested Expected Result: ")
SigmaPPrint.pprintln(res, height = 150)
}
}
}
}
  /** Test the given test cases with expected results (aka test vectors).
    * NOTE, in some cases (such as Context, Box, etc) sample generation is time consuming, so it
    * makes sense to factor it out.
    * @param cases (input, expected result) pairs — the test vectors
    * @param f the feature under test
    * @param preGeneratedSamples optional pre-generated samples to reduce execution time
    */
  def testCases[A: Ordering : Arbitrary : ClassTag, B]
      (cases: Seq[(A, Try[B])],
       f: Feature[A, B],
       printTestCases: Boolean = PrintTestCasesDefault,
       failOnTestVectors: Boolean = FailOnTestVectorsDefault,
       preGeneratedSamples: Option[Seq[A]] = None): Unit = {
    System.gc() // force GC to avoid occasional OOM exception
    val table = Table(("x", "y"), cases:_*)
    forAll(table) { (x: A, expectedRes: Try[B]) =>
      f.testCase(x, expectedRes, printTestCases, failOnTestVectors)
    }
    // additionally exercise the feature on generated (or pre-generated) samples
    test(preGeneratedSamples, f, printTestCases)
  }
  /** Test the given test cases with expected results AND costs (aka test vectors).
    * For all Success cases `f.checkVerify` is executed to exercise the whole
    * `Interpreter.verify` execution and assert the expected cost.
    *
    * NOTE, in some cases (such as Context, Box, etc) sample generation is time consuming, so it
    * makes sense to factor it out.
    *
    * @param cases (input, expected result + costs) pairs — the test vectors
    * @param f the feature under test
    * @param preGeneratedSamples optional pre-generated samples to reduce execution time
    *                            if None, then the given Arbitrary is used to generate samples
    */
  def verifyCases[A: Ordering : Arbitrary : ClassTag, B]
      (cases: Seq[(A, Expected[B])],
       f: Feature[A, B],
       printTestCases: Boolean = PrintTestCasesDefault,
       failOnTestVectors: Boolean = FailOnTestVectorsDefault,
       preGeneratedSamples: Option[Seq[A]] = None): Unit = {
    val table = Table(("x", "y"), cases:_*)
    forAll(table) { (x: A, expectedRes: Expected[B]) =>
      f.verifyCase(x, expectedRes, printTestCases, failOnTestVectors)
    }
    // additionally exercise the feature on generated (or pre-generated) samples
    test(preGeneratedSamples, f, printTestCases)
  }
def verifyCasesMany[A: Ordering : Arbitrary : ClassTag, B]
(cases: Seq[(A, Expected[B])],
features: Seq[Feature[A, B]],
printTestCases: Boolean = PrintTestCasesDefault,
failOnTestVectors: Boolean = FailOnTestVectorsDefault,
preGeneratedSamples: Option[Seq[A]] = None): Unit = {
features.foreach { f =>
verifyCases(cases, f, printTestCases, failOnTestVectors, preGeneratedSamples)
}
}
  /** Info about a single benchmark measurement.
    * @param input the input the feature was executed on
    * @param iteration 1-based index of the measured case
    * @param nIters number of iterations performed for this measurement
    * @param measuredTime measured time in nanoseconds
    */
  case class MeasureInfo[A](input: A, iteration: Int, nIters: Int, measuredTime: Long)

  /** Formats a single measurement into a printable line. */
  type MeasureFormatter[A] = MeasureInfo[A] => String
  /** Benchmarks the compiled feature function on the given inputs.
    * Each case is measured twice: once with tracing settings (not printed) and once
    * with tracing disabled (printed via `formatter`).
    * @param cases inputs to benchmark on
    * @param f feature whose script is compiled and measured
    * @param nIters number of iterations per measurement
    * @param formatter formats each printed measurement
    * @return per-case measured times (nanoseconds) of the no-trace runs
    */
  def benchmarkCases[A: Ordering : Arbitrary : ClassTag, B]
      (cases: Seq[A], f: Feature[A, B], nIters: Int, formatter: MeasureFormatter[A])
      (implicit IR: IRContext, evalSettings: EvalSettings): Seq[Long] = {
    val fNew = f.newF
    implicit val tA = fNew.tA
    implicit val tB = fNew.tB
    implicit val cs = defaultCompilerSettings
    // compiled with the ambient (tracing) settings
    val func = funcJitFast[A, B](f.script)
    // compiled with tracing and per-operation timing disabled
    val noTraceSettings = evalSettings.copy(
      isMeasureOperationTime = false,
      costTracingEnabled = false)
    val funcNoTrace = funcJitFast[A, B](f.script)(tA, tB, IR, noTraceSettings, cs)
    var iCase = 0
    val (res, total) = BenchmarkUtil.measureTimeNano {
      cases.map { x =>
        // sanity check: compiled function agrees with the feature's new implementation
        assert(func(x)._1 == f.newF(x)._1)
        iCase += 1
        // measures `nIters` calls of `func` on `x`; optionally prints via `formatter`
        def benchmarkCase(func: CompiledFunc[A,B], printOut: Boolean) = {
          val (_, t) = BenchmarkUtil.measureTimeNano {
            cfor(0)(_ < nIters, _ + 1) { i =>
              val res = func(x) // result intentionally discarded: only timing matters
            }
          }
          if (printOut) {
            val info = MeasureInfo(x, iCase, nIters, t)
            val out = formatter(info)
            println(out)
          }
          t
        }
        benchmarkCase(func, printOut = false) // warm-up / traced run, not reported
        benchmarkCase(funcNoTrace, printOut = true)
      }
    }
    println(s"Total time: ${total / 1000000} msec")
    res
  }
  /** Generate samples in sorted order.
    * @param gen generator to be used for sample generation
    * @param config generation configuration
    * @return array-backed ordered sequence of samples
    */
  def genSamples[A: Ordering: ClassTag](gen: Gen[A], config: PropertyCheckConfigParam): Seq[A] = {
    // wrap the generator and delegate to the Arbitrary-based overload
    implicit val arb: Arbitrary[A] = Arbitrary(gen)
    genSamples[A](config)
  }

  /** Generate samples in sorted order.
    * @param config generation configuration
    * @return array-backed ordered sequence of samples
    */
  def genSamples[A: Arbitrary: Ordering: ClassTag](config: PropertyCheckConfigParam): Seq[A] = {
    genSamples[A](config, Some(implicitly[Ordering[A]]))
  }
/** Generate samples with optional sorted order.
* @param config generation configuration
* @param optOrd optional ordering of the generated samples in the resuting sequence
* @return array-backed ordered sequence of samples
*/
def genSamples[A: Arbitrary: ClassTag](config: PropertyCheckConfigParam, optOrd: Option[Ordering[A]]): Seq[A] = {
val inputs = scala.collection.mutable.ArrayBuilder.make[A]()
forAll(config) { x: A =>
inputs += x
}
optOrd.fold(inputs.result())(implicit ord => inputs.result.sorted)
}
/** Test the given samples or generate new samples using the given Arbitrary.
* For each sample `f.checkEquality` is executed.
*/
def test[A: Arbitrary: Ordering : ClassTag, B]
(preGeneratedSamples: Option[Seq[A]],
f: Feature[A, B],
printTestCases: Boolean): Unit = {
// either get provides or generate new samples (in sorted order)
val samples = preGeneratedSamples.getOrElse(genSamples[A](DefaultMinSuccessful))
// then tests them, this will output a nice log of test cases (provided printTestCases == true)
samples.foreach { x =>
f.checkEquality(x, printTestCases)
}
}
  /** Tests the feature on the given (pre-generated) samples. */
  def test[A: Arbitrary : Ordering : ClassTag, B](samples: Seq[A], f: Feature[A, B]): Unit = {
    test(Some(samples), f, PrintTestCasesDefault)
  }

  /** Tests the feature on freshly generated samples. */
  def test[A: Arbitrary : Ordering : ClassTag, B]
      (f: Feature[A, B],
       printTestCases: Boolean = PrintTestCasesDefault): Unit = {
    test(None, f, printTestCases)
  }
  /** Represents generated samples for the type `A`. */
  abstract class Sampled[A] {
    /** An instance of [[Arbitrary]] which is used to generate samples. */
    def arbitrary: Arbitrary[A]

    /** Return a sequence of samples. */
    def samples: Seq[A]
  }

  /** Default implementation of [[Sampled]] backed by a fixed sample sequence. */
  case class SampledData[A](samples: Seq[A])(implicit val arbitrary: Arbitrary[A])
      extends Sampled[A]
  /** Arbitrary instance for each type descriptor. */
  private val arbitraryCache = new mutable.HashMap[RType[_], Arbitrary[_]]

  /** Lookup [[Arbitrary]] in the cache by type descriptor or create new instance and
    * add it to the cache.
    * NOTE: the inner match is not exhaustive — an unsupported type descriptor
    * throws a MatchError.
    */
  def lookupArbitrary[A](t: RType[A]): Arbitrary[A] = (arbitraryCache.get(t) match {
    case Some(arb) => arb
    case None =>
      val arb = (t match {
        case BooleanType => arbBool
        case ByteType => arbByte
        case ShortType => arbShort
        case IntType => arbInt
        case LongType => arbLong
        case BigIntRType => arbBigInt
        case GroupElementRType => arbGroupElement
        case SigmaPropRType => arbSigmaProp
        case BoxRType => arbBox
        case PreHeaderRType => arbPreHeader
        case HeaderRType => arbHeader
        case AvlTreeRType => arbAvlTree
        case AnyType => arbAnyVal
        case UnitType => arbUnit
        case p: PairType[a, b] =>
          // recursively obtain instances for both components
          implicit val arbA: Arbitrary[a] = lookupArbitrary[a](p.tFst)
          implicit val arbB: Arbitrary[b] = lookupArbitrary[b](p.tSnd)
          arbTuple2[a,b]
        case opt: OptionType[a] =>
          // 50/50 split between None and Some of a generated element
          Arbitrary(frequency((5, None), (5, for (x <- lookupArbitrary(opt.tA).arbitrary) yield Some(x))))
        case coll: CollType[a] =>
          implicit val elemArb: Arbitrary[a] = lookupArbitrary(coll.tItem)
          implicit val elemT: RType[a] = coll.tItem
          Arbitrary(collGen[a])
      }).asInstanceOf[Arbitrary[A]]
      arbitraryCache.put(t, arb)
      arb
  }).asInstanceOf[Arbitrary[A]]
/** Update cached [[Arbitrary]] with a new instance, which generates its data from the
* given [[Sampled]] instance (randomly selects oneOf sample).
*/
def updateArbitrary[A](t: RType[A], sampled: Sampled[A]) = {
t match {
case BigIntRType | GroupElementRType | SigmaPropRType |
BoxRType | PreHeaderRType | HeaderRType | AvlTreeRType |
_: CollType[_] | _: PairType[_,_] | _: OptionType[_] =>
val newArb = Arbitrary(Gen.oneOf(sampled.samples))
arbitraryCache.put(t, newArb)
case _ =>
}
}
  /** Sampled test data from each data type. */
  private val sampledCache = new mutable.HashMap[RType[_], Sampled[_]]

  /** Lookup [[Sampled]] test data in the cache by type descriptor or create a new instance and
    * add it to the cache.
    * Creating a new instance also replaces the cached Arbitrary for `t` with one that
    * draws from these samples (see [[updateArbitrary]]).
    */
  implicit def lookupSampled[A](implicit t: RType[A]): Sampled[A] = (sampledCache.get(t) match {
    case Some(s) => s
    case _ =>
      implicit val tagA = t.classTag
      implicit val arb = lookupArbitrary(t)
      val res = new SampledData[A](
        samples = genSamples[A](DefaultMinSuccessful, None))
      sampledCache.put(t, res)
      updateArbitrary(t, res)
      res
  }).asInstanceOf[Sampled[A]]
/** Call this function to prepare samples for the given type.
* They can later be retrieved using `lookupSampled`. */
def prepareSamples[A](implicit t: RType[A]) = {
lookupSampled[A]
}
  /** Helper implementation for ordering samples. */
  trait GroupElementOrdering extends Ordering[GroupElement] {
    /** Compares `x: ECPoint` string representation with `y: ECPoint` string for order.
      * NOTE: ordering is lexicographic on string representations, not numeric.
      * @return a negative integer, zero, or a positive integer as the
      * `x` is less than, equal to, or greater than `y`.
      */
    def compare(x: GroupElement, y: GroupElement): Int = {
      SigmaDsl.toECPoint(x).toString.compareTo(SigmaDsl.toECPoint(y).toString)
    }
  }
  implicit object GroupElementOrdering extends GroupElementOrdering

  /** Helper implementation for ordering samples. */
  trait AvlTreeOrdering extends Ordering[AvlTree] {
    /** Compares this `x: AvlTree` string representation with `y: AvlTree` string for order.
      * @return a negative integer, zero, or a positive integer as the
      * `x` is less than, equal to, or greater than `y`.
      */
    def compare(x: AvlTree, y: AvlTree): Int = {
      x.toString.compareTo(y.toString)
    }
  }
  implicit object AvlTreeOrdering extends AvlTreeOrdering
  /** Helper implementation for ordering samples. */
  class CollOrdering[T: Ordering] extends Ordering[Coll[T]] {
    // delegate to the standard element-wise Iterable ordering
    val O = Ordering[Iterable[T]]

    /** Compares this `x: Coll` with `y: Coll` using Ordering for underlying Array.
      * @return a negative integer, zero, or a positive integer as the
      * `x` is less than, equal to, or greater than `y`.
      */
    def compare(x: Coll[T], y: Coll[T]): Int = {
      O.compare(x.toArray, y.toArray)
    }
  }
  implicit def collOrdering[T: Ordering]: Ordering[Coll[T]] = new CollOrdering[T]

  /** Helper implementation for ordering samples. */
  trait BoxOrdering extends Ordering[Box] {
    /** Compares this `x: Box` string representation with `y: Box` string for order.
      * @return a negative integer, zero, or a positive integer as the
      * `x` is less than, equal to, or greater than `y`.
      */
    def compare(x: Box, y: Box): Int = {
      x.toString.compareTo(y.toString)
    }
  }
  implicit object BoxOrdering extends BoxOrdering
  /** Helper implementation for ordering samples. */
  trait PreHeaderOrdering extends Ordering[PreHeader] {
    /** Compares this `x: PreHeader` with `y: PreHeader` using block height.
      * @return a negative integer, zero, or a positive integer as the
      * `x` is less than, equal to, or greater than `y`.
      */
    def compare(x: PreHeader, y: PreHeader): Int = {
      Ordering.Int.compare(x.height, y.height)
    }
  }
  implicit object PreHeaderOrdering extends PreHeaderOrdering

  /** Helper implementation for ordering samples. */
  trait HeaderOrdering extends Ordering[Header] {
    /** Compares this `x: Header` with `y: Header` using block height.
      * @return a negative integer, zero, or a positive integer as the
      * `x` is less than, equal to, or greater than `y`.
      */
    def compare(x: Header, y: Header): Int = {
      Ordering.Int.compare(x.height, y.height)
    }
  }
  implicit object HeaderOrdering extends HeaderOrdering
  /** Helper implementation for ordering samples. */
  trait ContextOrdering extends Ordering[Context] {
    // lexicographic ordering on (height, SELF.id)
    val O: Ordering[(Int, Coll[Byte])] = Ordering[(Int, Coll[Byte])]

    /** Compares this `x: Context` with `y: Context` using block height and SELF.id.
      * @return a negative integer, zero, or a positive integer as the
      * `x` is less than, equal to, or greater than `y`.
      */
    def compare(x: Context, y: Context): Int = {
      O.compare((x.HEIGHT, x.SELF.id), (y.HEIGHT, y.SELF.id))
    }
  }
  implicit object ContextOrdering extends ContextOrdering

  /** Helper implementation for ordering samples. */
  trait SigmaPropOrdering extends Ordering[SigmaProp] {
    /** Compares this `x: SigmaProp` with `y: SigmaProp` using string representation.
      * @return a negative integer, zero, or a positive integer as the
      * `x` is less than, equal to, or greater than `y`.
      */
    def compare(x: SigmaProp, y: SigmaProp): Int = {
      x.toString.compareTo(y.toString)
    }
  }
  implicit object SigmaPropOrdering extends SigmaPropOrdering
}
| ScorexFoundation/sigmastate-interpreter | sigmastate/src/test/scala/special/sigma/SigmaDslTesting.scala | Scala | mit | 51,257 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataInputStream, DataOutputStream, File, IOException}
import java.security.PrivilegedExceptionAction
import java.text.DateFormat
import java.util.{Arrays, Comparator, Date, Locale}
import scala.collection.JavaConverters._
import scala.collection.immutable.Map
import scala.collection.mutable
import scala.collection.mutable.HashMap
import scala.util.control.NonFatal
import com.google.common.primitives.Longs
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, FileSystem, Path, PathFilter}
import org.apache.hadoop.fs.permission.FsAction
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.security.{Credentials, UserGroupInformation}
import org.apache.hadoop.security.token.{Token, TokenIdentifier}
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils
/**
* :: DeveloperApi ::
* Contains util methods to interact with Hadoop from Spark.
*/
@DeveloperApi
class SparkHadoopUtil extends Logging {
private val sparkConf = new SparkConf(false).loadFromSystemProperties(true)
val conf: Configuration = newConfiguration(sparkConf)
UserGroupInformation.setConfiguration(conf)
/**
* Runs the given function with a Hadoop UserGroupInformation as a thread local variable
* (distributed to child threads), used for authenticating HDFS and YARN calls.
*
* IMPORTANT NOTE: If this function is going to be called repeated in the same process
* you need to look https://issues.apache.org/jira/browse/HDFS-3545 and possibly
* do a FileSystem.closeAllForUGI in order to avoid leaking Filesystems
*/
def runAsSparkUser(func: () => Unit) {
createSparkUser().doAs(new PrivilegedExceptionAction[Unit] {
def run: Unit = func()
})
}
  /** Creates a remote-user UGI for the current Spark user (see Utils.getCurrentUserName)
   * and copies the current user's tokens into it. */
  def createSparkUser(): UserGroupInformation = {
    val user = Utils.getCurrentUserName()
    logDebug("creating UGI for user: " + user)
    val ugi = UserGroupInformation.createRemoteUser(user)
    // carry over any delegation tokens from the login user
    transferCredentials(UserGroupInformation.getCurrentUser(), ugi)
    ugi
  }
def transferCredentials(source: UserGroupInformation, dest: UserGroupInformation) {
for (token <- source.getTokens.asScala) {
dest.addToken(token)
}
}
  /**
   * Appends S3-specific, spark.hadoop.*, and spark.buffer.size configurations to a Hadoop
   * configuration.
   * Instance-level wrapper around the companion object's implementation.
   */
  def appendS3AndSparkHadoopConfigurations(conf: SparkConf, hadoopConf: Configuration): Unit = {
    SparkHadoopUtil.appendS3AndSparkHadoopConfigurations(conf, hadoopConf)
  }

  /**
   * Appends spark.hadoop.* configurations from a [[SparkConf]] to a Hadoop
   * configuration without the spark.hadoop. prefix.
   * Instance-level wrapper around the companion object's implementation.
   */
  def appendSparkHadoopConfigs(conf: SparkConf, hadoopConf: Configuration): Unit = {
    SparkHadoopUtil.appendSparkHadoopConfigs(conf, hadoopConf)
  }
/**
* Appends spark.hadoop.* configurations from a Map to another without the spark.hadoop. prefix.
*/
def appendSparkHadoopConfigs(
srcMap: Map[String, String],
destMap: HashMap[String, String]): Unit = {
// Copy any "spark.hadoop.foo=bar" system properties into destMap as "foo=bar"
for ((key, value) <- srcMap if key.startsWith("spark.hadoop.")) {
destMap.put(key.substring("spark.hadoop.".length), value)
}
}
  /**
   * Return an appropriate (subclass) of Configuration. Creating config can initializes some Hadoop
   * subsystems.
   */
  def newConfiguration(conf: SparkConf): Configuration = {
    SparkHadoopUtil.newConfiguration(conf)
  }

  /**
   * Add any user credentials to the job conf which are necessary for running on a secure Hadoop
   * cluster.
   * No-op here; presumably overridden by the YARN subclass — confirm in YarnSparkHadoopUtil.
   */
  def addCredentials(conf: JobConf) {}

  // Always false in this base class; the YARN variant presumably overrides it.
  def isYarnMode(): Boolean = { false }

  // No-op hooks for secret-key management in the base (non-YARN) implementation.
  def addSecretKeyToUserCredentials(key: String, secret: String) {}

  def getSecretKeyFromUserCredentials(key: String): Array[Byte] = { null }
  /** Returns the credentials attached to the current UGI. */
  def getCurrentUserCredentials(): Credentials = {
    UserGroupInformation.getCurrentUser().getCredentials()
  }

  /** Adds the given credentials to the current UGI. */
  def addCurrentUserCredentials(creds: Credentials): Unit = {
    UserGroupInformation.getCurrentUser.addCredentials(creds)
  }

  /** Logs the current process in to Kerberos from the given keytab file.
   * @throws SparkException if the keytab file does not exist
   */
  def loginUserFromKeytab(principalName: String, keytabFilename: String): Unit = {
    if (!new File(keytabFilename).exists()) {
      throw new SparkException(s"Keytab file: ${keytabFilename} does not exist")
    } else {
      logInfo("Attempting to login to Kerberos " +
        s"using principal: ${principalName} and keytab: ${keytabFilename}")
      UserGroupInformation.loginUserFromKeytab(principalName, keytabFilename)
    }
  }
/**
* Add or overwrite current user's credentials with serialized delegation tokens,
* also confirms correct hadoop configuration is set.
*/
private[spark] def addDelegationTokens(tokens: Array[Byte], sparkConf: SparkConf) {
UserGroupInformation.setConfiguration(newConfiguration(sparkConf))
val creds = deserialize(tokens)
logInfo(s"Adding/updating delegation tokens ${dumpTokens(creds)}")
addCurrentUserCredentials(creds)
}
  /**
   * Returns a function that can be called to find Hadoop FileSystem bytes read. If
   * getFSBytesReadOnThreadCallback is called from thread r at time t, the returned callback will
   * return the bytes read on r since t.
   */
  private[spark] def getFSBytesReadOnThreadCallback(): () => Long = {
    // sums bytes read across all FileSystem statistics for the CALLING thread
    val f = () => FileSystem.getAllStatistics.asScala.map(_.getThreadStatistics.getBytesRead).sum
    // remember which thread created the callback and its byte count at creation time
    val baseline = (Thread.currentThread().getId, f())

    /**
     * This function may be called in both spawned child threads and parent task thread (in
     * PythonRDD), and Hadoop FileSystem uses thread local variables to track the statistics.
     * So we need a map to track the bytes read from the child threads and parent thread,
     * summing them together to get the bytes read of this task.
     */
    new Function0[Long] {
      // latest bytes-read snapshot per thread id; guarded by its own monitor
      private val bytesReadMap = new mutable.HashMap[Long, Long]()

      override def apply(): Long = {
        bytesReadMap.synchronized {
          // refresh the entry for the calling thread, then sum all threads,
          // subtracting the creation-time baseline only for the creating thread
          bytesReadMap.put(Thread.currentThread().getId, f())
          bytesReadMap.map { case (k, v) =>
            v - (if (k == baseline._1) baseline._2 else 0)
          }.sum
        }
      }
    }
  }
  /**
   * Returns a function that can be called to find Hadoop FileSystem bytes written. If
   * getFSBytesWrittenOnThreadCallback is called from thread r at time t, the returned callback will
   * return the bytes written on r since t.
   *
   * NOTE(review): an earlier version of this doc said "@return None if the required method
   * can't be found", but the method unconditionally returns a `() => Long` — doc fixed.
   */
  private[spark] def getFSBytesWrittenOnThreadCallback(): () => Long = {
    val threadStats = FileSystem.getAllStatistics.asScala.map(_.getThreadStatistics)
    val f = () => threadStats.map(_.getBytesWritten).sum
    // snapshot at callback-creation time; the callback reports the delta since then
    val baselineBytesWritten = f()
    () => f() - baselineBytesWritten
  }
  /**
   * Get [[FileStatus]] objects for all leaf children (files) under the given base path. If the
   * given path points to a file, return a single-element collection containing [[FileStatus]] of
   * that file.
   */
  def listLeafStatuses(fs: FileSystem, basePath: Path): Seq[FileStatus] = {
    listLeafStatuses(fs, fs.getFileStatus(basePath))
  }

  /**
   * Get [[FileStatus]] objects for all leaf children (files) under the given base path. If the
   * given path points to a file, return a single-element collection containing [[FileStatus]] of
   * that file.
   */
  def listLeafStatuses(fs: FileSystem, baseStatus: FileStatus): Seq[FileStatus] = {
    // one level of listing; recursion happens through the outer method on each subdirectory
    def recurse(status: FileStatus): Seq[FileStatus] = {
      val (directories, leaves) = fs.listStatus(status.getPath).partition(_.isDirectory)
      leaves ++ directories.flatMap(f => listLeafStatuses(fs, f))
    }

    if (baseStatus.isDirectory) recurse(baseStatus) else Seq(baseStatus)
  }
  /** Lists all leaf directories (directories with no subdirectories) under `basePath`. */
  def listLeafDirStatuses(fs: FileSystem, basePath: Path): Seq[FileStatus] = {
    listLeafDirStatuses(fs, fs.getFileStatus(basePath))
  }

  /** Lists all leaf directories under `baseStatus`, which must itself be a directory. */
  def listLeafDirStatuses(fs: FileSystem, baseStatus: FileStatus): Seq[FileStatus] = {
    def recurse(status: FileStatus): Seq[FileStatus] = {
      val (directories, files) = fs.listStatus(status.getPath).partition(_.isDirectory)
      // a directory is a leaf when it contains no subdirectories
      val leaves = if (directories.isEmpty) Seq(status) else Seq.empty[FileStatus]
      leaves ++ directories.flatMap(dir => listLeafDirStatuses(fs, dir))
    }

    assert(baseStatus.isDirectory)
    recurse(baseStatus)
  }
  /** Returns true when the path's string form contains any glob metacharacter. */
  def isGlobPath(pattern: Path): Boolean = {
    pattern.toString.exists("{}[]*?\\\\".toSet.contains)
  }

  /** Expands a glob pattern using the FileSystem derived from this util's configuration. */
  def globPath(pattern: Path): Seq[Path] = {
    val fs = pattern.getFileSystem(conf)
    globPath(fs, pattern)
  }

  /** Expands a glob pattern on the given FileSystem; empty when nothing matches. */
  def globPath(fs: FileSystem, pattern: Path): Seq[Path] = {
    // globStatus returns null when there are no matches — normalize to an empty Seq
    Option(fs.globStatus(pattern)).map { statuses =>
      statuses.map(_.getPath.makeQualified(fs.getUri, fs.getWorkingDirectory)).toSeq
    }.getOrElse(Seq.empty[Path])
  }

  /** Expands the pattern only when it actually contains glob characters. */
  def globPathIfNecessary(pattern: Path): Seq[Path] = {
    if (isGlobPath(pattern)) globPath(pattern) else Seq(pattern)
  }

  def globPathIfNecessary(fs: FileSystem, pattern: Path): Seq[Path] = {
    if (isGlobPath(pattern)) globPath(fs, pattern) else Seq(pattern)
  }
/**
* Lists all the files in a directory with the specified prefix, and does not end with the
* given suffix. The returned {{FileStatus}} instances are sorted by the modification times of
* the respective files.
*/
def listFilesSorted(
remoteFs: FileSystem,
dir: Path,
prefix: String,
exclusionSuffix: String): Array[FileStatus] = {
try {
val fileStatuses = remoteFs.listStatus(dir,
new PathFilter {
override def accept(path: Path): Boolean = {
val name = path.getName
name.startsWith(prefix) && !name.endsWith(exclusionSuffix)
}
})
Arrays.sort(fileStatuses, new Comparator[FileStatus] {
override def compare(o1: FileStatus, o2: FileStatus): Int = {
Longs.compare(o1.getModificationTime, o2.getModificationTime)
}
})
fileStatuses
} catch {
case NonFatal(e) =>
logWarning("Error while attempting to list files from application staging dir", e)
Array.empty
}
}
private[spark] def getSuffixForCredentialsPath(credentialsPath: Path): Int = {
val fileName = credentialsPath.getName
fileName.substring(
fileName.lastIndexOf(SparkHadoopUtil.SPARK_YARN_CREDS_COUNTER_DELIM) + 1).toInt
}
  // Matches "${hadoopconf-<key>}" placeholders anywhere in a string (unanchored)
  private val HADOOP_CONF_PATTERN = "(\\\\$\\\\{hadoopconf-[^\\\\}\\\\$\\\\s]+\\\\})".r.unanchored

  /**
   * Substitute variables by looking them up in Hadoop configs. Only variables that match the
   * ${hadoopconf- .. } pattern are substituted.
   * Substitution is repeated (recursively) until no placeholder resolves to a config value.
   */
  def substituteHadoopVariables(text: String, hadoopConf: Configuration): String = {
    text match {
      case HADOOP_CONF_PATTERN(matched) =>
        logDebug(text + " matched " + HADOOP_CONF_PATTERN)
        // strip the "${hadoopconf-" prefix (13 chars) and the trailing "}"
        val key = matched.substring(13, matched.length() - 1) // remove ${hadoopconf- .. }
        val eval = Option[String](hadoopConf.get(key))
          .map { value =>
            logDebug("Substituted " + matched + " with " + value)
            text.replace(matched, value)
          }
        if (eval.isEmpty) {
          // The variable was not found in Hadoop configs, so return text as is.
          text
        } else {
          // Continue to substitute more variables.
          substituteHadoopVariables(eval.get, hadoopConf)
        }
      case _ =>
        logDebug(text + " didn't match " + HADOOP_CONF_PATTERN)
        text
    }
  }
  /**
   * Start a thread to periodically update the current user's credentials with new credentials so
   * that access to secured service does not fail.
   * No-op here; presumably overridden by the YARN subclass — confirm in YarnSparkHadoopUtil.
   */
  private[spark] def startCredentialUpdater(conf: SparkConf) {}

  /**
   * Stop the thread that does the credential updates.
   * No-op here; presumably overridden by the YARN subclass.
   */
  private[spark] def stopCredentialUpdater() {}

  /**
   * Return a fresh Hadoop configuration, bypassing the HDFS cache mechanism.
   * This is to prevent the DFSClient from using an old cached token to connect to the NameNode.
   */
  private[spark] def getConfBypassingFSCache(
      hadoopConf: Configuration,
      scheme: String): Configuration = {
    val newConf = new Configuration(hadoopConf)
    // e.g. "fs.hdfs.impl.disable.cache" — forces a new FileSystem instance per getFileSystem call
    val confKey = s"fs.${scheme}.impl.disable.cache"
    newConf.setBoolean(confKey, true)
    newConf
  }
/**
* Dump the credentials' tokens to string values.
*
* @param credentials credentials
* @return an iterator over the string values. If no credentials are passed in: an empty list
*/
private[spark] def dumpTokens(credentials: Credentials): Iterable[String] = {
if (credentials != null) {
credentials.getAllTokens.asScala.map(tokenToString)
} else {
Seq.empty
}
}
  /**
   * Convert a token to a string for logging.
   * If its an abstract delegation token, attempt to unmarshall it and then
   * print more details, including timestamps in human-readable form.
   *
   * @param token token to convert to a string
   * @return a printable string value.
   */
  private[spark] def tokenToString(token: Token[_ <: TokenIdentifier]): String = {
    val df = DateFormat.getDateTimeInstance(DateFormat.SHORT, DateFormat.SHORT, Locale.US)
    val buffer = new StringBuilder(128)
    buffer.append(token.toString)
    try {
      val ti = token.decodeIdentifier
      buffer.append("; ").append(ti)
      ti match {
        case dt: AbstractDelegationTokenIdentifier =>
          // include human times and the renewer, which the HDFS tokens toString omits
          buffer.append("; Renewer: ").append(dt.getRenewer)
          buffer.append("; Issued: ").append(df.format(new Date(dt.getIssueDate)))
          buffer.append("; Max Date: ").append(df.format(new Date(dt.getMaxDate)))
        case _ =>
      }
    } catch {
      // decodeIdentifier may fail for unknown token kinds; fall back to the basic string
      case e: IOException =>
        logDebug(s"Failed to decode $token: $e", e)
    }
    buffer.toString
  }
/**
 * POSIX-style permission check for the current user against a file status:
 * exactly one of the owner / group / other permission classes applies.
 * Logs the denial at debug level when the check fails.
 */
private[spark] def checkAccessPermission(status: FileStatus, mode: FsAction): Boolean = {
  val perm = status.getPermission
  val ugi = UserGroupInformation.getCurrentUser
  val allowed =
    if (ugi.getShortUserName == status.getOwner) {
      perm.getUserAction.implies(mode)
    } else if (ugi.getGroupNames.contains(status.getGroup)) {
      perm.getGroupAction.implies(mode)
    } else {
      perm.getOtherAction.implies(mode)
    }
  if (!allowed) {
    logDebug(s"Permission denied: user=${ugi.getShortUserName}, " +
      s"path=${status.getPath}:${status.getOwner}:${status.getGroup}" +
      s"${if (status.isDirectory) "d" else "-"}$perm")
  }
  allowed
}
/** Serialize Hadoop credentials into the token-storage wire format. */
def serialize(creds: Credentials): Array[Byte] = {
  val bytes = new ByteArrayOutputStream
  creds.writeTokenStorageToStream(new DataOutputStream(bytes))
  bytes.toByteArray
}
/** Rebuild Credentials from bytes produced by `serialize`. */
def deserialize(tokenBytes: Array[Byte]): Credentials = {
  val creds = new Credentials()
  val in = new DataInputStream(new ByteArrayInputStream(tokenBytes))
  creds.readTokenStorageStream(in)
  creds
}
/** Whether the given UGI was created through proxy-user impersonation. */
def isProxyUser(ugi: UserGroupInformation): Boolean =
  ugi.getAuthenticationMethod() == UserGroupInformation.AuthenticationMethod.PROXY
}
object SparkHadoopUtil {

  // Shared default instance for all non-YARN deployments.
  private lazy val hadoop = new SparkHadoopUtil

  // The YARN variant is loaded reflectively so spark-core has no compile-time
  // dependency on the yarn module; failing to load it in yarn mode is fatal.
  private lazy val yarn = try {
    Utils.classForName("org.apache.spark.deploy.yarn.YarnSparkHadoopUtil")
      .newInstance()
      .asInstanceOf[SparkHadoopUtil]
  } catch {
    case e: Exception => throw new SparkException("Unable to load YARN support", e)
  }

  val SPARK_YARN_CREDS_TEMP_EXTENSION = ".tmp"

  val SPARK_YARN_CREDS_COUNTER_DELIM = "-"

  /**
   * Number of records to update input metrics when reading from HadoopRDDs.
   *
   * Each update is potentially expensive because we need to use reflection to access the
   * Hadoop FileSystem API of interest (only available in 2.5), so we should do this sparingly.
   */
  private[spark] val UPDATE_INPUT_METRICS_INTERVAL_RECORDS = 1000

  /**
   * Returns the YARN-aware utility when SPARK_YARN_MODE is set (system property
   * first, then environment variable), otherwise the plain Hadoop utility.
   */
  def get: SparkHadoopUtil = {
    // Check each time to support changing to/from YARN
    val inYarnMode = java.lang.Boolean.parseBoolean(
      System.getProperty("SPARK_YARN_MODE", System.getenv("SPARK_YARN_MODE")))
    if (inYarnMode) {
      yarn
    } else {
      hadoop
    }
  }

  /**
   * Given an expiration date (e.g. for Hadoop Delegation Tokens) return the date
   * when a given fraction of the duration until the expiration date has passed.
   * Formula: current time + (fraction * (time until expiration))
   * @param expirationDate Drop-dead expiration date
   * @param fraction fraction of the time until expiration return
   * @return Date when the fraction of the time until expiration has passed
   */
  private[spark] def getDateOfNextUpdate(expirationDate: Long, fraction: Double): Long = {
    val now = System.currentTimeMillis
    (now + (fraction * (expirationDate - now))).toLong
  }

  /**
   * Returns a Configuration object with Spark configuration applied on top. Unlike
   * the instance method, this will always return a Configuration instance, and not a
   * cluster manager-specific type.
   */
  private[spark] def newConfiguration(conf: SparkConf): Configuration = {
    val hadoopConf = new Configuration()
    appendS3AndSparkHadoopConfigurations(conf, hadoopConf)
    hadoopConf
  }

  private def appendS3AndSparkHadoopConfigurations(
      conf: SparkConf,
      hadoopConf: Configuration): Unit = {
    // Note: this null check is around more than just access to the "conf" object to maintain
    // the behavior of the old implementation of this code, for backwards compatibility.
    if (conf != null) {
      // Explicitly check for S3 environment variables
      val keyId = System.getenv("AWS_ACCESS_KEY_ID")
      val accessKey = System.getenv("AWS_SECRET_ACCESS_KEY")
      if (keyId != null && accessKey != null) {
        // Mirror the credentials into every S3 filesystem flavor (s3, s3n, s3a).
        Seq("fs.s3.awsAccessKeyId", "fs.s3n.awsAccessKeyId", "fs.s3a.access.key")
          .foreach(hadoopConf.set(_, keyId))
        Seq("fs.s3.awsSecretAccessKey", "fs.s3n.awsSecretAccessKey", "fs.s3a.secret.key")
          .foreach(hadoopConf.set(_, accessKey))
        // Only the s3a connector understands session tokens.
        Option(System.getenv("AWS_SESSION_TOKEN")).foreach { sessionToken =>
          hadoopConf.set("fs.s3a.session.token", sessionToken)
        }
      }
      appendSparkHadoopConfigs(conf, hadoopConf)
      hadoopConf.set("io.file.buffer.size", conf.get("spark.buffer.size", "65536"))
    }
  }

  private def appendSparkHadoopConfigs(conf: SparkConf, hadoopConf: Configuration): Unit = {
    // Copy any "spark.hadoop.foo=bar" spark properties into conf as "foo=bar"
    val prefix = "spark.hadoop."
    conf.getAll.withFilter { case (key, _) => key.startsWith(prefix) }.foreach {
      case (key, value) => hadoopConf.set(key.stripPrefix(prefix), value)
    }
  }
}
| adrian-ionescu/apache-spark | core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala | Scala | apache-2.0 | 19,904 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.graph.scala.utils
import org.apache.flink.api.common.functions.MapFunction
import org.apache.flink.graph.Vertex
@SerialVersionUID(1L)
class Tuple2ToVertexMap[K, VV] extends MapFunction[(K, VV), Vertex[K, VV]] {

  /** Wraps an (id, value) pair into a graph [[Vertex]]. */
  override def map(value: (K, VV)): Vertex[K, VV] = {
    val (id, vertexValue) = value
    new Vertex(id, vertexValue)
  }
}
| apache/flink | flink-libraries/flink-gelly-scala/src/main/scala/org/apache/flink/graph/scala/utils/Tuple2ToVertexMap.scala | Scala | apache-2.0 | 1,141 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.tree.model
import scala.collection.mutable
import com.github.fommil.netlib.BLAS.{getInstance => blas}
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.{Logging, SparkContext}
import org.apache.spark.annotation.{Experimental, Since}
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.configuration.Algo
import org.apache.spark.mllib.tree.configuration.Algo._
import org.apache.spark.mllib.tree.configuration.EnsembleCombiningStrategy._
import org.apache.spark.mllib.tree.loss.Loss
import org.apache.spark.mllib.util.{Loader, Saveable}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import org.apache.spark.util.Utils
/**
 * :: Experimental ::
 * Represents a random forest model.
 *
 * @param algo algorithm for the ensemble model, either Classification or Regression
 * @param trees tree ensembles
 */
@Since("1.2.0")
@Experimental
class RandomForestModel @Since("1.2.0") (
    @Since("1.2.0") override val algo: Algo,
    @Since("1.2.0") override val trees: Array[DecisionTreeModel])
  // Every tree gets weight 1.0; classification forests vote, regression forests average.
  extends TreeEnsembleModel(algo, trees, Array.fill(trees.length)(1.0),
    combiningStrategy = if (algo == Classification) Vote else Average)
  with Saveable {

  // A forest must be homogeneous: every member tree shares the ensemble's algo.
  require(trees.forall(_.algo == algo))

  /**
   * Persist this model in the V1.0 ensemble format (JSON metadata + Parquet node data).
   *
   * @param sc Spark context used to save model data.
   * @param path Path specifying the directory in which to save this model.
   *             If the directory already exists, this method throws an exception.
   */
  @Since("1.3.0")
  override def save(sc: SparkContext, path: String): Unit = {
    TreeEnsembleModel.SaveLoadV1_0.save(sc, path, this,
      RandomForestModel.SaveLoadV1_0.thisClassName)
  }

  override protected def formatVersion: String = RandomForestModel.formatVersion
}
@Since("1.3.0")
object RandomForestModel extends Loader[RandomForestModel] {

  private[mllib] def formatVersion: String = TreeEnsembleModel.SaveLoadV1_0.thisFormatVersion

  /**
   * Load a random forest previously persisted with `save`.
   *
   * @param sc Spark context used for loading model files.
   * @param path Path specifying the directory to which the model was saved.
   * @return Model instance
   */
  @Since("1.3.0")
  override def load(sc: SparkContext, path: String): RandomForestModel = {
    val (loadedClassName, version, jsonMetadata) = Loader.loadMetadata(sc, path)
    val classNameV1_0 = SaveLoadV1_0.thisClassName
    (loadedClassName, version) match {
      case (`classNameV1_0`, "1.0") =>
        val metadata = TreeEnsembleModel.SaveLoadV1_0.readMetadata(jsonMetadata)
        // Every tree in a random forest carries unit weight; anything else means corrupt metadata.
        assert(metadata.treeWeights.forall(_ == 1.0))
        val ensembleTrees =
          TreeEnsembleModel.SaveLoadV1_0.loadTrees(sc, path, metadata.treeAlgo)
        new RandomForestModel(Algo.fromString(metadata.algo), ensembleTrees)
      case _ => throw new Exception(s"RandomForestModel.load did not recognize model" +
        s" with (className, format version): ($loadedClassName, $version). Supported:\\n" +
        s" ($classNameV1_0, 1.0)")
    }
  }

  private object SaveLoadV1_0 {
    // Hard-code the class name so persisted metadata stays valid if the class is ever moved.
    def thisClassName: String = "org.apache.spark.mllib.tree.model.RandomForestModel"
  }
}
/**
 * :: Experimental ::
 * Represents a gradient boosted trees model.
 *
 * @param algo algorithm for the ensemble model, either Classification or Regression
 * @param trees tree ensembles
 * @param treeWeights tree ensemble weights
 */
@Since("1.2.0")
@Experimental
class GradientBoostedTreesModel @Since("1.2.0") (
    @Since("1.2.0") override val algo: Algo,
    @Since("1.2.0") override val trees: Array[DecisionTreeModel],
    @Since("1.2.0") override val treeWeights: Array[Double])
  extends TreeEnsembleModel(algo, trees, treeWeights, combiningStrategy = Sum)
  with Saveable {

  // Each boosting stage must come with its own (learning-rate scaled) weight.
  require(trees.length == treeWeights.length)

  /**
   * Persist this model in the V1.0 ensemble format (JSON metadata + Parquet node data).
   *
   * @param sc Spark context used to save model data.
   * @param path Path specifying the directory in which to save this model.
   *             If the directory already exists, this method throws an exception.
   */
  @Since("1.3.0")
  override def save(sc: SparkContext, path: String): Unit = {
    TreeEnsembleModel.SaveLoadV1_0.save(sc, path, this,
      GradientBoostedTreesModel.SaveLoadV1_0.thisClassName)
  }

  /**
   * Method to compute error or loss for every iteration of gradient boosting.
   * @param data RDD of [[org.apache.spark.mllib.regression.LabeledPoint]]
   * @param loss evaluation metric.
   * @return an array with index i having the losses or errors for the ensemble
   *         containing the first i+1 trees
   */
  @Since("1.4.0")
  def evaluateEachIteration(
      data: RDD[LabeledPoint],
      loss: Loss): Array[Double] = {
    val sc = data.sparkContext
    // For classification, remap {0, 1} labels to {-1, +1} as expected by GBT losses.
    val remappedData = algo match {
      case Classification => data.map(x => new LabeledPoint((x.label * 2) - 1, x.features))
      case _ => data
    }
    val numIterations = trees.length
    val evaluationArray = Array.fill(numIterations)(0.0)
    // Capture the weights in a local so the closures below don't serialize `this`.
    val localTreeWeights = treeWeights
    // Seed per-sample (prediction, error) with the first tree, then fold in one tree per pass.
    var predictionAndError = GradientBoostedTreesModel.computeInitialPredictionAndError(
      remappedData, localTreeWeights(0), trees(0), loss)
    evaluationArray(0) = predictionAndError.values.mean()
    val broadcastTrees = sc.broadcast(trees)
    (1 until numIterations).foreach { nTree =>
      predictionAndError = remappedData.zip(predictionAndError).mapPartitions { iter =>
        val currentTree = broadcastTrees.value(nTree)
        val currentTreeWeight = localTreeWeights(nTree)
        iter.map { case (point, (pred, error)) =>
          val newPred = pred + currentTree.predict(point.features) * currentTreeWeight
          val newError = loss.computeError(newPred, point.label)
          (newPred, newError)
        }
      }
      // Mean per-sample error after adding tree nTree to the ensemble.
      evaluationArray(nTree) = predictionAndError.values.mean()
    }
    broadcastTrees.unpersist()
    evaluationArray
  }

  override protected def formatVersion: String = GradientBoostedTreesModel.formatVersion
}
/**
 * Companion providing boosting-evaluation helpers and persistence for
 * [[GradientBoostedTreesModel]].
 */
@Since("1.3.0")
object GradientBoostedTreesModel extends Loader[GradientBoostedTreesModel] {

  /**
   * Compute the initial predictions and errors for a dataset for the first
   * iteration of gradient boosting.
   * @param data training data.
   * @param initTreeWeight learning rate assigned to the first tree.
   * @param initTree first DecisionTreeModel.
   * @param loss evaluation metric.
   * @return a RDD with each element being a zip of the prediction and error
   *         corresponding to every sample.
   */
  @Since("1.4.0")
  def computeInitialPredictionAndError(
      data: RDD[LabeledPoint],
      initTreeWeight: Double,
      initTree: DecisionTreeModel,
      loss: Loss): RDD[(Double, Double)] = {
    data.map { lp =>
      val pred = initTreeWeight * initTree.predict(lp.features)
      val error = loss.computeError(pred, lp.label)
      (pred, error)
    }
  }

  /**
   * Update a zipped predictionError RDD
   * (as obtained with computeInitialPredictionAndError)
   * @param data training data.
   * @param predictionAndError predictionError RDD
   * @param treeWeight Learning rate.
   * @param tree Tree using which the prediction and error should be updated.
   * @param loss evaluation metric.
   * @return a RDD with each element being a zip of the prediction and error
   *         corresponding to each sample.
   */
  @Since("1.4.0")
  def updatePredictionError(
      data: RDD[LabeledPoint],
      predictionAndError: RDD[(Double, Double)],
      treeWeight: Double,
      tree: DecisionTreeModel,
      loss: Loss): RDD[(Double, Double)] = {
    val newPredError = data.zip(predictionAndError).mapPartitions { iter =>
      iter.map { case (lp, (pred, error)) =>
        // Fold the new tree's weighted prediction into the running score.
        val newPred = pred + tree.predict(lp.features) * treeWeight
        val newError = loss.computeError(newPred, lp.label)
        (newPred, newError)
      }
    }
    newPredError
  }

  private[mllib] def formatVersion: String = TreeEnsembleModel.SaveLoadV1_0.thisFormatVersion

  /**
   * Load a model previously persisted with `save`.
   *
   * @param sc Spark context used for loading model files.
   * @param path Path specifying the directory to which the model was saved.
   * @return Model instance
   */
  @Since("1.3.0")
  override def load(sc: SparkContext, path: String): GradientBoostedTreesModel = {
    val (loadedClassName, version, jsonMetadata) = Loader.loadMetadata(sc, path)
    val classNameV1_0 = SaveLoadV1_0.thisClassName
    (loadedClassName, version) match {
      case (className, "1.0") if className == classNameV1_0 =>
        val metadata = TreeEnsembleModel.SaveLoadV1_0.readMetadata(jsonMetadata)
        // GBT ensembles are always combined by summation.
        assert(metadata.combiningStrategy == Sum.toString)
        val trees =
          TreeEnsembleModel.SaveLoadV1_0.loadTrees(sc, path, metadata.treeAlgo)
        new GradientBoostedTreesModel(Algo.fromString(metadata.algo), trees, metadata.treeWeights)
      case _ => throw new Exception(s"GradientBoostedTreesModel.load did not recognize model" +
        s" with (className, format version): ($loadedClassName, $version). Supported:\\n" +
        s" ($classNameV1_0, 1.0)")
    }
  }

  private object SaveLoadV1_0 {
    // Hard-code class name string in case it changes in the future
    def thisClassName: String = "org.apache.spark.mllib.tree.model.GradientBoostedTreesModel"
  }
}
/**
 * Represents a tree ensemble model.
 *
 * @param algo algorithm for the ensemble model, either Classification or Regression
 * @param trees tree ensembles
 * @param treeWeights tree ensemble weights
 * @param combiningStrategy strategy for combining the predictions, not used for regression.
 */
private[tree] sealed class TreeEnsembleModel(
    protected val algo: Algo,
    protected val trees: Array[DecisionTreeModel],
    protected val treeWeights: Array[Double],
    protected val combiningStrategy: EnsembleCombiningStrategy) extends Serializable {

  require(numTrees > 0, "TreeEnsembleModel cannot be created without trees.")

  // Lower bound keeps Average combining from ever dividing by zero.
  private val sumWeights = math.max(treeWeights.sum, 1e-15)

  /**
   * Predicts for a single data point using the weighted sum of ensemble predictions.
   *
   * @param features array representing a single data point
   * @return predicted category from the trained model
   */
  private def predictBySumming(features: Vector): Double = {
    val perTreePredictions = trees.map(_.predict(features))
    // Weighted sum expressed as a dot product of per-tree predictions with the weights.
    blas.ddot(numTrees, perTreePredictions, 1, treeWeights, 1)
  }

  /**
   * Classifies a single data point based on (weighted) majority votes.
   */
  private def predictByVoting(features: Vector): Double = {
    val tally = mutable.Map.empty[Int, Double]
    for ((tree, weight) <- trees.view.zip(treeWeights)) {
      val label = tree.predict(features).toInt
      tally(label) = tally.getOrElse(label, 0.0) + weight
    }
    // The label with the greatest accumulated weight wins.
    tally.maxBy(_._2)._1
  }

  /**
   * Predict values for a single data point using the model trained.
   *
   * @param features array representing a single data point
   * @return predicted category from the trained model
   */
  def predict(features: Vector): Double = {
    (algo, combiningStrategy) match {
      case (Regression, Sum) =>
        predictBySumming(features)
      case (Regression, Average) =>
        predictBySumming(features) / sumWeights
      case (Classification, Sum) => // binary classification
        val margin = predictBySumming(features)
        // TODO: predicted labels are +1 or -1 for GBT. Need a better way to store this info.
        if (margin > 0.0) 1.0 else 0.0
      case (Classification, Vote) =>
        predictByVoting(features)
      case _ =>
        throw new IllegalArgumentException(
          "TreeEnsembleModel given unsupported (algo, combiningStrategy) combination: " +
            s"($algo, $combiningStrategy).")
    }
  }

  /**
   * Predict values for the given data set.
   *
   * @param features RDD representing data points to be predicted
   * @return RDD[Double] where each entry contains the corresponding prediction
   */
  def predict(features: RDD[Vector]): RDD[Double] = features.map(v => predict(v))

  /**
   * Java-friendly version of [[org.apache.spark.mllib.tree.model.TreeEnsembleModel#predict]].
   */
  def predict(features: JavaRDD[Vector]): JavaRDD[java.lang.Double] = {
    predict(features.rdd).toJavaRDD().asInstanceOf[JavaRDD[java.lang.Double]]
  }

  /**
   * Print a summary of the model.
   */
  override def toString: String = {
    algo match {
      case Classification =>
        s"TreeEnsembleModel classifier with $numTrees trees\\n"
      case Regression =>
        s"TreeEnsembleModel regressor with $numTrees trees\\n"
      case _ => throw new IllegalArgumentException(
        s"TreeEnsembleModel given unknown algo parameter: $algo.")
    }
  }

  /**
   * Print the full model to a string.
   */
  def toDebugString: String = {
    val header = toString + "\\n"
    val treeDumps = trees.zipWithIndex.map { case (tree, treeIndex) =>
      s"  Tree $treeIndex:\\n" + tree.topNode.subtreeToString(4)
    }
    header + treeDumps.mkString("")
  }

  /**
   * Get number of trees in ensemble.
   */
  def numTrees: Int = trees.length

  /**
   * Get total number of nodes, summed over all trees in the ensemble.
   */
  def totalNumNodes: Int = trees.map(_.numNodes).sum
}
private[tree] object TreeEnsembleModel extends Logging {

  /** V1.0 persistence format: JSON metadata file + Parquet node data. */
  object SaveLoadV1_0 {

    import org.apache.spark.mllib.tree.model.DecisionTreeModel.SaveLoadV1_0.{NodeData, constructTrees}

    def thisFormatVersion: String = "1.0"

    // Ensemble-level attributes stored alongside the per-node Parquet data.
    case class Metadata(
        algo: String,
        treeAlgo: String,
        combiningStrategy: String,
        treeWeights: Array[Double])

    /**
     * Model data for model import/export.
     * We have to duplicate NodeData here since Spark SQL does not yet support extracting subfields
     * of nested fields; once that is possible, we can use something like:
     *   case class EnsembleNodeData(treeId: Int, node: NodeData),
     * where NodeData is from DecisionTreeModel.
     */
    case class EnsembleNodeData(treeId: Int, node: NodeData)

    def save(sc: SparkContext, path: String, model: TreeEnsembleModel, className: String): Unit = {
      val sqlContext = new SQLContext(sc)
      import sqlContext.implicits._

      // SPARK-6120: We do a hacky check here so users understand why save() is failing
      // when they run the ML guide example.
      // TODO: Fix this issue for real.
      val memThreshold = 768
      if (sc.isLocal) {
        val driverMemory = sc.getConf.getOption("spark.driver.memory")
          .orElse(Option(System.getenv("SPARK_DRIVER_MEMORY")))
          .map(Utils.memoryStringToMb)
          .getOrElse(Utils.DEFAULT_DRIVER_MEM_MB)
        if (driverMemory <= memThreshold) {
          logWarning(s"$className.save() was called, but it may fail because of too little" +
            s" driver memory (${driverMemory}m)." +
            s" If failure occurs, try setting driver-memory ${memThreshold}m (or larger).")
        }
      } else {
        if (sc.executorMemory <= memThreshold) {
          logWarning(s"$className.save() was called, but it may fail because of too little" +
            s" executor memory (${sc.executorMemory}m)." +
            s" If failure occurs try setting executor-memory ${memThreshold}m (or larger).")
        }
      }

      // Create JSON metadata.
      implicit val format = DefaultFormats
      val ensembleMetadata = Metadata(model.algo.toString, model.trees(0).algo.toString,
        model.combiningStrategy.toString, model.treeWeights)
      val metadata = compact(render(
        ("class" -> className) ~ ("version" -> thisFormatVersion) ~
          ("metadata" -> Extraction.decompose(ensembleMetadata))))
      // Single-partition write so the metadata ends up in one file.
      sc.parallelize(Seq(metadata), 1).saveAsTextFile(Loader.metadataPath(path))

      // Create Parquet data: one row per (treeId, node) pair.
      val dataRDD = sc.parallelize(model.trees.zipWithIndex).flatMap { case (tree, treeId) =>
        tree.topNode.subtreeIterator.toSeq.map(node => NodeData(treeId, node))
      }.toDF()
      dataRDD.write.parquet(Loader.dataPath(path))
    }

    /**
     * Read metadata from the loaded JSON metadata.
     */
    def readMetadata(metadata: JValue): Metadata = {
      implicit val formats = DefaultFormats
      (metadata \\ "metadata").extract[Metadata]
    }

    /**
     * Load trees for an ensemble, and return them in order.
     * @param path path to load the model from
     * @param treeAlgo Algorithm for individual trees (which may differ from the ensemble's
     *                 algorithm).
     */
    def loadTrees(
        sc: SparkContext,
        path: String,
        treeAlgo: String): Array[DecisionTreeModel] = {
      val datapath = Loader.dataPath(path)
      val sqlContext = new SQLContext(sc)
      // One Parquet row per node; constructTrees groups rows back into whole trees.
      val nodes = sqlContext.read.parquet(datapath).map(NodeData.apply)
      val trees = constructTrees(nodes)
      trees.map(new DecisionTreeModel(_, Algo.fromString(treeAlgo)))
    }
  }
}
| pronix/spark | mllib/src/main/scala/org/apache/spark/mllib/tree/model/treeEnsembleModels.scala | Scala | apache-2.0 | 17,849 |
package agni.std
import java.util.concurrent.{CompletableFuture, CompletionStage}
import agni.util.Par
import org.scalatest.flatspec.AnyFlatSpec
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
class AsyncSpec extends AnyFlatSpec {

  import async._

  // Summon the Par instance that bridges Java CompletionStage to Scala Future.
  val parF: Par.Aux[CompletionStage, Future] = implicitly

  it should "convert to a Future" in {
    val future = parF.parallel(CompletableFuture.completedStage(10))
    assert(Await.result(future, 3.seconds) === 10)
  }

  it should "convert to a failed Future when the passed computation fails" in {
    class R extends Throwable
    val future = parF.parallel(CompletableFuture.failedStage(new R))
    assertThrows[R](Await.result(future, 3.seconds))
  }
}
| tkrs/agni | core/src/test/scala/agni/std/AsyncSpec.scala | Scala | mit | 721 |
package com.github.scalaspring.scalatest
import org.scalatest.{FlatSpec, Matchers}
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.context.ConfigurableApplicationContext
import org.springframework.context.annotation.Bean
import org.springframework.test.context.ContextConfiguration
@ContextConfiguration(classes = Array(classOf[TestContextManagementSpec.Configuration]))
class TestContextManagementSpec extends FlatSpec with TestContextManagement with Matchers {

  // Spring injects these after construction; `null` is the conventional
  // placeholder for @Autowired vals in Scala test classes.
  @Autowired val applicationContext: ConfigurableApplicationContext = null
  @Autowired val someSeq: Seq[String] = null

  "Autowired properties" should "be non-null" in {
    applicationContext should not be null
    someSeq shouldEqual Seq("foo")
  }

  // Verifies the context is closed once the suite finishes.
  override def afterAll(): Unit = {
    super.afterAll()
    applicationContext.isActive shouldBe false
  }
}
object TestContextManagementSpec {

  /** Spring test configuration supplying the beans autowired by the spec. */
  @Configuration
  class Configuration {

    @Bean
    def someSeq: Seq[String] = Seq("foo")
  }
}
package com.github.caiiiycuk.pg2sqlite
import scala.annotation.tailrec
import com.github.caiiiycuk.pg2sqlite.command._
import com.github.caiiiycuk.pg2sqlite.iterator.Line
import com.github.caiiiycuk.pg2sqlite.schema.Schema
object DumpInserter {
  // Commands recognized in a dump stream, tried in this order against each line.
  val COMMANDS = List(CreateTable, Copy, CreateIndex)
}
class DumpInserter(connection: Connection) {

  import DumpInserter._

  implicit val schema = new Schema()

  /**
   * Consumes the dump line iterator, dispatching each recognized statement
   * (CREATE TABLE, COPY, CREATE INDEX) to its command handler; unrecognized
   * lines are skipped. Tail-recursive, so arbitrarily long dumps are
   * processed in constant stack space.
   */
  @tailrec
  final def insert(iterator: Iterator[Line]): Unit = {
    if (iterator.hasNext) {
      val head = iterator.next()
      // `foreach` (not `map`): the command runs purely for its side effects and
      // the previous `map` silently discarded its result.
      COMMANDS.find(_.matchHead(head)).foreach { command =>
        // Re-attach the already-consumed head so the command sees the full statement.
        command.apply(connection, Iterator(head) ++ iterator)
      }
      insert(iterator)
    }
  }
}
| caiiiycuk/postgresql-to-sqlite | src/main/scala/com/github/caiiiycuk/pg2sqlite/DumpInserter.scala | Scala | mit | 742 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.integration
import java.io.File
import java.nio.ByteBuffer
import kafka.admin.AdminUtils
import kafka.api.{TopicMetadataRequest, TopicMetadataResponse}
import kafka.client.ClientUtils
import kafka.cluster.{Broker, BrokerEndPoint}
import kafka.common.ErrorMapping
import kafka.server.{KafkaConfig, KafkaServer, NotRunning}
import kafka.utils.TestUtils
import kafka.utils.TestUtils._
import kafka.zk.ZooKeeperTestHarness
import org.apache.kafka.common.protocol.SecurityProtocol
import org.junit.Assert._
import org.junit.{Test, After, Before}
abstract class BaseTopicMetadataTest extends ZooKeeperTestHarness {
// Broker started in setUp() and shut down in tearDown(); null until then.
private var server1: KafkaServer = null
// Endpoint(s) of the running broker, populated in setUp().
var brokerEndPoints: Seq[BrokerEndPoint] = null
// Configs for additional brokers started/stopped by individual test cases.
var adHocConfigs: Seq[KafkaConfig] = null
// Total number of broker configs to generate (1 shared + the ad hoc ones).
val numConfigs: Int = 4
// This should be defined if `securityProtocol` uses SSL (eg SSL, SASL_SSL)
protected def trustStoreFile: Option[File]
// Security protocol used for inter-broker communication in this suite.
protected def securityProtocol: SecurityProtocol
@Before
override def setUp() {
  super.setUp()
  // Generate configs for all brokers up front; only the first broker is started here.
  val props = createBrokerConfigs(numConfigs, zkConnect, interBrokerSecurityProtocol = Some(securityProtocol),
    trustStoreFile = trustStoreFile)
  val configs: Seq[KafkaConfig] = props.map(KafkaConfig.fromProps)
  adHocConfigs = configs.takeRight(configs.size - 1) // Started and stopped by individual test cases
  server1 = TestUtils.createServer(configs.head)
  brokerEndPoints = Seq(
    // We are using the Scala clients and they don't support SSL. Once we move to the Java ones, we should use
    // `securityProtocol` instead of PLAINTEXT below
    new BrokerEndPoint(server1.config.brokerId, server1.config.hostName, server1.boundPort(SecurityProtocol.PLAINTEXT))
  )
}
@After
override def tearDown() {
  // Shut down the shared broker before super.tearDown() tears down ZooKeeper.
  server1.shutdown()
  super.tearDown()
}
@Test
def testTopicMetadataRequest: Unit = {
  // create topic
  val topic = "test"
  AdminUtils.createTopic(zkUtils, topic, 1, 1)
  // Round-trip a TopicMetadataRequest through its wire format and check equality.
  val request = new TopicMetadataRequest(List(topic), 0)
  val buffer = ByteBuffer.allocate(request.sizeInBytes + 2)
  request.writeTo(buffer)
  buffer.rewind()
  val roundTripped = TopicMetadataRequest.readFrom(buffer)
  assertEquals(request, roundTripped)
}
@Test
def testBasicTopicMetadata {
  // create topic
  val topic = "test"
  createTopic(zkUtils, topic, numPartitions = 1, replicationFactor = 1, servers = Seq(server1))
  // Metadata fetched from the single broker should describe exactly this topic.
  val topicsMetadata = ClientUtils.fetchTopicMetadata(Set(topic), brokerEndPoints, "TopicMetadataTest-testBasicTopicMetadata",
    2000,0).topicsMetadata
  assertEquals(ErrorMapping.NoError, topicsMetadata.head.errorCode)
  assertEquals(ErrorMapping.NoError, topicsMetadata.head.partitionsMetadata.head.errorCode)
  assertEquals("Expecting metadata only for 1 topic", 1, topicsMetadata.size)
  assertEquals("Expecting metadata for the test topic", "test", topicsMetadata.head.topic)
  val partitionMetadata = topicsMetadata.head.partitionsMetadata
  assertEquals("Expecting metadata for 1 partition", 1, partitionMetadata.size)
  assertEquals("Expecting partition id to be 0", 0, partitionMetadata.head.partitionId)
  assertEquals(1, partitionMetadata.head.replicas.size)
}
@Test
def testGetAllTopicMetadata {
  // create topic
  val topic1 = "testGetAllTopicMetadata1"
  val topic2 = "testGetAllTopicMetadata2"
  createTopic(zkUtils, topic1, numPartitions = 1, replicationFactor = 1, servers = Seq(server1))
  createTopic(zkUtils, topic2, numPartitions = 1, replicationFactor = 1, servers = Seq(server1))
  // issue metadata request with empty list of topics
  // (an empty topic set asks the broker for metadata of all topics)
  val topicsMetadata = ClientUtils.fetchTopicMetadata(Set.empty, brokerEndPoints, "TopicMetadataTest-testGetAllTopicMetadata",
    2000, 0).topicsMetadata
  assertEquals(ErrorMapping.NoError, topicsMetadata.head.errorCode)
  assertEquals(2, topicsMetadata.size)
  assertEquals(ErrorMapping.NoError, topicsMetadata.head.partitionsMetadata.head.errorCode)
  assertEquals(ErrorMapping.NoError, topicsMetadata.last.partitionsMetadata.head.errorCode)
  val partitionMetadataTopic1 = topicsMetadata.head.partitionsMetadata
  val partitionMetadataTopic2 = topicsMetadata.last.partitionsMetadata
  assertEquals("Expecting metadata for 1 partition", 1, partitionMetadataTopic1.size)
  assertEquals("Expecting partition id to be 0", 0, partitionMetadataTopic1.head.partitionId)
  assertEquals(1, partitionMetadataTopic1.head.replicas.size)
  assertEquals("Expecting metadata for 1 partition", 1, partitionMetadataTopic2.size)
  assertEquals("Expecting partition id to be 0", 0, partitionMetadataTopic2.head.partitionId)
  assertEquals(1, partitionMetadataTopic2.head.replicas.size)
}
@Test
def testAutoCreateTopic {
  // auto create topic
  val topic = "testAutoCreateTopic"
  // First fetch triggers auto-creation; the broker answers LeaderNotAvailable
  // because the new partition has no elected leader yet.
  var topicsMetadata = ClientUtils.fetchTopicMetadata(Set(topic), brokerEndPoints, "TopicMetadataTest-testAutoCreateTopic",
    2000,0).topicsMetadata
  assertEquals(ErrorMapping.LeaderNotAvailableCode, topicsMetadata.head.errorCode)
  assertEquals("Expecting metadata only for 1 topic", 1, topicsMetadata.size)
  assertEquals("Expecting metadata for the test topic", topic, topicsMetadata.head.topic)
  assertEquals(0, topicsMetadata.head.partitionsMetadata.size)
  // wait for leader to be elected
  TestUtils.waitUntilLeaderIsElectedOrChanged(zkUtils, topic, 0)
  TestUtils.waitUntilMetadataIsPropagated(Seq(server1), topic, 0)
  // retry the metadata for the auto created topic
  topicsMetadata = ClientUtils.fetchTopicMetadata(Set(topic), brokerEndPoints, "TopicMetadataTest-testBasicTopicMetadata",
    2000,0).topicsMetadata
  assertEquals(ErrorMapping.NoError, topicsMetadata.head.errorCode)
  assertEquals(ErrorMapping.NoError, topicsMetadata.head.partitionsMetadata.head.errorCode)
  val partitionMetadata = topicsMetadata.head.partitionsMetadata
  assertEquals("Expecting metadata for 1 partition", 1, partitionMetadata.size)
  assertEquals("Expecting partition id to be 0", 0, partitionMetadata.head.partitionId)
  assertEquals(1, partitionMetadata.head.replicas.size)
  assertTrue(partitionMetadata.head.leader.isDefined)
}
@Test
def testAutoCreateTopicWithCollision {
  // auto create topic
  val topic1 = "testAutoCreate_Topic"
  val topic2 = "testAutoCreate.Topic"
  // topic1 and topic2 differ only by '.' vs '_' — presumably colliding in the
  // broker's internal naming — so only the first is auto-created and the
  // second is rejected as invalid. TODO confirm collision rule against broker docs.
  var topicsMetadata = ClientUtils.fetchTopicMetadata(Set(topic1, topic2), brokerEndPoints, "TopicMetadataTest-testAutoCreateTopic",
    2000,0).topicsMetadata
  assertEquals("Expecting metadata for 2 topics", 2, topicsMetadata.size)
  assertEquals("Expecting metadata for topic1", topic1, topicsMetadata.head.topic)
  assertEquals(ErrorMapping.LeaderNotAvailableCode, topicsMetadata.head.errorCode)
  assertEquals("Expecting metadata for topic2", topic2, topicsMetadata(1).topic)
  assertEquals("Expecting InvalidTopicCode for topic2 metadata", ErrorMapping.InvalidTopicCode, topicsMetadata(1).errorCode)
  // wait for leader to be elected
  TestUtils.waitUntilLeaderIsElectedOrChanged(zkUtils, topic1, 0)
  TestUtils.waitUntilMetadataIsPropagated(Seq(server1), topic1, 0)
  // retry the metadata for the first auto created topic
  topicsMetadata = ClientUtils.fetchTopicMetadata(Set(topic1), brokerEndPoints, "TopicMetadataTest-testBasicTopicMetadata",
    2000,0).topicsMetadata
  assertEquals(ErrorMapping.NoError, topicsMetadata.head.errorCode)
  assertEquals(ErrorMapping.NoError, topicsMetadata.head.partitionsMetadata.head.errorCode)
  var partitionMetadata = topicsMetadata.head.partitionsMetadata
  assertEquals("Expecting metadata for 1 partition", 1, partitionMetadata.size)
  assertEquals("Expecting partition id to be 0", 0, partitionMetadata.head.partitionId)
  assertEquals(1, partitionMetadata.head.replicas.size)
  assertTrue(partitionMetadata.head.leader.isDefined)
}
/**
 * Verifies that every running broker in `servers` eventually reports an ISR
 * for the test topic's partition 0 that matches the set of currently-active
 * brokers.
 */
private def checkIsr(servers: Seq[KafkaServer]): Unit = {
  val activeBrokers: Seq[KafkaServer] = servers.filter(x => x.brokerState.currentState != NotRunning.state)
  val expectedIsr: Seq[BrokerEndPoint] = activeBrokers.map(
    x => new BrokerEndPoint(x.config.brokerId,
      if (x.config.hostName.nonEmpty) x.config.hostName else "localhost",
      x.boundPort())
  )

  // Assert that topic metadata at new brokers is updated correctly
  activeBrokers.foreach(x => {
    // var captured by both closures below so the failure message can report
    // the last metadata response that was actually fetched.
    var metadata: TopicMetadataResponse = new TopicMetadataResponse(Seq(), Seq(), -1)
    waitUntilTrue(() => {
      metadata = ClientUtils.fetchTopicMetadata(
        Set.empty,
        Seq(new BrokerEndPoint(
          x.config.brokerId,
          if (x.config.hostName.nonEmpty) x.config.hostName else "localhost",
          x.boundPort())),
        "TopicMetadataTest-testBasicTopicMetadata",
        2000, 0)
      // ISR ordering is not significant, so compare both sides sorted by id.
      metadata.topicsMetadata.nonEmpty &&
        metadata.topicsMetadata.head.partitionsMetadata.nonEmpty &&
        expectedIsr.sortBy(_.id) == metadata.topicsMetadata.head.partitionsMetadata.head.isr.sortBy(_.id)
    },
    "Topic metadata is not correctly updated for broker " + x + ".\n" +
    "Expected ISR: " + expectedIsr + "\n" +
    "Actual ISR  : " + (if (metadata.topicsMetadata.nonEmpty &&
                            metadata.topicsMetadata.head.partitionsMetadata.nonEmpty)
                          metadata.topicsMetadata.head.partitionsMetadata.head.isr
                        else
                          ""), 8000L)
  })
}
@Test
def testIsrAfterBrokerShutDownAndJoinsBack {
  // Bounce one replica of a fully-replicated single-partition topic and check
  // that the ISR reported by every broker converges back to all brokers.
  val numBrokers = 2 //just 2 brokers are enough for the test

  // start adHoc brokers
  val adHocServers = adHocConfigs.take(numBrokers - 1).map(p => createServer(p))
  val allServers: Seq[KafkaServer] = Seq(server1) ++ adHocServers

  // create topic replicated onto every broker
  val topic = "test"
  AdminUtils.createTopic(zkUtils, topic, 1, numBrokers)

  // shutdown a broker
  adHocServers.last.shutdown()
  adHocServers.last.awaitShutdown()

  // startup a broker
  adHocServers.last.startup()

  // check metadata is still correct and updated at all brokers
  checkIsr(allServers)

  // shutdown adHoc brokers; foreach (not map) — this is pure side effect
  adHocServers.foreach(_.shutdown())
}
/**
 * Waits until the cluster metadata fetched via `brokerEndPoints` reports
 * `expectedBrokersCount` alive brokers, then verifies each running server in
 * `servers` returns exactly the same metadata response.
 */
private def checkMetadata(servers: Seq[KafkaServer], expectedBrokersCount: Int): Unit = {
  // Captured by the closures below; holds the last fetched response so the
  // per-broker comparison can reuse it as the reference value.
  var topicMetadata: TopicMetadataResponse = new TopicMetadataResponse(Seq(), Seq(), -1)

  // Get topic metadata from old broker
  // Wait for metadata to get updated by checking metadata from a new broker
  waitUntilTrue(() => {
    topicMetadata = ClientUtils.fetchTopicMetadata(
      Set.empty, brokerEndPoints, "TopicMetadataTest-testBasicTopicMetadata", 2000, 0)
    topicMetadata.brokers.size == expectedBrokersCount},
    "Alive brokers list is not correctly propagated by coordinator to brokers"
  )

  // Assert that topic metadata at new brokers is updated correctly
  servers.filter(x => x.brokerState.currentState != NotRunning.state).foreach(x =>
    waitUntilTrue(() =>
      topicMetadata == ClientUtils.fetchTopicMetadata(
        Set.empty,
        Seq(new Broker(x.config.brokerId,
          x.config.hostName,
          x.boundPort()).getBrokerEndPoint(SecurityProtocol.PLAINTEXT)),
        "TopicMetadataTest-testBasicTopicMetadata",
        2000, 0), "Topic metadata is not correctly updated"))
}
@Test
def testAliveBrokerListWithNoTopics {
  // A broker with no topics must still appear in the alive-brokers list.
  checkMetadata(Seq(server1), 1)
}
@Test
def testAliveBrokersListWithNoTopicsAfterNewBrokerStartup {
  // Start all but one of the ad-hoc brokers, then add the remaining one and
  // check the alive-brokers count tracks the change.
  var adHocServers = adHocConfigs.takeRight(adHocConfigs.size - 1).map(p => createServer(p))

  checkMetadata(adHocServers, numConfigs - 1)

  // Add a broker
  adHocServers = adHocServers ++ Seq(createServer(adHocConfigs.head))

  checkMetadata(adHocServers, numConfigs)
  // foreach (not map) — shutdown is pure side effect
  adHocServers.foreach(_.shutdown())
}
@Test
def testAliveBrokersListWithNoTopicsAfterABrokerShutdown {
  // Start every ad-hoc broker, stop one, and check the alive-brokers count drops.
  val adHocServers = adHocConfigs.map(p => createServer(p))

  checkMetadata(adHocServers, numConfigs)

  // Shutdown a broker
  adHocServers.last.shutdown()
  adHocServers.last.awaitShutdown()

  checkMetadata(adHocServers, numConfigs - 1)

  // foreach (not map) — shutdown is pure side effect
  adHocServers.foreach(_.shutdown())
}
}
| racker/kafka | core/src/test/scala/unit/kafka/integration/BaseTopicMetadataTest.scala | Scala | apache-2.0 | 13,458 |
package io.udash.web.guide.views.rpc.demos
import io.udash._
import io.udash.bootstrap.button.UdashButton
import io.udash.bootstrap.utils.BootstrapStyles.Color
import io.udash.logging.CrossLogging
import io.udash.web.commons.views.Component
import io.udash.web.guide.Context
import io.udash.web.guide.demos.rpc.GenCodecServerRPC
import io.udash.web.guide.styles.partials.GuideStyles
import scala.util.{Failure, Random, Success}
import scalatags.JsDom
import scalatags.JsDom.all._
/**
 * View model for the GenCodec demo. Each field holds the value most recently
 * echoed back by the server for the corresponding codec round-trip; all fields
 * are None until a response arrives.
 */
trait GenCodecsDemoModel {
  import io.udash.web.guide.demos.rpc.GenCodecServerRPC._

  def int: Option[Int]
  def double: Option[Double]
  def string: Option[String]
  def seq: Option[Seq[String]]
  // Map responses are stored as a Seq of pairs — presumably so the model
  // property machinery can handle them; confirm before changing to Map.
  def map: Option[Seq[(String, Int)]]
  def caseClass: Option[DemoCaseClass]
  // The three cls* fields mirror the members of DemoClass echoed by the server.
  def clsInt: Option[Int]
  def clsString: Option[String]
  def clsVar: Option[Int]
  def sealedTrait: Option[Fruit]
}
object GenCodecsDemoModel extends HasModelPropertyCreator[GenCodecsDemoModel] {
  /** An all-empty model instance; serves as the blank value for ModelProperty.blank. */
  private val empty: GenCodecsDemoModel = new GenCodecsDemoModel {
    override def int: Option[Int] = Option.empty
    override def double: Option[Double] = Option.empty
    override def string: Option[String] = Option.empty
    override def seq: Option[Seq[String]] = Option.empty
    override def map: Option[Seq[(String, Int)]] = Option.empty
    override def caseClass: Option[GenCodecServerRPC.DemoCaseClass] = Option.empty
    override def clsInt: Option[Int] = Option.empty
    override def clsString: Option[String] = Option.empty
    override def clsVar: Option[Int] = Option.empty
    override def sealedTrait: Option[GenCodecServerRPC.Fruit] = Option.empty
  }

  implicit val blank: Blank[GenCodecsDemoModel] = Blank.Simple(empty)
}
/**
 * Guide demo exercising GenCodec serialization over Udash RPC: pressing the
 * button sends values of assorted types (primitives, collections, case class,
 * plain class, sealed trait) to the server, and each echoed response is copied
 * into the view model and rendered.
 */
class GenCodecsDemoComponent extends Component with CrossLogging {
  import Context._
  import io.udash.web.guide.demos.rpc.GenCodecServerRPC._

  override def getTemplate: Modifier = GenCodecsDemoViewFactory()

  object GenCodecsDemoViewFactory {
    /** Creates a blank model, wires it to a presenter, and returns the rendered view. */
    def apply(): Modifier = {
      val model = ModelProperty.blank[GenCodecsDemoModel]
      val presenter = new GenCodecsDemoPresenter(model)
      new GenCodecsDemoView(model, presenter).render
    }
  }

  class GenCodecsDemoPresenter(model: ModelProperty[GenCodecsDemoModel]) {
    /**
     * Returns a random alphanumeric string of length `l`.
     *
     * Fix: the previous implementation ignored `l` entirely and always produced
     * a ~8-character hex string (a 32-bit probable prime); the requested length
     * is now honoured.
     */
    def randomString(l: Int): String =
      Random.alphanumeric.take(l).mkString

    /**
     * Fires one RPC per supported codec type; each response (or failure) is
     * handled asynchronously and written into the corresponding model field.
     */
    def onButtonClick(): Unit = {
      val demoRpc: GenCodecServerRPC = Context.serverRpc.demos().gencodecsDemo()
      demoRpc.sendInt(Random.nextInt()) onComplete {
        case Success(response) => model.subProp(_.int).set(Some(response))
        case Failure(ex) => logger.error(ex.getMessage)
      }
      demoRpc.sendDouble(Random.nextLong().toDouble * 2e20) onComplete {
        case Success(response) => model.subProp(_.double).set(Some(response))
        case Failure(ex) => logger.error(ex.getMessage)
      }
      demoRpc.sendString(randomString(10)) onComplete {
        case Success(response) => model.subProp(_.string).set(Some(response))
        case Failure(ex) => logger.error(ex.getMessage)
      }
      demoRpc.sendSeq(Seq(randomString(5), randomString(5))) onComplete {
        case Success(response) => model.subProp(_.seq).set(Some(response))
        case Failure(ex) => logger.error(ex.getMessage)
      }
      demoRpc.sendMap(Map(randomString(5) -> Random.nextInt(), randomString(5) -> Random.nextInt())) onComplete {
        case Success(response) => model.subProp(_.map).set(Some(response.toSeq))
        case Failure(ex) => logger.error(ex.getMessage)
      }
      demoRpc.sendCaseClass(DemoCaseClass(Random.nextInt(), randomString(5), 42)) onComplete {
        case Success(response) => model.subProp(_.caseClass).set(Some(response))
        case Failure(ex) => logger.error(ex.getMessage)
      }
      demoRpc.sendClass(new DemoClass(Random.nextInt(), randomString(5))) onComplete {
        case Success(response) =>
          // DemoClass is not a case class, so its members are unpacked manually.
          model.subProp(_.clsInt).set(Some(response.i))
          model.subProp(_.clsString).set(Some(response.s))
          model.subProp(_.clsVar).set(Some(response._v))
        case Failure(ex) => logger.error(ex.getMessage)
      }
      demoRpc.sendSealedTrait(Seq(Fruit.Apple, Fruit.Orange, Fruit.Banana)(Random.nextInt(3))) onComplete {
        case Success(response) => model.subProp(_.sealedTrait).set(Some(response))
        case Failure(ex) => logger.error(ex.getMessage)
      }
    }
  }

  class GenCodecsDemoView(model: ModelProperty[GenCodecsDemoModel], presenter: GenCodecsDemoPresenter) {
    import JsDom.all._

    // Disabled after the first click so the demo request fires only once.
    val loadDisabled = Property(false)
    val loadIdButton = UdashButton(
      buttonStyle = Color.Primary.toProperty,
      disabled = loadDisabled,
      componentId = ComponentId("gencodec-demo")
    )(_ => "Send request")

    loadIdButton.listen {
      case UdashButton.ButtonClickEvent(_, _) =>
        loadDisabled.set(true)
        presenter.onButtonClick()
    }

    /** Renders the button plus one list item per echoed value. */
    def render: Modifier = span(GuideStyles.frame, GuideStyles.useBootstrap)(
      loadIdButton.render,
      h3("Results:"),
      p(
        ul(
          li("Int: ", produce(model.subProp(_.int))(response => span(id := "gencodec-demo-int", response).render)),
          li("Double: ", produce(model.subProp(_.double))(response => span(id := "gencodec-demo-double", response).render)),
          li("String: ", produce(model.subProp(_.string))(response => span(id := "gencodec-demo-string", response).render)),
          li("Seq[String]: ", produce(model.subProp(_.seq))(response => span(id := "gencodec-demo-seq", response.map(_.toString)).render)),
          li("Map[String, Int]: ", produce(model.subProp(_.map))(response => span(id := "gencodec-demo-map", response.map(_.toString)).render)),
          li("DemoCaseClass: ", produce(model.subProp(_.caseClass))(response => span(id := "gencodec-demo-caseClass", response.map(_.toString)).render)),
          li("DemoClass Int: ", produce(model.subProp(_.clsInt))(response => span(id := "gencodec-demo-cls-int", response).render)),
          li("DemoClass String: ", produce(model.subProp(_.clsString))(response => span(id := "gencodec-demo-cls-string", response).render)),
          li("DemoClass Var: ", produce(model.subProp(_.clsVar))(response => span(id := "gencodec-demo-cls-var", response).render)),
          li("Fruit: ", produce(model.subProp(_.sealedTrait))(response => span(id := "gencodec-demo-sealedTrait", response.map(_.toString)).render))
        )
      )
    )
  }
}
| UdashFramework/udash-guide | guide/src/main/scala/io/udash/web/guide/views/rpc/demos/GenCodecsDemoComponent.scala | Scala | gpl-3.0 | 6,294 |
package com.arcusys.valamis.lesson.model
import org.joda.time.DateTime
/**
 * Aggregated result of a single user's attempts at a lesson.
 *
 * @param lessonId        id of the lesson
 * @param userId          id of the user
 * @param attemptsCount   number of attempts the user has made
 * @param lastAttemptDate time of the most recent attempt, if any
 * @param isSuspended     whether the current attempt is suspended — presumably
 *                        "paused mid-lesson"; confirm against lesson service usage
 * @param isFinished      whether the lesson has been finished
 * @param score           recorded score, if any (defaults to None)
 */
case class UserLessonResult(lessonId: Long,
                            userId: Long,
                            attemptsCount: Int,
                            lastAttemptDate: Option[DateTime],
                            isSuspended: Boolean,
                            isFinished: Boolean,
                            score: Option[Float] = None)
| igor-borisov/valamis | valamis-lesson/src/main/scala/com/arcusys/valamis/lesson/model/UserLessonResult.scala | Scala | gpl-3.0 | 426 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.ui
import java.util.Properties
import scala.collection.mutable.ListBuffer
import org.json4s.jackson.JsonMethods._
import org.apache.spark._
import org.apache.spark.LocalSparkContext._
import org.apache.spark.internal.config
import org.apache.spark.rdd.RDD
import org.apache.spark.scheduler._
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LocalRelation
import org.apache.spark.sql.catalyst.util.quietly
import org.apache.spark.sql.execution.{LeafExecNode, QueryExecution, SparkPlanInfo, SQLExecution}
import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics}
import org.apache.spark.sql.internal.StaticSQLConf.UI_RETAINED_EXECUTIONS
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.status.ElementTrackingStore
import org.apache.spark.status.config._
import org.apache.spark.util.{AccumulatorMetadata, JsonProtocol, LongAccumulator}
import org.apache.spark.util.kvstore.InMemoryStore
class SQLAppStatusListenerSuite extends SparkFunSuite with SharedSQLContext with JsonTestUtils {
import testImplicits._
override protected def sparkConf = {
  // Update live entities with period 0 and disable async tracking so the tests
  // can assert on listener/store state immediately after posting events.
  super.sparkConf.set(LIVE_ENTITY_UPDATE_PERIOD, 0L).set(ASYNC_TRACKING_ENABLED, false)
}
/** A tiny two-row DataFrame with a filter, used to produce a simple physical plan. */
private def createTestDataFrame: DataFrame = {
  val rows = Seq(1 -> 1, 2 -> 2)
  rows.toDF().filter("_1 > 1")
}
/** Job properties carrying the SQL execution id, as set by SQLExecution. */
private def createProperties(executionId: Long): Properties = {
  val props = new Properties()
  props.setProperty(SQLExecution.EXECUTION_ID_KEY, executionId.toString)
  props
}
/** Builds a minimal StageInfo; only stageId and attemptId matter to these tests. */
private def createStageInfo(stageId: Int, attemptId: Int): StageInfo = {
  new StageInfo(stageId = stageId,
    attemptId = attemptId,
    // The following fields are not used in tests
    name = "",
    numTasks = 0,
    rddInfos = Nil,
    parentIds = Nil,
    details = "")
}
/**
 * Builds a TaskInfo that is already marked FINISHED and carries the given
 * accumulator updates (accumulator id -> value).
 */
private def createTaskInfo(
    taskId: Int,
    attemptNumber: Int,
    accums: Map[Long, Long] = Map.empty): TaskInfo = {
  val info = new TaskInfo(
    taskId = taskId,
    attemptNumber = attemptNumber,
    // The following fields are not used in tests
    index = 0,
    launchTime = 0,
    executorId = "",
    host = "",
    taskLocality = null,
    speculative = false)
  info.markFinished(TaskState.FINISHED, 1L)
  info.setAccumulables(createAccumulatorInfos(accums))
  info
}
/** Wraps (id -> value) pairs as AccumulableInfo entries backed by LongAccumulators. */
private def createAccumulatorInfos(accumulatorUpdates: Map[Long, Long]): Seq[AccumulableInfo] = {
  val infos =
    for ((id, value) <- accumulatorUpdates) yield {
      val acc = new LongAccumulator
      acc.metadata = AccumulatorMetadata(id, None, false)
      acc.toInfo(Some(value), None)
    }
  infos.toSeq
}
/**
 * Asserts that the jobs tracked for the given execution exactly match the
 * expected job ids per status. Fails if the execution recorded a job in any
 * status other than RUNNING, SUCCEEDED or FAILED.
 *
 * Rewritten with immutable `collect` instead of mutable ListBuffers.
 */
private def assertJobs(
    exec: Option[SQLExecutionUIData],
    running: Seq[Int] = Nil,
    completed: Seq[Int] = Nil,
    failed: Seq[Int] = Nil): Unit = {
  val jobs = exec.get.jobs.toSeq
  // Fail fast on any status this helper does not know how to classify.
  jobs.foreach { case (_, jobStatus) =>
    jobStatus match {
      case JobExecutionStatus.RUNNING | JobExecutionStatus.SUCCEEDED | JobExecutionStatus.FAILED => ()
      case _ => fail(s"Unexpected status $jobStatus")
    }
  }
  def jobsWithStatus(status: JobExecutionStatus): Seq[Int] =
    jobs.collect { case (jobId, `status`) => jobId }.sorted
  assert(jobsWithStatus(JobExecutionStatus.RUNNING) === running)
  assert(jobsWithStatus(JobExecutionStatus.SUCCEEDED) === completed)
  assert(jobsWithStatus(JobExecutionStatus.FAILED) === failed)
}
/** Builds an in-memory SQLAppStatusStore with a live listener, isolated per test. */
private def createStatusStore(): SQLAppStatusStore = {
  val testConf = sparkContext.conf
  val kvStore = new ElementTrackingStore(new InMemoryStore, testConf)
  val liveListener = new SQLAppStatusListener(testConf, kvStore, live = true)
  new SQLAppStatusStore(kvStore, Some(liveListener))
}
test("basic") {
  // End-to-end walk through one SQL execution: start, executor metric updates
  // across two stages (including a stage retry, which must reset that stage's
  // metrics), task completions, job end and execution end — asserting the
  // aggregated metric values in the status store after every step.
  def checkAnswer(actual: Map[Long, String], expected: Map[Long, Long]): Unit = {
    assert(actual.size == expected.size)
    expected.foreach { case (id, value) =>
      // The values in actual can be SQL metrics meaning that they contain additional formatting
      // when converted to string. Verify that they start with the expected value.
      // TODO: this is brittle. There is no requirement that the actual string needs to start
      // with the accumulator value.
      assert(actual.contains(id))
      val v = actual.get(id).get.trim
      assert(v.startsWith(value.toString), s"Wrong value for accumulator $id")
    }
  }

  val statusStore = createStatusStore()
  val listener = statusStore.listener.get

  val executionId = 0
  val df = createTestDataFrame
  val accumulatorIds =
    SparkPlanGraph(SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan))
      .allNodes.flatMap(_.metrics.map(_.accumulatorId))
  // Assume all accumulators are long
  var accumulatorValue = 0L
  // Give each accumulator a distinct value 1, 2, 3, ... so totals below are
  // simple multiples of this map.
  val accumulatorUpdates = accumulatorIds.map { id =>
    accumulatorValue += 1L
    (id, accumulatorValue)
  }.toMap

  listener.onOtherEvent(SparkListenerSQLExecutionStart(
    executionId,
    "test",
    "test",
    df.queryExecution.toString,
    SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
    System.currentTimeMillis()))

  listener.onJobStart(SparkListenerJobStart(
    jobId = 0,
    time = System.currentTimeMillis(),
    stageInfos = Seq(
      createStageInfo(0, 0),
      createStageInfo(1, 0)
    ),
    createProperties(executionId)))
  listener.onStageSubmitted(SparkListenerStageSubmitted(createStageInfo(0, 0)))

  // No metric updates posted yet.
  assert(statusStore.executionMetrics(executionId).isEmpty)

  listener.onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate("", Seq(
    // (task id, stage id, stage attempt, accum updates)
    (0L, 0, 0, createAccumulatorInfos(accumulatorUpdates)),
    (1L, 0, 0, createAccumulatorInfos(accumulatorUpdates))
  )))

  // Two tasks each contributed one unit per accumulator.
  checkAnswer(statusStore.executionMetrics(executionId), accumulatorUpdates.mapValues(_ * 2))

  // Driver accumulator updates don't belong to this execution should be filtered and no
  // exception will be thrown.
  listener.onOtherEvent(SparkListenerDriverAccumUpdates(0, Seq((999L, 2L))))

  checkAnswer(statusStore.executionMetrics(executionId), accumulatorUpdates.mapValues(_ * 2))

  listener.onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate("", Seq(
    // (task id, stage id, stage attempt, accum updates)
    (0L, 0, 0, createAccumulatorInfos(accumulatorUpdates)),
    (1L, 0, 0, createAccumulatorInfos(accumulatorUpdates.mapValues(_ * 2)))
  )))

  // Later updates for the same task replace earlier ones: 1x + 2x = 3x.
  checkAnswer(statusStore.executionMetrics(executionId), accumulatorUpdates.mapValues(_ * 3))

  // Retrying a stage should reset the metrics
  listener.onStageSubmitted(SparkListenerStageSubmitted(createStageInfo(0, 1)))

  listener.onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate("", Seq(
    // (task id, stage id, stage attempt, accum updates)
    (0L, 0, 1, createAccumulatorInfos(accumulatorUpdates)),
    (1L, 0, 1, createAccumulatorInfos(accumulatorUpdates))
  )))

  checkAnswer(statusStore.executionMetrics(executionId), accumulatorUpdates.mapValues(_ * 2))

  // Ignore the task end for the first attempt
  listener.onTaskEnd(SparkListenerTaskEnd(
    stageId = 0,
    stageAttemptId = 0,
    taskType = "",
    reason = null,
    createTaskInfo(0, 0, accums = accumulatorUpdates.mapValues(_ * 100)),
    null))

  checkAnswer(statusStore.executionMetrics(executionId), accumulatorUpdates.mapValues(_ * 2))

  // Finish two tasks
  listener.onTaskEnd(SparkListenerTaskEnd(
    stageId = 0,
    stageAttemptId = 1,
    taskType = "",
    reason = null,
    createTaskInfo(0, 0, accums = accumulatorUpdates.mapValues(_ * 2)),
    null))
  listener.onTaskEnd(SparkListenerTaskEnd(
    stageId = 0,
    stageAttemptId = 1,
    taskType = "",
    reason = null,
    createTaskInfo(1, 0, accums = accumulatorUpdates.mapValues(_ * 3)),
    null))

  // Final values from the task-end events: 2x + 3x = 5x.
  checkAnswer(statusStore.executionMetrics(executionId), accumulatorUpdates.mapValues(_ * 5))

  // Submit a new stage
  listener.onStageSubmitted(SparkListenerStageSubmitted(createStageInfo(1, 0)))

  listener.onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate("", Seq(
    // (task id, stage id, stage attempt, accum updates)
    (0L, 1, 0, createAccumulatorInfos(accumulatorUpdates)),
    (1L, 1, 0, createAccumulatorInfos(accumulatorUpdates))
  )))

  checkAnswer(statusStore.executionMetrics(executionId), accumulatorUpdates.mapValues(_ * 7))

  // Finish two tasks
  listener.onTaskEnd(SparkListenerTaskEnd(
    stageId = 1,
    stageAttemptId = 0,
    taskType = "",
    reason = null,
    createTaskInfo(0, 0, accums = accumulatorUpdates.mapValues(_ * 3)),
    null))
  listener.onTaskEnd(SparkListenerTaskEnd(
    stageId = 1,
    stageAttemptId = 0,
    taskType = "",
    reason = null,
    createTaskInfo(1, 0, accums = accumulatorUpdates.mapValues(_ * 3)),
    null))

  checkAnswer(statusStore.executionMetrics(executionId), accumulatorUpdates.mapValues(_ * 11))

  assertJobs(statusStore.execution(executionId), running = Seq(0))

  listener.onJobEnd(SparkListenerJobEnd(
    jobId = 0,
    time = System.currentTimeMillis(),
    JobSucceeded
  ))
  listener.onOtherEvent(SparkListenerSQLExecutionEnd(
    executionId, System.currentTimeMillis()))

  assertJobs(statusStore.execution(executionId), completed = Seq(0))

  // Metrics must survive the end of the execution.
  checkAnswer(statusStore.executionMetrics(executionId), accumulatorUpdates.mapValues(_ * 11))
}
test("onExecutionEnd happens before onJobEnd(JobSucceeded)") {
  // Even when the SQL execution end event arrives before the job end event,
  // the job must still be recorded as completed for the execution.
  val statusStore = createStatusStore()
  val listener = statusStore.listener.get

  val executionId = 0
  val df = createTestDataFrame
  listener.onOtherEvent(SparkListenerSQLExecutionStart(
    executionId,
    "test",
    "test",
    df.queryExecution.toString,
    SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
    System.currentTimeMillis()))
  listener.onJobStart(SparkListenerJobStart(
    jobId = 0,
    time = System.currentTimeMillis(),
    stageInfos = Nil,
    createProperties(executionId)))
  listener.onOtherEvent(SparkListenerSQLExecutionEnd(
    executionId, System.currentTimeMillis()))
  listener.onJobEnd(SparkListenerJobEnd(
    jobId = 0,
    time = System.currentTimeMillis(),
    JobSucceeded
  ))

  assertJobs(statusStore.execution(executionId), completed = Seq(0))
}
test("onExecutionEnd happens before multiple onJobEnd(JobSucceeded)s") {
  // The execution end event arrives between the two job end events; both jobs
  // must still end up recorded as completed.
  val statusStore = createStatusStore()
  val listener = statusStore.listener.get

  val executionId = 0
  val df = createTestDataFrame
  listener.onOtherEvent(SparkListenerSQLExecutionStart(
    executionId,
    "test",
    "test",
    df.queryExecution.toString,
    SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
    System.currentTimeMillis()))
  listener.onJobStart(SparkListenerJobStart(
    jobId = 0,
    time = System.currentTimeMillis(),
    stageInfos = Nil,
    createProperties(executionId)))
  listener.onJobEnd(SparkListenerJobEnd(
    jobId = 0,
    time = System.currentTimeMillis(),
    JobSucceeded
  ))

  listener.onJobStart(SparkListenerJobStart(
    jobId = 1,
    time = System.currentTimeMillis(),
    stageInfos = Nil,
    createProperties(executionId)))
  listener.onOtherEvent(SparkListenerSQLExecutionEnd(
    executionId, System.currentTimeMillis()))
  listener.onJobEnd(SparkListenerJobEnd(
    jobId = 1,
    time = System.currentTimeMillis(),
    JobSucceeded
  ))

  assertJobs(statusStore.execution(executionId), completed = Seq(0, 1))
}
test("onExecutionEnd happens before onJobEnd(JobFailed)") {
  // A job that fails after its execution has already ended must still be
  // recorded as failed for that execution.
  val statusStore = createStatusStore()
  val listener = statusStore.listener.get

  val executionId = 0
  val df = createTestDataFrame
  listener.onOtherEvent(SparkListenerSQLExecutionStart(
    executionId,
    "test",
    "test",
    df.queryExecution.toString,
    SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
    System.currentTimeMillis()))
  listener.onJobStart(SparkListenerJobStart(
    jobId = 0,
    time = System.currentTimeMillis(),
    stageInfos = Seq.empty,
    createProperties(executionId)))
  listener.onOtherEvent(SparkListenerSQLExecutionEnd(
    executionId, System.currentTimeMillis()))
  listener.onJobEnd(SparkListenerJobEnd(
    jobId = 0,
    time = System.currentTimeMillis(),
    JobFailed(new RuntimeException("Oops"))
  ))

  assertJobs(statusStore.execution(executionId), failed = Seq(0))
}
test("handle one execution with multiple jobs") {
  // Two two-stage jobs share a single execution id; both jobs and all four
  // stages must be attributed to that execution.
  val statusStore = createStatusStore()
  val listener = statusStore.listener.get

  val executionId = 0
  val df = createTestDataFrame
  listener.onOtherEvent(SparkListenerSQLExecutionStart(
    executionId,
    "test",
    "test",
    df.queryExecution.toString,
    SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
    System.currentTimeMillis()))

  var stageId = 0
  // Runs one job made of two fresh stages (ids taken from the outer counter)
  // to successful completion.
  def twoStageJob(jobId: Int): Unit = {
    val stages = Seq(stageId, stageId + 1).map { id => createStageInfo(id, 0)}
    stageId += 2
    listener.onJobStart(SparkListenerJobStart(
      jobId = jobId,
      time = System.currentTimeMillis(),
      stageInfos = stages,
      createProperties(executionId)))
    stages.foreach { s =>
      listener.onStageSubmitted(SparkListenerStageSubmitted(s))
      listener.onStageCompleted(SparkListenerStageCompleted(s))
    }
    listener.onJobEnd(SparkListenerJobEnd(
      jobId = jobId,
      time = System.currentTimeMillis(),
      JobSucceeded
    ))
  }
  // submit two jobs with the same executionId
  twoStageJob(0)
  twoStageJob(1)
  listener.onOtherEvent(SparkListenerSQLExecutionEnd(
    executionId, System.currentTimeMillis()))

  assertJobs(statusStore.execution(0), completed = 0 to 1)
  assert(statusStore.execution(0).get.stages === (0 to 3).toSet)
}
test("SPARK-11126: no memory leak when running non SQL jobs") {
  // Non-SQL jobs must not accumulate live data in the shared SQL listener.
  val listener = spark.sharedState.statusStore.listener.get
  // At the beginning of this test case, there should be no live data in the listener.
  assert(listener.noLiveData())
  spark.sparkContext.parallelize(1 to 10).foreach(i => ())
  spark.sparkContext.listenerBus.waitUntilEmpty(10000)
  // Listener should ignore the non-SQL stages, as the stage data are only removed when SQL
  // execution ends, which will not be triggered for non-SQL jobs.
  assert(listener.noLiveData())
}
test("driver side SQL metrics") {
  // Executes a plan that posts driver-side metric updates and checks that both
  // metrics appear in the status store with their expected formatted values.
  val statusStore = spark.sharedState.statusStore
  val oldCount = statusStore.executionsList().size

  val expectedAccumValue = 12345
  val expectedAccumValue2 = 54321
  val physicalPlan = MyPlan(sqlContext.sparkContext, expectedAccumValue, expectedAccumValue2)
  val dummyQueryExecution = new QueryExecution(spark, LocalRelation()) {
    override lazy val sparkPlan = physicalPlan
    override lazy val executedPlan = physicalPlan
  }

  SQLExecution.withNewExecutionId(spark, dummyQueryExecution) {
    physicalPlan.execute().collect()
  }

  // Wait until the new execution is started and being tracked.
  // NOTE(review): the condition looks like it should be `<= oldCount` to
  // actually wait for a *new* execution (`< oldCount` can never hold);
  // the next loop compensates in practice — confirm before relying on it.
  // Also: both polling loops have no upper bound, so an unmet condition hangs
  // the test rather than failing it.
  while (statusStore.executionsCount() < oldCount) {
    Thread.sleep(100)
  }

  // Wait for listener to finish computing the metrics for the execution.
  while (statusStore.executionsList().last.metricValues == null) {
    Thread.sleep(100)
  }

  val execId = statusStore.executionsList().last.executionId
  val metrics = statusStore.executionMetrics(execId)
  val driverMetric = physicalPlan.metrics("dummy")
  val driverMetric2 = physicalPlan.metrics("dummy2")
  val expectedValue = SQLMetrics.stringValue(driverMetric.metricType, Seq(expectedAccumValue))
  val expectedValue2 = SQLMetrics.stringValue(driverMetric2.metricType, Seq(expectedAccumValue2))

  assert(metrics.contains(driverMetric.id))
  assert(metrics(driverMetric.id) === expectedValue)
  assert(metrics.contains(driverMetric2.id))
  assert(metrics(driverMetric2.id) === expectedValue2)
}
test("roundtripping SparkListenerDriverAccumUpdates through JsonProtocol (SPARK-18462)") {
  // The event must serialize to the expected JSON shape and deserialize back
  // with identical values — including values only representable as Long.
  val event = SparkListenerDriverAccumUpdates(1L, Seq((2L, 3L)))
  val json = JsonProtocol.sparkEventToJson(event)

  assertValidDataInJson(json,
    parse("""
      |{
      |  "Event": "org.apache.spark.sql.execution.ui.SparkListenerDriverAccumUpdates",
      |  "executionId": 1,
      |  "accumUpdates": [[2,3]]
      |}
    """.stripMargin))
  JsonProtocol.sparkEventFromJson(json) match {
    case SparkListenerDriverAccumUpdates(executionId, accums) =>
      assert(executionId == 1L)
      accums.foreach { case (a, b) =>
        assert(a == 2L)
        assert(b == 3L)
      }
  }

  // Test a case where the numbers in the JSON can only fit in longs:
  val longJson = parse(
    """
      |{
      |  "Event": "org.apache.spark.sql.execution.ui.SparkListenerDriverAccumUpdates",
      |  "executionId": 4294967294,
      |  "accumUpdates": [[4294967294,3]]
      |}
    """.stripMargin)
  JsonProtocol.sparkEventFromJson(longJson) match {
    case SparkListenerDriverAccumUpdates(executionId, accums) =>
      assert(executionId == 4294967294L)
      accums.foreach { case (a, b) =>
        assert(a == 4294967294L)
        assert(b == 3L)
      }
  }
}
test("eviction should respect execution completion time") {
  // With a retention limit of 2, starting a third execution must evict the one
  // that *completed* first (execution 2), not the one that started first.
  val conf = sparkContext.conf.clone().set(UI_RETAINED_EXECUTIONS.key, "2")
  val store = new ElementTrackingStore(new InMemoryStore, conf)
  val listener = new SQLAppStatusListener(conf, store, live = true)
  val statusStore = new SQLAppStatusStore(store, Some(listener))

  // Monotonic fake clock so completion order is deterministic.
  var time = 0
  val df = createTestDataFrame
  // Start execution 1 and execution 2
  time += 1
  listener.onOtherEvent(SparkListenerSQLExecutionStart(
    1,
    "test",
    "test",
    df.queryExecution.toString,
    SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
    time))
  time += 1
  listener.onOtherEvent(SparkListenerSQLExecutionStart(
    2,
    "test",
    "test",
    df.queryExecution.toString,
    SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
    time))

  // Stop execution 2 before execution 1
  time += 1
  listener.onOtherEvent(SparkListenerSQLExecutionEnd(2, time))
  time += 1
  listener.onOtherEvent(SparkListenerSQLExecutionEnd(1, time))

  // Start execution 3 and execution 2 should be evicted.
  time += 1
  listener.onOtherEvent(SparkListenerSQLExecutionStart(
    3,
    "test",
    "test",
    df.queryExecution.toString,
    SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
    time))
  assert(statusStore.executionsCount === 2)
  assert(statusStore.execution(2) === None)
}
}
/**
 * A dummy [[org.apache.spark.sql.execution.SparkPlan]] that updates a [[SQLMetrics]]
 * on the driver. `expectedValue`/`expectedValue2` are added to the "dummy" and
 * "dummy2" metrics respectively when the plan executes, then posted as driver
 * metric updates for the current execution.
 */
private case class MyPlan(sc: SparkContext, expectedValue: Long, expectedValue2: Long)
  extends LeafExecNode {
  override def sparkContext: SparkContext = sc
  override def output: Seq[Attribute] = Seq()

  override val metrics: Map[String, SQLMetric] = Map(
    "dummy" -> SQLMetrics.createMetric(sc, "dummy"),
    "dummy2" -> SQLMetrics.createMetric(sc, "dummy2"))

  override def doExecute(): RDD[InternalRow] = {
    longMetric("dummy") += expectedValue
    longMetric("dummy2") += expectedValue2

    // postDriverMetricUpdates may happen multiple time in a query.
    // (normally from different operators, but for the sake of testing, from one operator)
    SQLMetrics.postDriverMetricUpdates(
      sc,
      sc.getLocalProperty(SQLExecution.EXECUTION_ID_KEY),
      Seq(metrics("dummy")))

    SQLMetrics.postDriverMetricUpdates(
      sc,
      sc.getLocalProperty(SQLExecution.EXECUTION_ID_KEY),
      Seq(metrics("dummy2")))
    // The plan produces no rows; only the metric side effects matter.
    sc.emptyRDD
  }
}
/**
 * Verifies that the SQL status store bounds its retained executions and that
 * the listener holds no live data after many successful and failed executions.
 */
class SQLAppStatusListenerMemoryLeakSuite extends SparkFunSuite {

  test("no memory leak") {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("test")
      .set(config.MAX_TASK_FAILURES, 1) // Don't retry the tasks to run this test quickly
      .set("spark.sql.ui.retainedExecutions", "50") // Set it to 50 to run this test quickly
      .set(ASYNC_TRACKING_ENABLED, false)
    withSpark(new SparkContext(conf)) { sc =>
      quietly {
        val spark = new SparkSession(sc)
        import spark.implicits._
        // Run 100 successful executions and 100 failed executions.
        // Each execution only has one job and one stage.
        for (_ <- 0 until 100) {
          val df = Seq(
            (1, 1),
            (2, 2)
          ).toDF()
          df.collect()
          try {
            df.foreach(_ => throw new RuntimeException("Oops"))
          } catch {
            case _: SparkException => // This is expected for a failed job
          }
        }
        sc.listenerBus.waitUntilEmpty(10000)
        val statusStore = spark.sharedState.statusStore
        assert(statusStore.executionsCount() <= 50)
        assert(statusStore.planGraphCount() <= 50)
        // No live data should be left behind after all executions end.
        assert(statusStore.listener.get.noLiveData())
      }
    }
  }
}
| ahnqirage/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLAppStatusListenerSuite.scala | Scala | apache-2.0 | 22,423 |
package com.twitter.finagle.postgres.values
import java.nio.charset.Charset
import scala.util.parsing.combinator.RegexParsers
import org.jboss.netty.buffer.{ChannelBuffer, ChannelBuffers}
/**
 * Helpers for converting Postgres `hstore` values to and from
 * `Map[String, Option[String]]` — a `None` value represents SQL NULL.
 * Both the text format and the binary wire format are supported.
 */
object HStores {
  /**
   * Parser for the hstore text representation, e.g. {{{"a"=>"1", "b"=>NULL}}}.
   * Keys and values are double-quoted with backslash escaping; the bare
   * (unquoted) token NULL denotes a missing value.
   */
  object HStoreStringParser extends RegexParsers {
    // A double-quoted string; the regex accepts backslash escapes, which are
    // then unescaped (\" -> " and \\ -> \) before the text is returned.
    def key:Parser[String] = "\"" ~ """([^"\\]*(\\.[^"\\]*)*)""".r ~ "\"" ^^ {
      case o~value~c => value.replace("\\\"", "\"").replace("\\\\", "\\")
    }

    // A value is either a quoted string or the literal token NULL.
    def value = key | "NULL"

    // key => value; a literal NULL maps to None.
    def item:Parser[(String, Option[String])] = key ~ "=>" ~ value ^^ {
      case key~arrow~"NULL" => (key, None)
      case key~arrow~value => (key, Some(value))
    }

    // NOTE(review): the separator must be exactly ", " (comma + one space);
    // input with different spacing will fail to parse — confirm this matches
    // all producers of hstore text this code consumes.
    def items:Parser[Map[String, Option[String]]] = repsep(item, ", ") ^^ { l => l.toMap }

    /** Returns Some(map) on a complete parse of `input`, None otherwise. */
    def apply(input:String):Option[Map[String, Option[String]]] = parseAll(items, input) match {
      case Success(result, _) => Some(result)
      case failure:NoSuccess => None
    }
  }

  /** Parses an hstore text literal; None if the input is not valid. */
  def parseHStoreString(str: String) = HStoreStringParser(str)

  /**
   * Renders a map in hstore text form, quoting keys/values and emitting NULL
   * for None. NOTE(review): only '"' is escaped here, while the parser also
   * unescapes backslashes — a value containing '\' will not round-trip; verify.
   */
  def formatHStoreString(hstore: Map[String, Option[String]]) = hstore.map {
    case (k, v) =>
      val key = s""""${k.replace("\"", "\\\"")}""""
      val value = v.map(str => s""""${str.replace("\"", "\\\"")}"""").getOrElse("NULL")
      s"""$key => $value"""
  }.mkString(",")

  /**
   * Decodes the binary hstore wire format: an int pair count, then for each
   * pair a length-prefixed key and a length-prefixed value, where a value
   * length of -1 encodes NULL.
   */
  def decodeHStoreBinary(buf: ChannelBuffer, charset: Charset) = {
    val count = buf.readInt()
    val pairs = Array.fill(count) {
      val keyLength = buf.readInt()
      val key = Array.fill(keyLength)(buf.readByte())
      val valueLength = buf.readInt()
      val value = valueLength match {
        case -1 => None
        case l =>
          val valueBytes = Array.fill(l)(buf.readByte())
          Some(valueBytes)
      }
      new String(key, charset) -> value.map(new String(_, charset))
    }
    pairs.toMap
  }

  /** Encodes a map in the binary hstore wire format (inverse of decodeHStoreBinary). */
  def encodeHStoreBinary(hstore: Map[String, Option[String]], charset: Charset) = {
    val buf = ChannelBuffers.dynamicBuffer()
    buf.writeInt(hstore.size)
    hstore foreach {
      case (key, value) =>
        val keyBytes = key.getBytes(charset)
        buf.writeInt(keyBytes.length)
        buf.writeBytes(keyBytes)
        value match {
          case None => buf.writeInt(-1)
          case Some(v) =>
            val valueBytes = v.getBytes(charset)
            buf.writeInt(valueBytes.length)
            buf.writeBytes(valueBytes)
        }
    }
    buf
  }
}
| jeremyrsmith/finagle-postgres | src/main/scala/com/twitter/finagle/postgres/values/HStores.scala | Scala | apache-2.0 | 2,353 |
package com.kalmanb.sbt
import sbt.Keys._
import sbt.Load.BuildStructure
import sbt._
/**
 * sbt plugin adding a `publishLocalAll` task: runs `update` on the current
 * project, and for every unresolved dependency whose name matches a project
 * in this build, recursively publishes that project locally first.
 */
object DependencyBuilderPlugin extends Plugin {
  val taskKey = TaskKey[Unit]("publishLocalAll", "Will publishLocal the current projects. If during update it can't find a module that exists in the build file it will build it")
  // Settings to add to a project's build to expose the publishLocalAll task.
  val dependencyBuilderSettings = Seq[Setting[_]](
    taskKey <<= (thisProjectRef, buildStructure, state) map {
      (thisProjectRef, structure, state) ⇒
        update(thisProjectRef, state)
    }
  )
  // Recursively publishes locally-buildable missing dependencies, then
  // publishes `project` itself.
  // NOTE(review): recursion has no visited-set; mutually dependent projects,
  // or a dependency that stays unresolved after building, would recurse
  // forever — confirm the build graph is acyclic before relying on this.
  def update(project: ProjectRef, state: State): Unit = {
    val missingDependencies: Seq[ModuleID] = getMissingDependencies(project, state)
    val allProjectRefs = Project.extract(state).structure.allProjectRefs
    // Matches by name prefix (e.g. cross-versioned artifact names like
    // "core_2.10" start with the project id "core").
    val modulesToBuild = allProjectRefs.filter(ref ⇒ missingDependencies.exists(d ⇒ d.name startsWith ref.project))
    modulesToBuild foreach (update(_, state))
    evaluateTask(Keys.publishLocal in configuration, project, state)
  }
  // Runs `update` for `ref` and collects the modules that failed to resolve;
  // empty when resolution succeeds or fails for a non-resolve reason.
  def getMissingDependencies(ref: ProjectRef, state: State): Seq[ModuleID] = {
    evaluateTask(Keys.update in configuration, ref, state) match {
      case Some((_, Inc(inc))) ⇒ {
        inc.causes.flatMap(_.directCause.map(_ match {
          case e: sbt.ResolveException ⇒ e.failed
          case _ ⇒ Seq.empty
        }))
      }.flatten
      case _ ⇒ Seq.empty
    }
  }
  // NOTE(review): appears unused — update() invokes publishLocal directly,
  // bypassing this logging wrapper; confirm which was intended.
  def publishLocalModule(ref: ProjectRef, state: State): Unit = {
    println("Publishing " + ref)
    evaluateTask(Keys.publishLocal in configuration, ref, state)
  }
  // Thin wrapper over sbt's EvaluateTask using the default evaluation config.
  def evaluateTask[A](key: TaskKey[A], ref: ProjectRef, state: State): Option[(sbt.State, sbt.Result[A])] = {
    EvaluateTask(Project.extract(state).structure, key, state, ref, EvaluateTask defaultConfig state)
  }
}
| kalmanb/sbt-dependency-builder | src/main/scala/com/kalmanb/sbt/DependencyBuilderPlugin.scala | Scala | apache-2.0 | 1,779 |
package org.nisshiee.toban.controller.api
import scalaz._, Scalaz._
import play.api._
import play.api.mvc._
import play.api.db._
import play.api.Play.current
import play.api.libs.json._, Json._
import play.api.libs.Jsonp
import org.nisshiee.toban.model._
import org.nisshiee.toban.controller.ControllerHelper
/**
 * JSON/JSONP API endpoint returning the toban (duty assignment) for a given
 * task and date. Responds with an empty JSON object when the date string is
 * invalid or no assignment exists.
 */
object TobanController extends Controller with ControllerHelper {
  def get(taskId: Int, dateStr: String, callback: String) = ApiAction {
    // None when the date fails to parse or no toban row is found.
    val tobanOpt = for {
      date <- str2DateOpt(dateStr)
      toban <- DB.withTransaction { implicit c => Toban.find(taskId, date) }
    } yield toban
    // scalaz operators: `∘` maps over the Option, `|` is getOrElse (fallback
    // to an empty JSON object), `|>` pipes the resulting JsValue forward.
    tobanOpt ∘ toJson[Toban] | toJson(Map[String, String]()) |> { js =>
      // Empty callback parameter means plain JSON; otherwise wrap as JSONP.
      callback match {
        case "" => Ok(js)
        case c => Ok(Jsonp(c, js))
      }
    }
  }
}
| nisshiee/to-ban | app/controllers/api/TobanController.scala | Scala | mit | 787 |
/* Copyright 2012-2015 Micronautics Research Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License. */
package com.micronautics.aws
/*
import org.scalatest._
import org.scalatestplus.play._
import play.api.mvc.Results
import play.api.test._
import scala.concurrent.duration._
import scala.concurrent.Await
import scala.language.postfixOps
class SNSTest extends PlaySpecServer {
import play.api.libs.ws.WS
import play.api.mvc._
import play.api.test.FakeApplication
// Override app if you need a FakeApplication with other than default parameters.
implicit override lazy val app: FakeApplication =
FakeApplication(
//additionalConfiguration = Map("ehcacheplugin" -> "disabled"),
withRoutes = {
case ("GET", "/") =>
Action { Ok("Got root") }
case ("GET", "/lectures/sns/transcodingDone") =>
// topic.publish("Hello from Amazon SNS!")
// topic.delete()
Action { Ok("Got callback") }
}
)
implicit val snsClient = sns.snsClient
val subscriberUrl = Option(System.getenv("TRANSCODER_SUBSCRIPTION_URL")).getOrElse("http://bear64.no-ip.info:9000")
"blah" must {
"test server logic" in {
val myPublicAddress = s"localhost:$port"
val testURL = s"http://$myPublicAddress"
// The test payment gateway requires a callback to this server before it returns a result...
val callbackURL = s"http://$myPublicAddress/callback"
// await is from play.api.test.FutureAwaits
val response = Await.result(WS.url(testURL).withQueryString("callbackURL" -> callbackURL).get(), 10 seconds)
//response.status mustBe OK
}
}
"SNS" must {
"manipulate topics" in {
sns.findOrCreateTopic("TestTopic").map { topic =>
topic.subscribe(s"$subscriberUrl/lectures/sns/transcodingDone".asUrl)
}.orElse { fail() }
()
}
}
}
*/
| mslinn/awslib_scala | src/test/scala/com/micronautics/aws/SNSTest.scala | Scala | mit | 2,377 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.metrics.reporter
import javax.management.MBeanServer
import javax.management.ObjectName
import junit.framework.Assert.assertEquals
import org.apache.samza.metrics.JmxUtil
import org.apache.samza.metrics.MetricsRegistryMap
import org.junit.Test
import org.mockito.Matchers
import org.mockito.Mockito.mock
import org.mockito.Mockito.times
import org.mockito.Mockito.verify
import org.mockito.Mockito.when
import org.mockito.Mockito.reset
/**
 * Verifies that JmxReporter registers an MBean for each metric: once for
 * metrics present at registration time, and again (via the registry
 * listener) for metrics created while the reporter is running.
 */
class TestJmxReporter {
  val REPORTER_SOURCE = "test"
  @Test
  def testJmxReporter {
    val metricGroup = "org.apache.samza.metrics.JvmMetrics"
    val metricName = "mem-non-heap-used-mb"
    // Expected JMX name for the (group, name, source) triple.
    val objectName: ObjectName = JmxUtil.getObjectName(metricGroup, metricName, REPORTER_SOURCE)
    val registry: MetricsRegistryMap = new MetricsRegistryMap
    val mBeanServerMock: MBeanServer = mock(classOf[MBeanServer])
    // Create dummy test metric.
    registry.newCounter(metricGroup, metricName)
    // Report "not yet registered" so the reporter attempts registration.
    when(mBeanServerMock.isRegistered(objectName)).thenReturn(false)
    val reporter = new JmxReporter(mBeanServerMock)
    reporter.register(REPORTER_SOURCE, registry)
    reporter.start
    // Starting the reporter registers the pre-existing metric exactly once.
    verify(mBeanServerMock, times(1)).registerMBean(Matchers.anyObject(), Matchers.eq(objectName))
    // Clear recorded interactions so the next verify counts from zero.
    reset(mBeanServerMock)
    // Create dummy counter to test metrics reporting through listener.
    registry.newCounter(metricGroup, metricName)
    // The running reporter should have installed itself as the sole listener.
    assertEquals(1, registry.listeners.size)
    verify(mBeanServerMock, times(1)).registerMBean(Matchers.anyObject(), Matchers.eq(objectName))
    reporter.stop
  }
}
| prateekm/samza | samza-core/src/test/scala/org/apache/samza/metrics/reporter/TestJmxReporter.scala | Scala | apache-2.0 | 2,384 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package collection
package immutable
/** Base trait for immutable sequences. Being immutable, `toSeq` can return
  * the receiver unchanged. */
trait Seq[+A] extends Iterable[A]
  with collection.Seq[A]
  with SeqOps[A, Seq, Seq[A]]
  with IterableFactoryDefaults[A, Seq] {
  // Already an immutable Seq, so the conversion is the identity.
  override final def toSeq: this.type = this
  override def iterableFactory: SeqFactory[Seq] = Seq
}
/**
 * @define coll immutable sequence
 * @define Coll `immutable.Seq`
 */
// Marker refinement of the generic SeqOps for the immutable hierarchy; it
// adds no members of its own.
trait SeqOps[+A, +CC[_], +C] extends Any with collection.SeqOps[A, CC, C]
/**
 * $factoryInfo
 * @define coll immutable sequence
 * @define Coll `immutable.Seq`
 */
@SerialVersionUID(3L)
object Seq extends SeqFactory.Delegate[Seq](List) {
  // An immutable Seq can be returned as-is; anything else must be copied.
  override def from[E](it: IterableOnce[E]): Seq[E] = it match {
    case s: Seq[E] => s
    case _ => super.from(it)
  }
}
/** Base trait for immutable indexed sequences that have efficient `apply` and `length` */
trait IndexedSeq[+A] extends Seq[A]
  with collection.IndexedSeq[A]
  with IndexedSeqOps[A, IndexedSeq, IndexedSeq[A]]
  with IterableFactoryDefaults[A, IndexedSeq] {
  // Already an immutable IndexedSeq, so the conversion is the identity.
  final override def toIndexedSeq: IndexedSeq[A] = this
  override def canEqual(that: Any): Boolean = that match {
    // Cheap length check first; defer the element comparison to equals.
    case otherIndexedSeq: IndexedSeq[_] => length == otherIndexedSeq.length && super.canEqual(that)
    case _ => super.canEqual(that)
  }
  // Optimized comparison for two indexed sequences: reference equality, then
  // length, then indexed access for a prefix, then iterators for the rest.
  override def sameElements[B >: A](o: IterableOnce[B]): Boolean = o match {
    case that: IndexedSeq[_] =>
      (this eq that) || {
        val length = this.length
        var equal = length == that.length
        if (equal) {
          var index = 0
          // some IndexedSeq apply is less efficient than using Iterators
          // e.g. Vector so we can compare the first few with apply and the rest with an iterator
          // but if apply is more efficient than Iterators then we can use the apply for all the comparison
          // we default to the minimum preferred length
          val maxApplyCompare = {
            val preferredLength = Math.min(applyPreferredMaxLength, that.applyPreferredMaxLength)
            // `<< 1` doubles the threshold: worth switching to iterators only
            // when the sequences are comfortably longer than the preference.
            if (length > (preferredLength.toLong << 1)) preferredLength else length
          }
          while (index < maxApplyCompare && equal) {
            equal = this (index) == that(index)
            index += 1
          }
          if ((index < length) && equal) {
            // Finish the comparison with iterators, skipping the prefix
            // already compared by index.
            val thisIt = this.iterator.drop(index)
            val thatIt = that.iterator.drop(index)
            while (equal && thisIt.hasNext) {
              equal = thisIt.next() == thatIt.next()
            }
          }
        }
        equal
      }
    case _ => super.sameElements(o)
  }
  /** a hint to the runtime when scanning values
    * [[apply]] is preferred for scan with a max index less than this value
    * [[iterator]] is preferred for scans above this range
    * @return a hint about when to use [[apply]] or [[iterator]]
    */
  protected def applyPreferredMaxLength: Int = IndexedSeqDefaults.defaultApplyPreferredMaxLength
  override def iterableFactory: SeqFactory[IndexedSeq] = IndexedSeq
}
/** Holds the system-property-configurable apply/iterator crossover threshold
  * used by [[IndexedSeq.applyPreferredMaxLength]]. Falls back to 64 when the
  * property cannot be read (e.g. under a security manager). */
object IndexedSeqDefaults {
  val defaultApplyPreferredMaxLength: Int =
    try System.getProperty(
      "scala.collection.immutable.IndexedSeq.defaultApplyPreferredMaxLength", "64").toInt
    catch {
      case _: SecurityException => 64
    }
}
// Factory for immutable indexed sequences; Vector is the default implementation.
@SerialVersionUID(3L)
object IndexedSeq extends SeqFactory.Delegate[IndexedSeq](Vector) {
  // An immutable IndexedSeq can be returned as-is; anything else is copied.
  override def from[E](it: IterableOnce[E]): IndexedSeq[E] = it match {
    case is: IndexedSeq[E] => is
    case _ => super.from(it)
  }
}
/** Base trait for immutable indexed Seq operations */
trait IndexedSeqOps[+A, +CC[_], +C]
  extends SeqOps[A, CC, C]
    with collection.IndexedSeqOps[A, CC, C] {
  override def slice(from: Int, until: Int): C = {
    // since we are immutable we can just share the same collection
    if (from <= 0 && until >= length) coll
    else super.slice(from, until)
  }
}
/** Base trait for immutable linear sequences that have efficient `head` and `tail` */
trait LinearSeq[+A]
  extends Seq[A]
    with collection.LinearSeq[A]
    with LinearSeqOps[A, LinearSeq, LinearSeq[A]]
    with IterableFactoryDefaults[A, LinearSeq] {
  override def iterableFactory: SeqFactory[LinearSeq] = LinearSeq
}
// Factory for immutable linear sequences; List is the default implementation.
@SerialVersionUID(3L)
object LinearSeq extends SeqFactory.Delegate[LinearSeq](List) {
  // An immutable LinearSeq can be returned as-is; anything else is copied.
  override def from[E](it: IterableOnce[E]): LinearSeq[E] = it match {
    case ls: LinearSeq[E] => ls
    case _ => super.from(it)
  }
}
// Immutable refinement of LinearSeqOps; the F-bounded C parameter ties the
// concrete collection type to its own operations.
trait LinearSeqOps[+A, +CC[X] <: LinearSeq[X], +C <: LinearSeq[A] with LinearSeqOps[A, CC, C]]
  extends Any with SeqOps[A, CC, C]
    with collection.LinearSeqOps[A, CC, C]
/** Explicit instantiation of the `Seq` trait to reduce class file size in subclasses. */
abstract class AbstractSeq[+A] extends scala.collection.AbstractSeq[A] with Seq[A]
| scala/scala | src/library/scala/collection/immutable/Seq.scala | Scala | apache-2.0 | 5,121 |
package exercises.ch03
/** Exercise 3.19: `filter` over the book's hand-rolled singly linked `List`. */
object Ex19 {
  /**
   * Returns the elements of `as` satisfying predicate `f`, preserving order.
   *
   * Rewritten to be stack-safe: the original direct recursion was not in tail
   * position (`Cons(x, filter(xs)(f))`) and overflowed the stack on long
   * lists. Here a tail-recursive pass accumulates kept elements in reverse,
   * and a second tail-recursive pass restores the original order; `f` is
   * still applied exactly once per element, in order.
   * (Assumes `List` is covariant with `Nil: List[Nothing]`, as in the book's
   * definition — confirm if the local `List` differs.)
   */
  def filter[A](as: List[A])(f: A => Boolean): List[A] = {
    // Reverses `rem` onto `acc`; tail recursive.
    @annotation.tailrec
    def reverse(rem: List[A], acc: List[A]): List[A] = rem match {
      case Nil => acc
      case Cons(x, xs) => reverse(xs, Cons(x, acc))
    }
    // Collects matching elements in reverse order; tail recursive.
    @annotation.tailrec
    def loop(rem: List[A], acc: List[A]): List[A] = rem match {
      case Nil => reverse(acc, Nil)
      case Cons(x, xs) => loop(xs, if (f(x)) Cons(x, acc) else acc)
    }
    loop(as, Nil)
  }
  // Smoke test: prints the even elements of lists of increasing length.
  def main(args: Array[String]): Unit = {
    println(filter(List(1))(x => x % 2 == 0))
    println(filter(List(1,2))(x => x % 2 == 0))
    println(filter(List(1,2,3))(x => x % 2 == 0))
    println(filter(List(1,2,3,4))(x => x % 2 == 0))
    println(filter(List(1,2,3,4,5))(x => x % 2 == 0))
    println(filter(List(1,2,3,4,5,6))(x => x % 2 == 0))
  }
}
| VladMinzatu/fpinscala-exercises | src/main/scala/exercises/ch03/Ex19.scala | Scala | mit | 561 |
package com.github.pheymann.scala.bft.storage
import com.github.pheymann.scala.bft.consensus.ConsensusState
import com.github.pheymann.scala.bft.messaging._
import com.github.pheymann.scala.bft.replica.ReplicaAction
/** Algebra of log/storage operations performed by a BFT replica; `A` is the
  * result type produced when the action is interpreted. */
sealed trait StorageAction[A] extends ReplicaAction[A]
/** Persist the pre-prepare record: the client request together with the
  * consensus state (e.g. sequence/view numbers) it was accepted under. */
final case class StorePrePrepare(
    request: ClientRequest,
    state: ConsensusState
  ) extends StorageAction[Unit]
/** Persist a received prepare-phase message. */
final case class StorePrepare(message: PrepareMessage) extends StorageAction[Unit]
/** Persist a received commit-phase message. */
final case class StoreCommit(message: CommitMessage) extends StorageAction[Unit]
| pheymann/scala.bft | bft-replica/src/main/scala/com/github/pheymann/scala/bft/storage/StorageAction.scala | Scala | mit | 655 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
import org.scalatest._
import scala.collection.mutable.WrappedArray
import scala.util.Success
import SharedHelpers.{javaList, javaSortedMap}
import scala.xml.NodeSeq
/**
 * Specs for Scalactic's `Prettifier`: the trait contract, the `basic`
 * prettifier (shallow, `toString`-based for nested values) and the `default`
 * prettifier (recursive, quotes strings/chars, handles Java and XML types).
 * NOTE(review): the expected-value string literals in this file appear with
 * doubled backslashes exactly as stored; they are reproduced verbatim.
 */
class PrettifierSpec extends Spec with Matchers {
  // Contract of the Prettifier SAM-like trait and composition with default.
  object `A Prettifier` {
    def `should convert an Any to a String` {
      val f =
        new Prettifier {
          def apply(o: Any) = o.toString
        }
      f("hi") should be ("hi")
      f(List("hi")) should be ("List(hi)")
    }
    def `can be composed with another Prettifier` {
      case class Yell(secret: String)
      val myLittlePretty =
        new Prettifier {
          def apply(o: Any) =
            o match {
              case Yell(secret) => secret.toUpperCase + "!!!"
              case _ => Prettifier.default(o)
            }
        }
      myLittlePretty(Yell("I like fruit loops")) should be ("I LIKE FRUIT LOOPS!!!")
      myLittlePretty("hi") should be ("\\"hi\\"")
      myLittlePretty('h') should be ("'h'")
      myLittlePretty(Array(1, 2, 3)) should be ("Array(1, 2, 3)")
      myLittlePretty(WrappedArray.make(Array(1, 2, 3))) should be ("Array(1, 2, 3)")
      myLittlePretty(null) should be ("null")
      myLittlePretty(()) should be ("<(), the Unit value>")
      myLittlePretty(List("1", "2", "3")) should be ("List(\\"1\\", \\"2\\", \\"3\\")")
    }
  }
  // Prettifier.basic: does not quote nested strings/chars inside collections.
  object `the basic Prettifier` {
    def `should put double quotes around strings` {
      Prettifier.basic("hi") should be ("\\"hi\\"")
    }
    def `should put single quotes around chars` {
      Prettifier.basic('h') should be ("'h'")
    }
    def `should pretty print arrays` {
      Prettifier.basic(Array(1, 2, 3)) should be ("Array(1, 2, 3)")
    }
    def `should pretty print wrapped arrays` {
      Prettifier.basic(WrappedArray.make(Array(1, 2, 3))) should be ("Array(1, 2, 3)")
    }
    def `should pretty print string arrays` {
      Prettifier.basic(Array("1", "2", "3")) should be ("Array(1, 2, 3)")
    }
    def `should pretty print nested string arrays` {
      Prettifier.basic(Array(Array("1", "2", "3"))) should be ("Array(Array(1, 2, 3))")
    }
    def `should pretty print wrapped string arrays` {
      Prettifier.basic(WrappedArray.make(Array("1", "2", "3"))) should be ("Array(1, 2, 3)")
    }
    def `should show null as "null"` {
      Prettifier.basic(null) should be ("null")
    }
    def `should clarify the Unit value` {
      Prettifier.basic(()) should be ("<(), the Unit value>")
    }
    def `should just call toString on anything not specially treated` {
      Prettifier.basic(List("1", "2", "3")) should be ("List(1, 2, 3)")
    }
    def `should pretty print GenTraversable` {
      Prettifier.basic(List(1, 2, 3)) should be ("List(1, 2, 3)")
    }
    def `should pretty print string GenTraversable` {
      Prettifier.basic(List("1", "2", "3")) should be ("List(1, 2, 3)")
    }
    def `should pretty print nested string GenTraversable` {
      Prettifier.basic(List(List("1", "2", "3"))) should be ("List(List(1, 2, 3))")
    }
    def `should pretty print Some(Int)` {
      Prettifier.basic(Some(8)) should be ("Some(8)")
    }
    def `should pretty print Some(String)` {
      Prettifier.basic(Some("8")) should be ("Some(8)")
    }
    def `should pretty print nested Some(String)` {
      Prettifier.basic(Some(Some("8"))) should be ("Some(Some(8))")
    }
    def `should pretty print Success(Int)` {
      Prettifier.basic(Success(8)) should be ("Success(8)")
    }
    def `should pretty print Success(String)` {
      Prettifier.basic(Success("8")) should be ("Success(8)")
    }
    def `should pretty print nested Success(String)` {
      Prettifier.basic(Success(Success("8"))) should be ("Success(Success(8))")
    }
    def `should pretty print Left(Int)` {
      Prettifier.basic(Left(8)) should be ("Left(8)")
    }
    def `should pretty print Left(String)` {
      Prettifier.basic(Left("8")) should be ("Left(8)")
    }
    def `should pretty print nested Left(String)` {
      Prettifier.basic(Left(Left("8"))) should be ("Left(Left(8))")
    }
    def `should pretty print Right(Int)` {
      Prettifier.basic(Right(8)) should be ("Right(8)")
    }
    def `should pretty print Right(String)` {
      Prettifier.basic(Right("8")) should be ("Right(8)")
    }
    def `should pretty print nested Right(String)` {
      Prettifier.basic(Right(Right("8"))) should be ("Right(Right(8))")
    }
    def `should pretty print Good(Int)` {
      Prettifier.basic(Good(8)) should be ("Good(8)")
    }
    def `should pretty print Good(String)` {
      Prettifier.basic(Good("8")) should be ("Good(8)")
    }
    def `should pretty print nested Good(String)` {
      Prettifier.basic(Good(Good("8"))) should be ("Good(Good(8))")
    }
    def `should pretty print Bad(Int)` {
      Prettifier.basic(Bad(8)) should be ("Bad(8)")
    }
    def `should pretty print Bad(String)` {
      Prettifier.basic(Bad("8")) should be ("Bad(8)")
    }
    def `should pretty print nested Bad(String)` {
      Prettifier.basic(Bad(Bad("8"))) should be ("Bad(Bad(8))")
    }
    def `should pretty print One(Int)` {
      Prettifier.basic(One(8)) should be ("One(8)")
    }
    def `should pretty print One(String)` {
      Prettifier.basic(One("8")) should be ("One(8)")
    }
    def `should pretty print nested One(String)` {
      Prettifier.basic(One(One("8"))) should be ("One(One(8))")
    }
    def `should pretty print Many(Int)` {
      Prettifier.basic(Many(1, 2, 3)) should be ("Many(1, 2, 3)")
    }
    def `should pretty print Many(String)` {
      Prettifier.basic(Many("1", "2", "3")) should be ("Many(1, 2, 3)")
    }
    def `should pretty print nested Many(String)` {
      Prettifier.basic(Many(Many("1", "2", "3"), Many("7", "8", "9"))) should be ("Many(Many(1, 2, 3), Many(7, 8, 9))")
    }
    def `should pretty print Java List` {
      Prettifier.basic(javaList(1, 2, 3)) should be ("[1, 2, 3]")
    }
    def `should pretty print string Java List` {
      Prettifier.basic(javaList("1", "2", "3")) should be ("[1, 2, 3]")
    }
    def `should pretty print nested string Java List` {
      Prettifier.basic(javaList(javaList("1", "2", "3"))) should be ("[[1, 2, 3]]")
    }
    def `should pretty print Java Map` {
      Prettifier.basic(javaSortedMap(Entry(1, 2), Entry(2, 3), Entry(3, 8))) should be ("{1=2, 2=3, 3=8}")
    }
    def `should pretty print string Java Map` {
      Prettifier.basic(javaSortedMap(Entry(1, "one"), Entry(2, "two"), Entry(3, "three"))) should be ("{1=one, 2=two, 3=three}")
    }
    def `should pretty print nested string Java Map` {
      Prettifier.basic(javaSortedMap(Entry("akey", javaSortedMap(Entry(1, "one"), Entry(2, "two"), Entry(3, "three"))))) should be ("{akey={1=one, 2=two, 3=three}}")
    }
  }
  // Prettifier.default: recursive — quotes strings/chars at every nesting
  // level, and additionally handles StringOps, XML and runaway recursion.
  object `the default Prettifier` {
    def `should put double quotes around strings` {
      Prettifier.default("hi") should be ("\\"hi\\"")
    }
    def `should put double quotes around scala.collection.immutable.StringOps` {
      Prettifier.default(new scala.collection.immutable.StringOps("hi")) should be ("\\"hi\\"")
    }
    def `should put single quotes around chars` {
      Prettifier.default('h') should be ("'h'")
    }
    def `should pretty print arrays` {
      Prettifier.default(Array(1, 2, 3)) should be ("Array(1, 2, 3)")
    }
    def `should pretty print wrapped arrays` {
      Prettifier.default(WrappedArray.make(Array(1, 2, 3))) should be ("Array(1, 2, 3)")
    }
    def `should pretty print string arrays` {
      Prettifier.default(Array("1", "2", "3")) should be ("Array(\\"1\\", \\"2\\", \\"3\\")")
    }
    def `should pretty print nested string arrays` {
      Prettifier.default(Array(Array("1", "2", "3"))) should be ("Array(Array(\\"1\\", \\"2\\", \\"3\\"))")
    }
    def `should pretty print wrapped string arrays` {
      Prettifier.default(WrappedArray.make(Array("1", "2", "3"))) should be ("Array(\\"1\\", \\"2\\", \\"3\\")")
    }
    def `should show null as "null"` {
      Prettifier.default(null) should be ("null")
    }
    def `should clarify the Unit value` {
      Prettifier.default(()) should be ("<(), the Unit value>")
    }
    def `should just call toString on anything not specially treated` {
      Prettifier.default(List("1", "2", "3")) should be ("List(\\"1\\", \\"2\\", \\"3\\")")
    }
    def `should pretty print GenTraversable` {
      Prettifier.default(List(1, 2, 3)) should be ("List(1, 2, 3)")
    }
    def `should pretty print string GenTraversable` {
      Prettifier.default(List("1", "2", "3")) should be ("List(\\"1\\", \\"2\\", \\"3\\")")
    }
    def `should pretty print nested string GenTraversable` {
      Prettifier.default(List(List("1", "2", "3"))) should be ("List(List(\\"1\\", \\"2\\", \\"3\\"))")
    }
    def `should pretty print Some(Int)` {
      Prettifier.default(Some(8)) should be ("Some(8)")
    }
    def `should pretty print Some(String)` {
      Prettifier.default(Some("8")) should be ("Some(\\"8\\")")
    }
    def `should pretty print nested Some(String)` {
      Prettifier.default(Some(Some("8"))) should be ("Some(Some(\\"8\\"))")
    }
    def `should pretty print Success(Int)` {
      Prettifier.default(Success(8)) should be ("Success(8)")
    }
    def `should pretty print Success(String)` {
      Prettifier.default(Success("8")) should be ("Success(\\"8\\")")
    }
    def `should pretty print nested Success(String)` {
      Prettifier.default(Success(Success("8"))) should be ("Success(Success(\\"8\\"))")
    }
    def `should pretty print Left(Int)` {
      Prettifier.default(Left(8)) should be ("Left(8)")
    }
    def `should pretty print Left(String)` {
      Prettifier.default(Left("8")) should be ("Left(\\"8\\")")
    }
    def `should pretty print nested Left(String)` {
      Prettifier.default(Left(Left("8"))) should be ("Left(Left(\\"8\\"))")
    }
    def `should pretty print Right(Int)` {
      Prettifier.default(Right(8)) should be ("Right(8)")
    }
    def `should pretty print Right(String)` {
      Prettifier.default(Right("8")) should be ("Right(\\"8\\")")
    }
    def `should pretty print nested Right(String)` {
      Prettifier.default(Right(Right("8"))) should be ("Right(Right(\\"8\\"))")
    }
    def `should pretty print Good(Int)` {
      Prettifier.default(Good(8)) should be ("Good(8)")
    }
    def `should pretty print Good(String)` {
      Prettifier.default(Good("8")) should be ("Good(\\"8\\")")
    }
    def `should pretty print nested Good(String)` {
      Prettifier.default(Good(Good("8"))) should be ("Good(Good(\\"8\\"))")
    }
    def `should pretty print Bad(Int)` {
      Prettifier.default(Bad(8)) should be ("Bad(8)")
    }
    def `should pretty print Bad(String)` {
      Prettifier.default(Bad("8")) should be ("Bad(\\"8\\")")
    }
    def `should pretty print nested Bad(String)` {
      Prettifier.default(Bad(Bad("8"))) should be ("Bad(Bad(\\"8\\"))")
    }
    def `should pretty print One(Int)` {
      Prettifier.default(One(8)) should be ("One(8)")
    }
    def `should pretty print One(String)` {
      Prettifier.default(One("8")) should be ("One(\\"8\\")")
    }
    def `should pretty print nested One(String)` {
      Prettifier.default(One(One("8"))) should be ("One(One(\\"8\\"))")
    }
    def `should pretty print Many(Int)` {
      Prettifier.default(Many(1, 2, 3)) should be ("Many(1, 2, 3)")
    }
    def `should pretty print Many(String)` {
      Prettifier.default(Many("1", "2", "3")) should be ("Many(\\"1\\", \\"2\\", \\"3\\")")
    }
    def `should pretty print nested Many(String)` {
      Prettifier.default(Many(Many("1", "2", "3"), Many("7", "8", "9"))) should be ("Many(Many(\\"1\\", \\"2\\", \\"3\\"), Many(\\"7\\", \\"8\\", \\"9\\"))")
    }
    def `should pretty print Java List` {
      Prettifier.default(javaList(1, 2, 3)) should be ("[1, 2, 3]")
    }
    def `should pretty print string Java List` {
      Prettifier.default(javaList("1", "2", "3")) should be ("[\\"1\\", \\"2\\", \\"3\\"]")
    }
    def `should pretty print nested string Java List` {
      Prettifier.default(javaList(javaList("1", "2", "3"))) should be ("[[\\"1\\", \\"2\\", \\"3\\"]]")
    }
    def `should pretty print Java Map` {
      Prettifier.default(javaSortedMap(Entry(1, 2), Entry(2, 3), Entry(3, 8))) should be ("{1=2, 2=3, 3=8}")
    }
    def `should pretty print string Java Map` {
      Prettifier.default(javaSortedMap(Entry(1, "one"), Entry(2, "two"), Entry(3, "three"))) should be ("{1=\\"one\\", 2=\\"two\\", 3=\\"three\\"}")
    }
    def `should pretty print nested string Java Map` {
      Prettifier.default(javaSortedMap(Entry("akey", javaSortedMap(Entry(1, "one"), Entry(2, "two"), Entry(3, "three"))))) should be ("{\\"akey\\"={1=\\"one\\", 2=\\"two\\", 3=\\"three\\"}}")
    }
    def `should pretty print xml <a></a>` {
      Prettifier.default(<a></a>) should be ("<a></a>")
    }
    def `should pretty print xml <a/>` {
      Prettifier.default(<a/>) should be ("<a/>")
    }
    def `should pretty print xml <a><b/></a>` {
      Prettifier.default(<a><b/></a>) should be ("<a><b/></a>")
    }
    def `should pretty print xml <a/><b/>` {
      Prettifier.default(<a/><b/>) should be ("<a/><b/>")
    }
    def `should pretty print xml.NodeSeq <a/><b/>` {
      val ab: NodeSeq = <a/><b/>;
      Prettifier.default(ab) should be ("<a/><b/>")
    }
    def `should handle runaway recursion gracefully, if not necessarily quickly` {
      /*
        You'd think no one would do this, but:
        scala> val me = <a></a>
        me: scala.xml.Elem = <a></a>
        scala> val you = me.iterator.next
        you: scala.xml.Node = <a></a>
        scala> me eq you
        res0: Boolean = true
      */
      // Fred is a Seq whose single element is itself: naive recursive
      // prettification would never terminate; default must fall back to
      // toString.
      class Fred extends Seq[Fred] { thisFred =>
        override def toIterator: Iterator[Fred] = iterator
        def iterator: Iterator[Fred] =
          new Iterator[Fred] {
            private var hasNextElement: Boolean = true
            def next: Fred = {
              if (hasNextElement) {
                hasNextElement = false
                thisFred
              }
              else throw new NoSuchElementException
            }
            def hasNext: Boolean = hasNextElement
          }
        def apply(idx: Int): Fred = if (idx == 0) thisFred else throw new NoSuchElementException
        def length: Int = 1
        override def toString = "It's Fred all the way down"
      }
      Prettifier.default(new Fred) shouldBe "It's Fred all the way down"
    }
  }
}
| travisbrown/scalatest | src/test/scala/org/scalactic/PrettifierSpec.scala | Scala | apache-2.0 | 14,967 |
package gh.test.gh2013.event
import gh2013.events.{IssueCommentEventParser, IssuesEventParser}
import net.liftweb.json._
import org.scalatest.{FlatSpec, Matchers}
/**
 * Checks that IssueCommentEventParser accepts a representative 2013 GitHub
 * Archive IssueCommentEvent payload (only definedness is asserted, not the
 * parsed field values).
 */
class IssueCommentEventTest extends FlatSpec with Matchers
{
   "A valid IssueCommentEvent" must "be correctly parsed" in {
      // Real-world event captured from the GitHub 2013 timeline.
      val json = parse(
        """
          |{
          |
          |   "actor":"trevnorris",
          |   "created_at":"2013-01-04T10:41:48-08:00",
          |   "repository":{
          |      "owner":"joyent",
          |      "created_at":"2009-05-27T09:29:46-07:00",
          |      "homepage":"http://nodejs.org/",
          |      "open_issues":494,
          |      "pushed_at":"2013-01-04T10:26:05-08:00",
          |      "url":"https://github.com/joyent/node",
          |      "description":"evented I/O for v8 javascript",
          |      "forks":3046,
          |      "has_downloads":false,
          |      "organization":"joyent",
          |      "watchers":19431,
          |      "fork":false,
          |      "size":61420,
          |      "has_issues":true,
          |      "name":"node",
          |      "stargazers":19431,
          |      "language":"JavaScript",
          |      "id":211666,
          |      "private":false,
          |      "has_wiki":true
          |   },
          |   "public":true,
          |   "payload":{
          |      "issue_id":9601460,
          |      "comment_id":11894342
          |   },
          |   "actor_attributes":{
          |      "location":"California",
          |      "login":"trevnorris",
          |      "company":"Mozilla Corporation",
          |      "blog":"http://blog.trevorjnorris.com/",
          |      "type":"User",
          |      "gravatar_id":"3e440ff4686867b1929ce68684591885",
          |      "name":"Trevor Norris"
          |   },
          |   "type":"IssueCommentEvent",
          |   "url":"https://github.com/joyent/node/issues/4504#issuecomment-11894342"
          |
          |}
        """.stripMargin)
      gh2013.parser(IssueCommentEventParser)(json) shouldBe 'defined
   }
}
| mgoeminne/github_etl | src/test/scala/gh/test/gh2013/event/IssueCommentEventTest.scala | Scala | mit | 2,201 |
package org.randi3.web.snippet
import xml.Elem
import scalaz.NonEmptyList
import net.liftweb.http.S
import org.randi3.web.lib.DependencyFactory
import org.randi3.web.util.CurrentLoggedInUser
/**
 * Shared helpers for Lift form snippets: labeled form rows (with optional
 * tooltip), per-field error messaging, and refreshing the cached logged-in
 * user after a service call.
 */
trait GeneralFormSnippet {
  // Builds a <li> form row: localized label, the input element, and a
  // placeholder for field-specific error messages. `failure` toggles the
  // errorHint CSS class.
  protected def generateEntry(id: String, failure: Boolean, element: Elem): Elem = {
    <li id={id + "Li"} class={if (failure) "errorHint" else ""}>
      <label for={id}>
        {S.?(id)}
      </label>{element}<lift:msg id={id + "Msg"} errorClass="err"/>
    </li>
  }
  // Renders all validation failures for a field as one comma-joined message.
  protected def showErrorMessage(id: String, errors: NonEmptyList[String]) {
    S.error(id + "Msg", "<-" + errors.list.reduce((acc, el) => acc + ", " + el))
  }
  // Blanks the field's error slot.
  protected def clearErrorMessage(id: String) {
    S.error(id + "Msg", "")
  }
  private val userService = DependencyFactory.get.userService
  // Re-fetches the current user and replaces the session-cached instance;
  // on failure the service error is surfaced via S.error.
  protected def updateCurrentUser = {
    userService.get(CurrentLoggedInUser.get.get.id).toEither match {
      case Left(x) => S.error(x)
      case Right(user) => {
        CurrentLoggedInUser(user)
      }
    }
  }
  // Like generateEntry but with a help tooltip next to the label.
  // NOTE(review): the label here interpolates the raw `id` while
  // generateEntry localizes it with S.?(id) — confirm whether the missing
  // localization is intentional.
  protected def generateEntryWithInfo(id: String, failure: Boolean, info: String, element: Elem): Elem = {
    <li id={id + "Li"} class={if (failure) "errorHint" else ""}>
      <label for={id}>
        <span>
          {id}
        </span>
        <span class="tooltip">
          <img src="/images/icons/help16.png" alt={info} title={info}/> <span class="info">
          {info}
        </span>
        </span>
      </label>{element}<lift:msg id={id + "Msg"} errorClass="err"/>
    </li>
  }
}
| dschrimpf/randi3-web | src/main/scala/org/randi3/web/snippet/GeneralFormSnippet.scala | Scala | gpl-3.0 | 1,523 |
/**
* Copyright (C) 2009-2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.scalate.spring.view
import _root_.java.util.Locale
import _root_.javax.servlet.ServletConfig
import _root_.javax.servlet.http.HttpServletRequest
import _root_.javax.servlet.http.HttpServletResponse
import _root_.org.fusesource.scalate.RenderContext
import _root_.org.fusesource.scalate.servlet.ServletRenderContext
import _root_.org.fusesource.scalate.servlet.ServletTemplateEngine
import _root_.org.springframework.web.context.ServletConfigAware
import _root_.scala.collection.JavaConversions._
import _root_.org.fusesource.scalate.TemplateException
import _root_.org.springframework.web.servlet.view.{ AbstractView, AbstractTemplateView }
import _root_.org.slf4j.LoggerFactory
import org.fusesource.scalate.util.ResourceNotFoundException
/** Strategy for rendering a Scalate template given a servlet render context
  * and a view model. Concrete strategies differ in how the template or view
  * is resolved (layout by URL, direct render, or "it"-based view lookup). */
trait ScalateRenderStrategy {
  // Logger named after the concrete strategy class.
  protected val log = LoggerFactory.getLogger(getClass)
  /** Renders the view using the given context and model attributes. */
  def render(context: ServletRenderContext, model: Map[String, Any]);
}
/** Render strategy that copies the whole model into the render context's
  * attributes and then delegates to the template engine's layout mechanism
  * for the configured view URL. */
trait LayoutScalateRenderStrategy extends AbstractTemplateView with ScalateRenderStrategy {
  def templateEngine: ServletTemplateEngine
  def render(context: ServletRenderContext, model: Map[String, Any]) {
    log.debug("Rendering view with name '" + getUrl + "' with model " + model)
    // Expose every model entry as a template attribute before laying out.
    model.foreach { case (name, attribute) =>
      context.attributes(name) = attribute
    }
    templateEngine.layout(getUrl, context)
  }
}
/** Render strategy that hands the model straight to the render context for
  * the configured view URL, without the layout indirection. */
trait DefaultScalateRenderStrategy extends AbstractTemplateView with ScalateRenderStrategy {
  override def render(context: ServletRenderContext, model: Map[String, Any]) {
    log.debug("Rendering view with name '" + getUrl + "' with model " + model)
    context.render(getUrl, model)
  }
}
/** Render strategy that resolves the object to render from the model's "it"
  * entry and displays it via the context; fails fast when no "it" object was
  * supplied by the controller. */
trait ViewScalateRenderStrategy extends ScalateRenderStrategy {
  override def render(context: ServletRenderContext, model: Map[String, Any]) {
    log.debug("Rendering with model " + model)
    model.get("it") match {
      case Some(it) =>
        context.view(it.asInstanceOf[AnyRef])
      case None =>
        throw new TemplateException("No 'it' model object specified. Cannot render request")
    }
  }
}
/** Common base for Scalate-backed Spring views: holds the (injected) template
  * engine and lets subclasses decide whether a template resource exists. */
trait AbstractScalateView extends AbstractView {
  // Injected by the resolver after construction; starts out null.
  var templateEngine: ServletTemplateEngine = _;
  /** Returns true if a template for the given locale can be resolved. */
  def checkResource(locale: Locale): Boolean;
}
/** URL-based Scalate view: renders the template addressed by getUrl through
  * the layout strategy, and reports resource availability by attempting to
  * load the template. */
class ScalateUrlView extends AbstractTemplateView with AbstractScalateView
  with LayoutScalateRenderStrategy {
  override def renderMergedTemplateModel(model: java.util.Map[String, Object],
                                         request: HttpServletRequest,
                                         response: HttpServletResponse): Unit = {
    val context = new ServletRenderContext(templateEngine, request, response, getServletContext)
    // Make the context available thread-locally while rendering.
    RenderContext.using(context) {
      render(context, model.asInstanceOf[java.util.Map[String, Any]].toMap)
    }
  }
  override def checkResource(locale: Locale): Boolean = try {
    log.debug("Checking for resource " + getUrl)
    // Loading the template is the existence check; failure to find it is
    // translated into `false` rather than an error.
    templateEngine.load(getUrl)
    true
  } catch {
    case e: ResourceNotFoundException => {
      log.info("Could not find resource " + getUrl);
      false
    }
  }
}
/** "it"-based Scalate view: always claims the resource exists and renders the
  * model's "it" object via the view strategy. */
class ScalateView extends AbstractScalateView with ViewScalateRenderStrategy {
  // There is no URL-addressed template to probe, so the resource always exists.
  override def checkResource(locale: Locale) = true;
  override def renderMergedOutputModel(model: java.util.Map[String, Object],
                                       request: HttpServletRequest,
                                       response: HttpServletResponse): Unit = {
    val context = new ServletRenderContext(templateEngine, request, response, getServletContext)
    // Make the context available thread-locally while rendering.
    RenderContext.using(context) {
      render(context, model.asInstanceOf[java.util.Map[String, Any]].toMap)
    }
  }
}
| dnatic09/scalate | scalate-spring-mvc/src/main/scala/org/fusesource/scalate/spring/view/ScalateView.scala | Scala | apache-2.0 | 4,189 |
/*
* Copyright 2001-2009 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.fixture
import org.scalatest._
/** Verifies that ConfigMapFixture hands the run's config map to each test
  * method as its fixture parameter. */
class ConfigMapFixtureSpec extends org.scalatest.FunSpec with SharedHelpers {
  describe("A ConfigMapFixture") {
    it("should pass the config map to each test") {
      val myConfigMap = Map[String, Any]("hello" -> "world", "salt" -> "pepper")
      // Inner suite records whether the fixture it received equals the
      // config map passed to run().
      class MySuite extends fixture.Suite with ConfigMapFixture {
        var configMapPassed = false
        def testSomething(configMap: FixtureParam) {
          if (configMap == myConfigMap)
            configMapPassed = true
        }
      }
      val suite = new MySuite
      suite.run(None, Args(SilentReporter, Stopper.default, Filter(), myConfigMap, None, new Tracker, Set.empty))
      assert(suite.configMapPassed)
    }
  }
}
| hubertp/scalatest | src/test/scala/org/scalatest/fixture/ConfigMapFixtureSpec.scala | Scala | apache-2.0 | 1,342 |
package net.sansa_stack.inference.spark.forwardchaining.triples
import net.sansa_stack.inference.spark.data.model.RDFGraph
import net.sansa_stack.inference.spark.data.model.TripleUtils._
import net.sansa_stack.inference.utils.Profiler
import org.apache.jena.graph.{Node, Triple}
import org.apache.spark.rdd.RDD
import scala.collection.mutable
/**
 * A forward chaining based reasoner.
 *
 * @author Lorenz Buehmann
 */
trait ForwardRuleReasoner extends Profiler {

  /**
   * Applies forward chaining to the given RDD of RDF triples and returns a new
   * RDD of RDF triples that contains all additional triples based on the underlying
   * set of rules.
   *
   * @param triples the RDF triples
   * @return the materialized set of RDF triples
   */
  def apply(triples: RDD[Triple]): RDD[Triple] = apply(RDFGraph(triples)).triples

  /**
   * Applies forward chaining to the given RDF graph and returns a new RDF graph that
   * contains all additional triples based on the underlying set of rules.
   *
   * @param graph the RDF graph
   * @return the materialized RDF graph
   */
  def apply(graph: RDFGraph): RDFGraph

  /**
   * Extracts all triples that use the given predicate.
   *
   * @param triples the triples
   * @param predicate the predicate
   * @return the set of triples that contain the predicate
   */
  def extractTriples(triples: mutable.Set[Triple], predicate: Node): mutable.Set[Triple] =
    triples.filter(_.p == predicate)

  /**
   * Extracts all triples that use the given predicate.
   *
   * @param triples the RDD of triples
   * @param predicate the predicate
   * @return the RDD of triples that contain the predicate
   */
  def extractTriples(triples: RDD[Triple], predicate: Node): RDD[Triple] =
    triples.filter(_.p == predicate)

  /**
   * Extracts all triples matching the subject, predicate and object, each of
   * which is only checked when defined.
   *
   * @param triples the RDD of triples
   * @param subject the subject
   * @param predicate the predicate
   * @param obj the object
   * @return the RDD of triples that match
   */
  def extractTriples(triples: RDD[Triple],
                     subject: Option[Node],
                     predicate: Option[Node],
                     obj: Option[Node]): RDD[Triple] = {
    // Apply each optional constraint in turn; an undefined position leaves
    // the RDD unchanged (same filter order as before: s, then p, then o).
    val bySubject = subject.fold(triples)(s => triples.filter(_.s == s))
    val byPredicate = predicate.fold(bySubject)(p => bySubject.filter(_.p == p))
    obj.fold(byPredicate)(o => byPredicate.filter(_.o == o))
  }
}
| SANSA-Stack/SANSA-RDF | sansa-inference/sansa-inference-spark/src/main/scala/net/sansa_stack/inference/spark/forwardchaining/triples/ForwardRuleReasoner.scala | Scala | apache-2.0 | 2,736 |
package org.jetbrains.sbt
package annotator
import java.io.File
import com.intellij.ide.startup.impl.StartupManagerImpl
import com.intellij.openapi.externalSystem.util.ExternalSystemConstants
import com.intellij.openapi.module.{Module, ModuleManager, ModuleUtilCore}
import com.intellij.openapi.projectRoots.Sdk
import com.intellij.openapi.roots.{ModifiableRootModel, ModuleRootModificationUtil}
import com.intellij.openapi.startup.StartupManager
import com.intellij.openapi.vfs.{LocalFileSystem, VfsUtilCore}
import com.intellij.psi.PsiManager
import com.intellij.testFramework.UsefulTestCase
import com.intellij.util.Consumer
import org.jetbrains.plugins.scala.annotator.{Error, _}
import org.jetbrains.plugins.scala.base.libraryLoaders.LibraryLoader
import org.jetbrains.plugins.scala.util.TestUtils
import org.jetbrains.sbt.language.SbtFileImpl
import org.jetbrains.sbt.project.module.SbtModuleType
import org.jetbrains.sbt.project.settings.SbtProjectSettings
import org.jetbrains.sbt.settings.SbtSystemSettings
import scala.collection.JavaConverters._
/**
* @author Nikolay Obedin
* @since 7/23/15.
*/
/** Shared fixture for SbtAnnotator tests: sets up an sbt build module, project
  * settings and the test data file, then runs the annotator and compares the
  * produced messages against the expected ones. */
abstract class SbtAnnotatorTestBase extends AnnotatorTestBase with MockSbt {
  override implicit protected lazy val module: Module = inWriteAction {
    val moduleName = getModule.getName + Sbt.BuildModuleSuffix + ".iml"
    val module = ModuleManager.getInstance(getProject).newModule(moduleName, SbtModuleType.instance.getId)
    ModuleRootModificationUtil.setModuleSdk(module, getTestProjectJdk)
    module
  }
  override protected def setUp(): Unit = {
    super.setUp()
    setUpLibraries()
    addTestFileToModuleSources()
    setUpProjectSettings()
    inWriteAction {
      // NOTE(review): non-exhaustive match — any StartupManager implementation
      // other than StartupManagerImpl would raise a MatchError here.
      StartupManager.getInstance(getProject) match {
        case manager: StartupManagerImpl => manager.startCacheUpdate()
      }
    }
  }
  override def loadTestFile(): SbtFileImpl = {
    val fileName = "SbtAnnotator.sbt"
    val filePath = testdataPath + fileName
    val vfile = LocalFileSystem.getInstance.findFileByPath(filePath.replace(File.separatorChar, '/'))
    val psifile = PsiManager.getInstance(getProject).findFile(vfile)
    // Associate the PSI file with the build module so annotator logic that
    // depends on the module can resolve it.
    psifile.putUserData(ModuleUtilCore.KEY_MODULE, getModule)
    psifile.asInstanceOf[SbtFileImpl]
  }
  override def getTestProjectJdk: Sdk = TestUtils.createJdk()
  /** Runs the annotator for the given sbt version and asserts the messages. */
  protected def runTest(sbtVersion: String, expectedMessages: Seq[Message]): Unit = {
    setSbtVersion(sbtVersion)
    val actualMessages = annotate().asJava
    UsefulTestCase.assertSameElements(actualMessages, expectedMessages: _*)
  }
  protected def setSbtVersion(sbtVersion: String): Unit = {
    val projectSettings = SbtSystemSettings.getInstance(getProject).getLinkedProjectSettings(getProject.getBasePath)
    assert(projectSettings != null)
    projectSettings.setSbtVersion(sbtVersion)
  }
  /** Annotates the test file with a mock holder and returns the messages. */
  private def annotate(): Seq[Message] = {
    val file = loadTestFile()
    val mock = new AnnotatorHolderMock(file)
    val annotator = new SbtAnnotator
    annotator.annotate(file, mock)
    mock.annotations
  }
  private def setUpProjectSettings(): Unit = {
    val projectSettings = SbtProjectSettings.default
    projectSettings.setExternalProjectPath(getProject.getBasePath)
    projectSettings.setModules(java.util.Collections.singleton(getModule.getModuleFilePath))
    SbtSystemSettings.getInstance(getProject).linkProject(projectSettings)
    getModule.setOption(ExternalSystemConstants.ROOT_PROJECT_PATH_KEY, getProject.getBasePath)
  }
  private def addTestFileToModuleSources(): Unit = {
    ModuleRootModificationUtil.updateModel(getModule, new Consumer[ModifiableRootModel] {
      override def consume(model: ModifiableRootModel): Unit = {
        val testdataUrl = VfsUtilCore.pathToUrl(testdataPath)
        model.addContentEntry(testdataUrl).addSourceFolder(testdataUrl, false)
      }
    })
    LibraryLoader.storePointers()
  }
}
/** Annotator expectations for sbt 0.12.4. */
class SbtAnnotatorTest_0_12_4 extends SbtAnnotatorTestBase {
  override implicit val sbtVersion: String = "0.12.4"
  def test(): Unit = runTest(sbtVersion, Expectations.sbt012)
}
/** Annotator expectations for sbt 0.13.1. */
class SbtAnnotatorTest_0_13_1 extends SbtAnnotatorTestBase {
  override implicit val sbtVersion: String = "0.13.1"
  def test(): Unit = runTest(sbtVersion, Expectations.sbt012_013(sbtVersion))
}
/** Annotator expectations for sbt 0.13.7. */
class SbtAnnotatorTest_0_13_7 extends SbtAnnotatorTestBase {
  override implicit val sbtVersion: String = "0.13.7"
  def test(): Unit = runTest(sbtVersion, Expectations.sbt0137)
}
/** Annotator expectations for the newest supported sbt version. */
class SbtAnnotatorTest_latest extends SbtAnnotatorTestBase {
  override implicit val sbtVersion: String = Sbt.LatestVersion
  def test(): Unit = runTest(sbtVersion, Expectations.sbt0137)
}
/**
* Expected error messages for specific sbt versions. Newer versions usually allow more syntactic constructs in the sbt files
*/
object Expectations {
  // Errors expected for every sbt version.
  val sbtAll: Seq[Error] = Seq(
    Error("object Bar", SbtBundle("sbt.annotation.sbtFileMustContainOnlyExpressions"))
  )
  // sbt 0.13.7+ relaxed the grammar (no blank-line rule) but uses the
  // 0.13.6-specific message keys for type-mismatch errors.
  val sbt0137: Seq[Error] = sbtAll ++ Seq(
    Error("organization", SbtBundle("sbt.annotation.expressionMustConformSbt0136", "SettingKey[String]")),
    Error(""""some string"""", SbtBundle("sbt.annotation.expressionMustConformSbt0136", "String")),
    Error("null", SbtBundle("sbt.annotation.expectedExpressionTypeSbt0136")),
    Error("???", SbtBundle("sbt.annotation.expectedExpressionTypeSbt0136"))
  )
  // Versions before 0.13.7 additionally require blank lines between expressions.
  def sbt012_013(sbtVersion: String): Seq[Error] = sbtAll ++ Seq(
    Error("organization", SbtBundle("sbt.annotation.expressionMustConform", "SettingKey[String]")),
    Error(""""some string"""", SbtBundle("sbt.annotation.expressionMustConform", "String")),
    Error("null", SbtBundle("sbt.annotation.expectedExpressionType")),
    Error("???", SbtBundle("sbt.annotation.expectedExpressionType")),
    Error("""version := "SNAPSHOT"""", SbtBundle("sbt.annotation.blankLineRequired", sbtVersion))
  )
  // sbt 0.12 also rejects project definitions in .sbt files.
  def sbt012: Seq[Error] = sbt012_013("0.12.4") ++ Seq(
    Error(
      """lazy val foo = project.in(file("foo")).enablePlugins(sbt.plugins.JvmPlugin)""",
      SbtBundle("sbt.annotation.sbtFileMustContainOnlyExpressions"))
  )
}
| ilinum/intellij-scala | test/org/jetbrains/sbt/annotator/SbtAnnotatorTest.scala | Scala | apache-2.0 | 6,046 |
/*
* Copyright 2014 - 2015 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package slamdata.engine
import slamdata.Predef._
import scalaz._
import scalaz.concurrent._
import slamdata.engine.fs._; import Path._
import slamdata.engine.Errors._
/** Location of a query's results; distinguishes user-visible paths from
  * engine-created temporaries (see the companion's User/Temp cases). */
sealed trait ResultPath {
  def path: Path
}
object ResultPath {
  /** Path to a result which names an unaltered source resource or the requested destination. */
  final case class User(path: Path) extends ResultPath

  /** Path to a result which is a new temporary resource created during query execution. */
  final case class Temp(path: Path) extends ResultPath
}
/** Executes physical plans against a concrete backend. The type parameter is
  * the backend-specific plan representation. */
trait Evaluator[PhysicalPlan] {
  import Evaluator._

  /**
   * Executes the specified physical plan.
   *
   * Returns the location where the output results are located. In some
   * cases (e.g. SELECT * FROM FOO), this may not be equal to the specified
   * destination resource (because this would require copying all the data).
   */
  def execute(physical: PhysicalPlan): ETask[EvaluationError, ResultPath]

  /**
   * Compile the specified physical plan to a command
   * that can be run natively on the backend.
   */
  def compile(physical: PhysicalPlan): (String, Cord)

  /**
   * Fails if the backend implementation is not compatible with the connected
   * system (typically because it does not have not the correct version number).
   */
  def checkCompatibility: ETask[EnvironmentError, Unit]
}
object Evaluator {
  /** Errors raised while setting up / validating the environment (mounting,
    * configuration, version checks). */
  sealed trait EnvironmentError {
    def message: String
  }
  object EnvironmentError {
    final case class MissingBackend(message: String) extends EnvironmentError
    final case class MissingFileSystem(path: Path, config: slamdata.engine.config.BackendConfig) extends EnvironmentError {
      def message = "No data source could be mounted at the path " + path + " using the config " + config
    }
    final case object MissingDatabase extends EnvironmentError {
      def message = "no database found"
    }
    final case class InvalidConfig(message: String) extends EnvironmentError
    final case class EnvPathError(error: PathError) extends EnvironmentError {
      def message = error.message
    }
    final case class EnvEvalError(error: EvaluationError) extends EnvironmentError {
      def message = error.message
    }
    final case class UnsupportedVersion(backend: Evaluator[_], version: List[Int]) extends EnvironmentError {
      def message = "Unsupported " + backend + " version: " + version.mkString(".")
    }
  }
  type EnvTask[A] = EitherT[Task, EnvironmentError, A]
  implicit val EnvironmentErrorShow = Show.showFromToString[EnvironmentError]

  // The objects below mirror the nested error case classes so that call sites
  // can construct and pattern-match (e.g. `case MissingBackend(msg) =>`)
  // without importing the nested EnvironmentError/EvaluationError companions.
  object MissingBackend {
    def apply(message: String): EnvironmentError = EnvironmentError.MissingBackend(message)
    def unapply(obj: EnvironmentError): Option[String] = obj match {
      case EnvironmentError.MissingBackend(message) => Some(message)
      case _ => None
    }
  }
  object MissingFileSystem {
    def apply(path: Path, config: slamdata.engine.config.BackendConfig): EnvironmentError = EnvironmentError.MissingFileSystem(path, config)
    def unapply(obj: EnvironmentError): Option[(Path, slamdata.engine.config.BackendConfig)] = obj match {
      case EnvironmentError.MissingFileSystem(path, config) => Some((path, config))
      case _ => None
    }
  }
  object MissingDatabase {
    def apply(): EnvironmentError = EnvironmentError.MissingDatabase
    def unapply(obj: EnvironmentError): Boolean = obj match {
      case EnvironmentError.MissingDatabase => true
      case _ => false
    }
  }
  object InvalidConfig {
    def apply(message: String): EnvironmentError = EnvironmentError.InvalidConfig(message)
    def unapply(obj: EnvironmentError): Option[String] = obj match {
      case EnvironmentError.InvalidConfig(message) => Some(message)
      case _ => None
    }
  }
  object EnvPathError {
    def apply(error: PathError): EnvironmentError =
      EnvironmentError.EnvPathError(error)
    def unapply(obj: EnvironmentError): Option[PathError] = obj match {
      case EnvironmentError.EnvPathError(error) => Some(error)
      case _ => None
    }
  }
  object EnvEvalError {
    def apply(error: EvaluationError): EnvironmentError = EnvironmentError.EnvEvalError(error)
    def unapply(obj: EnvironmentError): Option[EvaluationError] = obj match {
      case EnvironmentError.EnvEvalError(error) => Some(error)
      case _ => None
    }
  }
  object UnsupportedVersion {
    def apply(backend: Evaluator[_], version: List[Int]): EnvironmentError = EnvironmentError.UnsupportedVersion(backend, version)
    def unapply(obj: EnvironmentError): Option[(Evaluator[_], List[Int])] = obj match {
      case EnvironmentError.UnsupportedVersion(backend, version) => Some((backend, version))
      case _ => None
    }
  }

  /** Errors raised while executing a physical plan. */
  sealed trait EvaluationError {
    def message: String
  }
  object EvaluationError {
    final case class EvalPathError(error: PathError) extends EvaluationError {
      def message = error.message
    }
    final case object NoDatabase extends EvaluationError {
      def message = "no database found"
    }
    final case class UnableToStore(message: String) extends EvaluationError
    final case class InvalidTask(message: String) extends EvaluationError
  }
  type EvaluationTask[A] = ETask[EvaluationError, A]
  object EvalPathError {
    def apply(error: PathError): EvaluationError =
      EvaluationError.EvalPathError(error)
    def unapply(obj: EvaluationError): Option[PathError] = obj match {
      case EvaluationError.EvalPathError(error) => Some(error)
      case _ => None
    }
  }
  object NoDatabase {
    def apply(): EvaluationError = EvaluationError.NoDatabase
    def unapply(obj: EvaluationError): Boolean = obj match {
      case EvaluationError.NoDatabase => true
      case _ => false
    }
  }
  object UnableToStore {
    def apply(message: String): EvaluationError = EvaluationError.UnableToStore(message)
    def unapply(obj: EvaluationError): Option[String] = obj match {
      case EvaluationError.UnableToStore(message) => Some(message)
      case _ => None
    }
  }
  object InvalidTask {
    def apply(message: String): EvaluationError = EvaluationError.InvalidTask(message)
    def unapply(obj: EvaluationError): Option[String] = obj match {
      case EvaluationError.InvalidTask(message) => Some(message)
      case _ => None
    }
  }
}
| wemrysi/quasar | core/src/main/scala/slamdata/engine/evaluator.scala | Scala | apache-2.0 | 7,120 |
package com.twitter.server.handler
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.{DefaultScalaModule, ScalaObjectMapper}
import com.twitter.util.registry.{GlobalRegistry, SimpleRegistry}
import org.scalatest.funsuite.AnyFunSuite
/** Exercises RegistryHandler's JSON rendering and filter syntax (exact keys,
  * `*` globs, and the optional leading "registry/" prefix). */
class RegistryHandlerTest extends AnyFunSuite {
  private[this] val mapper = new ObjectMapper with ScalaObjectMapper {
    registerModule(DefaultScalaModule)
  }
  private[this] val handler = new RegistryHandler()
  /** used for testing filtering */
  private[this] val filterRegistry = new SimpleRegistry()
  filterRegistry.put(Seq("foo", "bar"), "baz")
  filterRegistry.put(Seq("foo", "qux"), "quux")
  filterRegistry.put(Seq("oof"), "gah")
  filterRegistry.put(Seq("uno", "two"), "tres")
  filterRegistry.put(Seq("one", "two"), "three")
  filterRegistry.put(Seq("1", "a", "3"), "4")
  filterRegistry.put(Seq("1", "b", "3"), "5")
  // Some of these tests assume a specific iteration order over the registries
  // and HashMaps which IS NOT a guarantee. should these tests begin to fail
  // due to that, we will need to use `assertJsonResponseFor` for them as well.
  /** Order-sensitive string comparison of the handler's JSON output. */
  private[this] def assertJsonResponse(filter: Option[String], expected: String) = {
    val actual = stripWhitespace(handler.jsonResponse(filter))
    assert(actual == expected)
  }
  private[this] def stripWhitespace(string: String): String =
    string.filterNot(_.isWhitespace)
  test("RegistryHandler generates reasonable json") {
    val simple = new SimpleRegistry
    simple.put(Seq("foo", "bar"), "baz")
    simple.put(Seq("foo", "qux"), "quux")
    GlobalRegistry.withRegistry(simple) {
      assertJsonResponse(None, """{"registry":{"foo":{"bar":"baz","qux":"quux"}}}""")
    }
  }
  test("RegistryHandler.jsonResponse filters with basic matches") {
    GlobalRegistry.withRegistry(filterRegistry) {
      type Response = Map[String, Object]
      JsonHelper.assertJsonResponseFor[Response](
        mapper,
        stripWhitespace(handler.jsonResponse(Some("oof"))),
        """{"registry":{"oof":"gah"}}""")
      JsonHelper.assertJsonResponseFor[Response](
        mapper,
        stripWhitespace(handler.jsonResponse(Some("foo"))),
        """{"registry":{"foo":{"bar":"baz","qux":"quux"}}}""")
      JsonHelper.assertJsonResponseFor[Response](
        mapper,
        stripWhitespace(handler.jsonResponse(Some("foo/bar"))),
        """{"registry":{"foo":{"bar":"baz"}}}""")
    }
  }
  test("RegistryHandler.jsonResponse filters with globs") {
    GlobalRegistry.withRegistry(filterRegistry) {
      assertJsonResponse(
        Some("*/two"),
        """{"registry":{"uno":{"two":"tres"},"one":{"two":"three"}}}"""
      )
      assertJsonResponse(Some("1/*/3"), """{"registry":{"1":{"b":{"3":"5"},"a":{"3":"4"}}}}""")
    }
  }
  test("RegistryHandler.jsonResponse filters strips off leading registry key") {
    GlobalRegistry.withRegistry(filterRegistry) {
      assertJsonResponse(Some("registry/oof"), """{"registry":{"oof":"gah"}}""")
    }
  }
  test("RegistryHandler.jsonResponse filters when no keys match the filter") {
    GlobalRegistry.withRegistry(filterRegistry) {
      assertJsonResponse(Some("nope"), """{"registry":{}}""")
      assertJsonResponse(Some(""), """{"registry":{}}""")
    }
  }
}
| twitter/twitter-server | server/src/test/scala/com/twitter/server/handler/RegistryHandlerTest.scala | Scala | apache-2.0 | 3,287 |
/* sbt -- Simple Build Tool
* Copyright 2009 Mark Harrah
*/
package sbt
package classfile
import Constants._
import java.io.File
/** Parsed view of a Java class file: version, names, constant pool, members
  * and attributes (see Constants for the pool tag values). */
private[sbt] trait ClassFile
{
	val majorVersion: Int
	val minorVersion: Int
	val fileName: String
	val className: String
	val superClassName: String
	val interfaceNames: Array[String]
	val accessFlags: Int
	val constantPool: Array[Constant]
	val fields: Array[FieldOrMethodInfo]
	val methods: Array[FieldOrMethodInfo]
	val attributes: Array[AttributeInfo]
	// Value of the SourceFile attribute, when present.
	val sourceFile: Option[String]
	// All type names referenced by this class file.
	def types: Set[String]
	def stringValue(a: AttributeInfo): String
}
/** One constant-pool entry. `tag` identifies the entry kind (Constant* values
  * in Constants); index fields are pool indices (-1 when unused) and `value`
  * carries the literal for value-bearing entries. */
private[sbt] final case class Constant(tag: Byte, nameIndex: Int, typeIndex: Int, value: Option[AnyRef]) extends NotNull
{
	def this(tag: Byte, nameIndex: Int, typeIndex: Int) = this(tag, nameIndex, typeIndex, None)
	def this(tag: Byte, nameIndex: Int) = this(tag, nameIndex, -1)
	def this(tag: Byte, value: AnyRef) = this(tag, -1, -1, Some(value))
	// long and double entries occupy two constant-pool slots.
	def wide = tag == ConstantLong || tag == ConstantDouble
}
/** A field_info or method_info entry: access flags, resolved name/descriptor
  * (when the pool lookup succeeded) and the member's attributes. */
private[sbt] final case class FieldOrMethodInfo(accessFlags: Int, name: Option[String], descriptor: Option[String], attributes: IndexedSeq[AttributeInfo]) extends NotNull
{
	def isStatic = (accessFlags & ACC_STATIC) == ACC_STATIC
	def isPublic = (accessFlags & ACC_PUBLIC) == ACC_PUBLIC
	// A main method is public static with signature (String[])V.
	// exists(...) replaces the equivalent but roundabout filter(...).isDefined.
	def isMain = isPublic && isStatic && descriptor.exists(_ == "([Ljava/lang/String;)V")
}
/** A raw attribute: its resolved name (when available) and undecoded bytes. */
private[sbt] final case class AttributeInfo(name: Option[String], value: Array[Byte]) extends NotNull
{
	// exists(...) replaces the equivalent but roundabout filter(...).isDefined.
	def isNamed(s: String) = name.exists(s == _)
	def isSignature = isNamed("Signature")
	def isSourceFile = isNamed("SourceFile")
}
/** Class-file constants: access flags, the magic number, and the constant-pool
  * tag values defined by the JVM class file format. */
private[sbt] object Constants
{
	final val ACC_STATIC = 0x0008
	final val ACC_PUBLIC = 0x0001
	final val JavaMagic = 0xCAFEBABE
	// Constant-pool tags.
	final val ConstantUTF8 = 1
	final val ConstantUnicode = 2
	final val ConstantInteger = 3
	final val ConstantFloat = 4
	final val ConstantLong = 5
	final val ConstantDouble = 6
	final val ConstantClass = 7
	final val ConstantString = 8
	final val ConstantField = 9
	final val ConstantMethod = 10
	final val ConstantInterfaceMethod = 11
	final val ConstantNameAndType = 12
	// Descriptor prefix for object types.
	final val ClassDescriptor = 'L'
}
} | harrah/xsbt | util/classfile/src/main/scala/sbt/classfile/ClassFile.scala | Scala | bsd-3-clause | 2,158 |
package debop4s.rediscala.serializer
import org.xerial.snappy.Snappy
object SnappyRedisSerializer {
  /** Creates a Snappy-compressing serializer wrapping `inner` (binary by default). */
  def apply[T](inner: RedisSerializer[T] = new BinaryRedisSerializer[T]()): SnappyRedisSerializer[T] =
    new SnappyRedisSerializer[T](inner)
}
/**
 * Compresses serialized data using the Snappy compression algorithm.
 * @author Sunghyouk Bae
 */
class SnappyRedisSerializer[@miniboxed T](val inner: RedisSerializer[T] = new BinaryRedisSerializer[T])
  extends RedisSerializer[T] {

  /**
   * Serializes the object with the inner serializer, then compresses the
   * result with Snappy.
   * @param graph the object to serialize
   * @return the compressed serialized data (EMPTY_BYTES for null input)
   */
  override def serialize(graph: T): Array[Byte] = {
    if (graph == null)
      return EMPTY_BYTES

    Snappy.compress(inner.serialize(graph))
  }

  /**
   * Decompresses the Snappy-compressed data and deserializes it with the
   * inner serializer.
   * @param bytes the compressed serialized data
   * @return the restored object (null for empty input)
   */
  override def deserialize(bytes: Array[Byte]): T = {
    if (bytes == null || bytes.length == 0)
      return null.asInstanceOf[T]

    inner.deserialize(Snappy.uncompress(bytes)).asInstanceOf[T]
  }
}
| debop/debop4s | debop4s-rediscala/src/main/scala/debop4s/rediscala/serializer/SnappyRedisSerializer.scala | Scala | apache-2.0 | 1,272 |
/*
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
package com.krux.hyperion.activity
import com.krux.hyperion.adt.HBoolean
import com.krux.hyperion.datanode.S3DataNode
/** Mixin for activities that can take S3 data nodes as staged input. */
trait WithS3Input {

  type Self <: WithS3Input

  def shellCommandActivityFields: ShellCommandActivityFields
  def updateShellCommandActivityFields(fields: ShellCommandActivityFields): Self

  /** Appends the given S3 nodes to the activity's input and enables staging. */
  def withInput(inputs: S3DataNode*): Self = updateShellCommandActivityFields(
    shellCommandActivityFields.copy(
      input = shellCommandActivityFields.input ++ inputs,
      stage = Option(HBoolean.True)
    )
  )
}
| realstraw/hyperion | core/src/main/scala/com/krux/hyperion/activity/WithS3Input.scala | Scala | bsd-3-clause | 764 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.util
import org.apache.spark.sql.catalyst.analysis.Resolver
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils.DEFAULT_PARTITION_NAME
import org.apache.spark.sql.catalyst.util.CharVarcharCodegenUtils
import org.apache.spark.sql.catalyst.util.CharVarcharUtils
import org.apache.spark.sql.errors.QueryCompilationErrors
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{CharType, StructType, VarcharType}
import org.apache.spark.unsafe.types.UTF8String
private[sql] object PartitioningUtils {
  /**
   * Normalize the column names in partition specification, w.r.t. the real partition column names
   * and case sensitivity. e.g., if the partition spec has a column named `monTh`, and there is a
   * partition column named `month`, and it's case insensitive, we will normalize `monTh` to
   * `month`.
   */
  def normalizePartitionSpec[T](
      partitionSpec: Map[String, T],
      partCols: StructType,
      tblName: String,
      resolver: Resolver): Map[String, T] = {
    val rawSchema = CharVarcharUtils.getRawSchema(partCols, SQLConf.get)
    val normalizedPartSpec = partitionSpec.toSeq.map { case (key, value) =>
      // Resolve the user-supplied key against the real partition columns.
      val normalizedFiled = rawSchema.find(f => resolver(f.name, key)).getOrElse {
        throw QueryCompilationErrors.invalidPartitionColumnKeyInTableError(key, tblName)
      }
      // For char/varchar partition columns, apply the write-side length check
      // (unless char/varchar are being treated as plain strings). The default
      // partition name sentinel is never length-checked.
      val normalizedVal =
        if (SQLConf.get.charVarcharAsString) value else normalizedFiled.dataType match {
          case CharType(len) if value != null && value != DEFAULT_PARTITION_NAME =>
            val v = value match {
              case Some(str: String) => Some(charTypeWriteSideCheck(str, len))
              case str: String => charTypeWriteSideCheck(str, len)
              case other => other
            }
            v.asInstanceOf[T]
          case VarcharType(len) if value != null && value != DEFAULT_PARTITION_NAME =>
            val v = value match {
              case Some(str: String) => Some(varcharTypeWriteSideCheck(str, len))
              case str: String => varcharTypeWriteSideCheck(str, len)
              case other => other
            }
            v.asInstanceOf[T]
          case _ => value
        }
      normalizedFiled.name -> normalizedVal
    }

    // Two spec entries must not normalize to the same column.
    SchemaUtils.checkColumnNameDuplication(
      normalizedPartSpec.map(_._1), "in the partition schema", resolver)

    normalizedPartSpec.toMap
  }

  // Pads/validates a char(len) partition value.
  private def charTypeWriteSideCheck(inputStr: String, limit: Int): String = {
    val toUtf8 = UTF8String.fromString(inputStr)
    CharVarcharCodegenUtils.charTypeWriteSideCheck(toUtf8, limit).toString
  }

  // Validates a varchar(len) partition value.
  private def varcharTypeWriteSideCheck(inputStr: String, limit: Int): String = {
    val toUtf8 = UTF8String.fromString(inputStr)
    CharVarcharCodegenUtils.varcharTypeWriteSideCheck(toUtf8, limit).toString
  }

  /**
   * Verify if the input partition spec exactly matches the existing defined partition spec
   * The columns must be the same but the orders could be different.
   */
  def requireExactMatchedPartitionSpec(
      tableName: String,
      spec: TablePartitionSpec,
      partitionColumnNames: Seq[String]): Unit = {
    val defined = partitionColumnNames.sorted
    if (spec.keys.toSeq.sorted != defined) {
      throw QueryCompilationErrors.invalidPartitionSpecError(spec.keys.mkString(", "),
        partitionColumnNames, tableName)
    }
  }
}
| ueshin/apache-spark | sql/catalyst/src/main/scala/org/apache/spark/sql/util/PartitioningUtils.scala | Scala | apache-2.0 | 4,279 |
/*
* Copyright 2015 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.testkit
import akka.testkit.TestKitBase
import scala.concurrent.duration._
object DebugTiming {
  // True when the JVM was started with a JDWP debug agent ("jdwp" appears in
  // the runtime input arguments).
  val debugMode = java.lang.management.ManagementFactory.getRuntimeMXBean.
    getInputArguments.toString.indexOf("jdwp") >= 0
  // Generous timeout substituted for configured timeouts while debugging.
  val debugTimeout = 10000.seconds
  if (debugMode) println(
    "\\n##################\\n" +
      s"IMPORTANT: Detected system running in debug mode. Test timeouts overridden to $debugTimeout.\\n" +
      "##################\\n\\n")
}
/** TestKit mixin that stretches receive timeouts while a debugger is attached,
  * so stepping through code does not fail tests on timing. */
trait DebugTiming extends TestKitBase {
  import DebugTiming._
  override def receiveOne(max: Duration): AnyRef = {
    // Under a debugger, ignore the caller's timeout and use the long one.
    val effectiveMax = if (debugMode) debugTimeout else max
    super.receiveOne(effectiveMax)
  }
}
| keshin/squbs | squbs-testkit/src/main/scala/org/squbs/testkit/DebugTiming.scala | Scala | apache-2.0 | 1,274 |
/*
* Copyright (c) 2017 Magomed Abdurakhmanov, Hypertino
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
*/
package com.hypertino.facade.filters.annotated
import com.hypertino.binders.value.Obj
import com.hypertino.facade.filter.model.RequestFilter
import com.hypertino.facade.filter.parser.{ExpressionEvaluator, ExpressionEvaluatorContext, PreparedExpression}
import com.hypertino.facade.metrics.MetricKeys
import com.hypertino.facade.model._
import com.hypertino.facade.utils.{HrlTransformer, RequestUtils}
import com.hypertino.hyperbus.model.HRL
import monix.eval.Task
import monix.execution.Scheduler
/**
 * Request filter that forwards (rewrites) an incoming request to a new location.
 *
 * The destination location, query parameters and (optionally) the HTTP method
 * are expressions evaluated against the current request context.
 *
 * @param sourceHRL           HRL pattern of the resource being forwarded from
 * @param location            expression producing the destination location
 * @param query               expressions producing destination query parameters
 * @param method              optional expression producing the destination method
 * @param expressionEvaluator evaluator used for all of the above expressions
 */
class ForwardRequestFilter(sourceHRL: HRL,
                           location: PreparedExpression,
                           query: Map[String, PreparedExpression],
                           method: Option[PreparedExpression],
                           protected val expressionEvaluator: ExpressionEvaluator) extends RequestFilter {
  val timer = Some(MetricKeys.specificFilter("ForwardRequestFilter"))
  override def apply(requestContext: RequestContext)
                    (implicit scheduler: Scheduler): Task[RequestContext] = {
    // Evaluation is synchronous; wrap the result in an already-completed Task.
    Task.now {
      val request = requestContext.request
      val ctx = ExpressionEvaluatorContext(requestContext, Obj.empty)
      // Evaluate the target location and each query-parameter expression.
      val locationEvaluated = expressionEvaluator.evaluate(ctx, location).toString
      val queryEvaluated = query.map { kv ⇒
        kv._1 → expressionEvaluator.evaluate(ctx, kv._2)
      }
      val destinationHRL = HRL(locationEvaluated, queryEvaluated)
      // Method is optional: when absent, the original request method is kept downstream.
      val destinationMethod = method.map(expressionEvaluator.evaluate(ctx, _).toString)
      // todo: should we preserve all query fields???
      // Rewrite the request HRL from the source pattern to the destination pattern.
      val rewrittenUri = HrlTransformer.rewriteForwardWithPatterns(request.headers.hrl, sourceHRL, destinationHRL)
      requestContext.copy(
        request = RequestUtils.copyWith(request, rewrittenUri, destinationMethod)
      )
    }
  }
}
| hypertino/hyperfacade | src/main/scala/com/hypertino/facade/filters/annotated/ForwardRequestFilter.scala | Scala | mpl-2.0 | 2,084 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.test.module.input
import com.bwsw.sj.common.engine.StreamingValidator
/**
 * Minimal [[StreamingValidator]] for the test input module; relies entirely on
 * the trait's default validation behaviour (no overrides).
 *
 * @author Pavel Tomskikh
 */
class Validator extends StreamingValidator
| bwsw/sj-platform | tests/pipeline/sj-input-test/src/main/scala/com/bwsw/sj/test/module/input/Validator.scala | Scala | apache-2.0 | 979 |
/*
* This file is part of eCobertura.
*
* Copyright (c) 2009, 2010 Joachim Hofer
* All rights reserved.
*
* This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*/
package ecobertura.ui.util
/** Formatting helpers for coverage figures. */
object Format {

  /**
   * Renders numerator/denominator as a percentage string with two decimals,
   * or "-" when the denominator is zero (i.e. the ratio is undefined).
   */
  def asPercentage(numerator: Int, denominator: Int) = denominator match {
    case 0 => "-"
    case d =>
      val ratio = numerator.toDouble / d * 100.0
      "%3.2f %%".format(ratio)
  }
}
| jmhofer/eCobertura | ecobertura.ui/src/main/scala/ecobertura/ui/util/Format.scala | Scala | epl-1.0 | 537 |
/**
* Copyright (C) 2015 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.xbl
import org.orbeon.css.CSSSelectorParser
import org.orbeon.css.CSSSelectorParser.Selector
import org.orbeon.dom.Element
import org.orbeon.oxf.util.StringUtils._
import org.orbeon.oxf.xml.Dom4j
import org.orbeon.oxf.xml.dom4j.Dom4jUtils
import org.scalatest.FunSpec
/**
 * Unit tests for [[BindingIndex]]: checks that the most specific XBL binding is
 * selected when an element matches several CSS selectors, both by element name
 * and by `appearance` attribute matching.
 */
class BindingIndexTest extends FunSpec {
  // Minimal IndexableBinding carrying only selectors and their namespace mappings.
  case class TestBinding(
    selectors           : List[Selector],
    selectorsNamespaces : Map[String, String]
  ) extends IndexableBinding {
    val path         = None
    val lastModified = -1L
  }
  val FooURI = "http://orbeon.org/oxf/xml/foo"
  val Namespaces = Map("foo" → FooURI)
  // NOTE: ordered from most specific to least specific; the destructuring below
  // relies on this exact order.
  val AllSelectors =
    CSSSelectorParser.parseSelectors(
      """
        foo|bar,
        foo|baz,
        [appearance ~= baz],
        [appearance = gaga],
        [appearance ~= gaga],
        [appearance |= gaga],
        [appearance ^= gaga],
        [appearance $= gaga],
        [appearance *= gaga],
        foo|bar[appearance ~= baz]
      """.trimAllToEmpty
    )
  val AllBindings =
    AllSelectors map (s ⇒ TestBinding(List(s), Namespaces))
  // Positional destructuring: one named binding per selector above, same order.
  val (
    fooBarBinding ::
    fooBazBinding ::
    appearanceTokenBazBinding ::
    appearanceIsGagaBinding ::
    appearanceTokenGagaBinding ::
    appearancePrefixGagaBinding ::
    appearanceStartsWithGagaBinding ::
    appearanceEndsWithGagaBinding ::
    appearanceContainsGagaBinding ::
    fooBarAppearanceBazBinding ::
    Nil
  ) = AllBindings
  // Builds a fresh index containing every test binding.
  def indexWithAllBindings = {
    var currentIndex: BindingIndex[IndexableBinding] = GlobalBindingIndex.Empty
    // We wrote the attribute bindings above from more specific to least specific, and the index prepends new
    // bindings as we index, so newer bindings are found first. To help with testing matching by attribute, we
    // index in reverse order, so that e.g. [appearance ~= baz] is found before [appearance *= gaga].
    AllBindings.reverse foreach { binding ⇒
      currentIndex = BindingIndex.indexBinding(currentIndex, binding)
    }
    currentIndex
  }
  // Parses an XML fragment, wrapping it in a root element that declares the test namespaces.
  def parseXMLElemWithNamespaces(xmlElem: String): Element = {
    val namespacesString =
      Namespaces map { case (prefix, uri) ⇒ s"""xmlns:$prefix="$uri"""" } mkString " "
    val encapsulated =
      s"""<root $namespacesString>$xmlElem</root>"""
    Dom4j.elements(Dom4jUtils.readDom4j(encapsulated).getRootElement).head
  }
  // Registers one `it` test asserting that `xmlElem` resolves to `binding` in `index`.
  def assertElemMatched(index: BindingIndex[IndexableBinding], xmlElem: String, binding: IndexableBinding) = {
    val elem = parseXMLElemWithNamespaces(xmlElem)
    val atts = Dom4j.attributes(elem) map (a ⇒ a.getQName → a.getValue)
    val found = BindingIndex.findMostSpecificBinding(index, elem.getQName, atts)
    it(s"must pass with `$xmlElem`") {
      assert(Some(binding) === (found map (_._1)))
    }
  }
  describe("Selector priority") {
    val currentIndex = indexWithAllBindings
    assertElemMatched(currentIndex, """<foo:bar/>""", fooBarBinding)
    assertElemMatched(currentIndex, """<foo:baz/>""", fooBazBinding)
    assertElemMatched(currentIndex, """<foo:bar appearance="bar"/>""", fooBarBinding)
    assertElemMatched(currentIndex, """<foo:baz appearance="bar"/>""", fooBazBinding)
    assertElemMatched(currentIndex, """<foo:baz appearance="baz"/>""", appearanceTokenBazBinding)
    assertElemMatched(currentIndex, """<foo:baz appearance="fuzz baz toto"/>""", appearanceTokenBazBinding)
    assertElemMatched(currentIndex, """<foo:bar appearance="baz"/>""", fooBarAppearanceBazBinding)
  }
  describe("Matching by attribute") {
    val currentIndex = indexWithAllBindings
    assertElemMatched(currentIndex, """<foo:bar appearance="gaga"/>""", appearanceIsGagaBinding)
    assertElemMatched(currentIndex, """<foo:bar appearance="fuzz gaga toto"/>""", appearanceTokenGagaBinding)
    assertElemMatched(currentIndex, """<foo:bar appearance="gaga toto"/>""", appearanceTokenGagaBinding)
    assertElemMatched(currentIndex, """<foo:bar appearance="fuzz gaga"/>""", appearanceTokenGagaBinding)
    assertElemMatched(currentIndex, """<foo:bar appearance="gaga-en"/>""", appearancePrefixGagaBinding)
    assertElemMatched(currentIndex, """<foo:bar appearance="gagaba"/>""", appearanceStartsWithGagaBinding)
    assertElemMatched(currentIndex, """<foo:bar appearance="bagaga"/>""", appearanceEndsWithGagaBinding)
    assertElemMatched(currentIndex, """<foo:bar appearance="bagagada"/>""", appearanceContainsGagaBinding)
  }
}
| brunobuzzi/orbeon-forms | xforms/jvm/src/test/scala/org/orbeon/oxf/xforms/xbl/BindingIndexTest.scala | Scala | lgpl-2.1 | 5,312 |
package views.vrm_retention
import composition.TestHarness
import controllers.routes.CookiePolicy
import helpers.vrm_retention.CookieFactoryForUISpecs
import org.openqa.selenium.By
import org.openqa.selenium.WebDriver
import org.scalatest.selenium.WebBrowser.{click, currentUrl, go, pageSource, pageTitle}
import pages.common.AlternateLanguages.{isCymraegDisplayed, isEnglishDisplayed}
import pages.vrm_retention.BeforeYouStartPage
import pages.vrm_retention.BeforeYouStartPage.footerItem
import pages.vrm_retention.VehicleLookupPage
import uk.gov.dvla.vehicles.presentation.common.controllers.AlternateLanguages.CyId
import uk.gov.dvla.vehicles.presentation.common.controllers.routes.AlternateLanguages
import uk.gov.dvla.vehicles.presentation.common.testhelpers.{UiSpec, UiTag}
/**
 * Browser-driven UI tests for the "Before You Start" page: page load, cookie
 * clean-up, cookie-policy footer links, English/Welsh language toggling, and
 * navigation to the vehicle-lookup page.
 */
class BeforeYouStartIntegrationSpec extends UiSpec with TestHarness {
  "go to page" should {
    "display the page" taggedAs UiTag in new WebBrowserForSelenium {
      go to BeforeYouStartPage
      currentUrl should equal(BeforeYouStartPage.url)
    }
    "remove redundant cookies (needed for when a user exits the service and " +
      "comes back)" taggedAs UiTag in new WebBrowserForSeleniumWithPhantomJsLocal {
      // Pre-populate the session with cookies from a previous journey.
      def cacheSetup()(implicit webDriver: WebDriver) =
        CookieFactoryForUISpecs.setupBusinessDetails().
          businessDetails().
          vehicleAndKeeperDetailsModel()
      go to BeforeYouStartPage
      cacheSetup()
      go to BeforeYouStartPage
      // Verify the cookies identified by the full set of cache keys have been removed
      RelatedCacheKeys.RetainSet.foreach(cacheKey => webDriver.manage().getCookieNamed(cacheKey) should equal(null))
    }
    "display the global cookie message when cookie 'seen_cookie_message' " +
      "does not exist" taggedAs UiTag in new WebBrowserForSelenium {
      go to BeforeYouStartPage
      pageSource should include("Find out more about cookies")
    }
    "display a link to the cookie policy" taggedAs UiTag in new WebBrowserForSelenium {
      go to BeforeYouStartPage
      // Footer item 0 is expected to be the cookie-policy link.
      footerItem(index = 0).findElement(By.tagName("a")).getAttribute("href") should
        include(CookiePolicy.present().toString())
    }
    "display a Cymraeg link" taggedAs UiTag in new WebBrowserForSelenium {
      go to BeforeYouStartPage
      // Footer item 1 is expected to be the Welsh-language toggle.
      footerItem(index = 1).findElement(By.tagName("a")).getAttribute("href") should
        include(AlternateLanguages.withLanguage(CyId).toString())
    }
    "change language to welsh when Cymraeg link clicked" taggedAs UiTag in new WebBrowserForSelenium {
      go to BeforeYouStartPage
      click on footerItem(index = 1).findElement(By.tagName("a"))
      pageTitle should equal(BeforeYouStartPage.titleCy)
    }
  }
  "display the 'Cymraeg' language button and not the 'English' language button when " +
    "the play language cookie has value 'en'" taggedAs UiTag in new WebBrowserForSelenium {
    go to BeforeYouStartPage // By default will load in English.
    CookieFactoryForUISpecs.withLanguageEn()
    go to BeforeYouStartPage
    isCymraegDisplayed should equal(true)
    isEnglishDisplayed should equal(false)
  }
  "display the 'English' language button and not the 'Cymraeg' language button when " +
    "the play language cookie has value 'cy'" taggedAs UiTag in new WebBrowserForSelenium {
    go to BeforeYouStartPage // By default will load in English.
    CookieFactoryForUISpecs.withLanguageCy()
    go to BeforeYouStartPage
    isCymraegDisplayed should equal(false)
    isEnglishDisplayed should equal(true)
    pageTitle should equal(BeforeYouStartPage.titleCy)
  }
  "display the 'Cymraeg' language button and not the 'English' language button and mailto when " +
    "the play language cookie does not exist " +
    "(assumption that the browser default language is English)" taggedAs UiTag in new WebBrowserForSelenium {
    go to BeforeYouStartPage
    isCymraegDisplayed should equal(true)
    isEnglishDisplayed should equal(false)
  }
  "startNow button" should {
    "go to next page" taggedAs UiTag in new WebBrowserForSelenium {
      go to BeforeYouStartPage
      click on BeforeYouStartPage.startNow
      currentUrl should equal(VehicleLookupPage.url)
    }
  }
}
| dvla/vrm-retention-online | test/views/vrm_retention/BeforeYouStartIntegrationSpec.scala | Scala | mit | 4,208 |
package uk.gov.dvla.vehicles.presentation.common.controllers
import com.google.inject.Inject
import play.api.mvc.{Action, Controller}
import uk.gov.dvla.vehicles.presentation.common.clientsidesession.ClientSideSessionFactory
import uk.gov.dvla.vehicles.presentation.common.views
/** Play controller that serves the static "please wait" page. */
class PleaseWaitController @Inject()(implicit clientSideSessionFactory: ClientSideSessionFactory)
  extends Controller{
  // GET handler: always renders the please-wait view with 200 OK.
  def present = Action { implicit request =>
    Ok(views.html.pleaseWaitView())
  }
}
| dvla/vehicles-presentation-common | common-test/app/uk/gov/dvla/vehicles/presentation/common/controllers/PleaseWaitController.scala | Scala | mit | 489 |
package org.fayalite.gate.server
/** Server-side rendering of the bootstrap HTML page via Scalatags. */
object PageRender {
  /**
   * Default index page markup: sets title/favicon/charset, loads the Scala.js
   * `fayalite-fastopt.js` artifact and invokes its `App().main()` entry point.
   */
  val defaultIndexPage = {
    import scalatags.Text.all._
    // "<!DOCTYPE html>" + // ?Necessary?
    html(
      scalatags.Text.all.head(
        scalatags.Text.tags2.title("fayalite"),
        link(rel := "icon", href := "img/fay16x16.png"),
        meta(charset := "UTF-8")
      )
      ,
      body(
        // Load the compiled Scala.js application, then start it.
        script(
          src := "fayalite-fastopt.js",
          `type` := "text/javascript"),
        script("org.fayalite.sjs.App().main()",
          `type` := "text/javascript")
      )
    ).render
  }
}
| ryleg/fayalite | gate/src/main/scala/org/fayalite/gate/server/PageRender.scala | Scala | mit | 574 |
package mesosphere.marathon.core.launcher.impl
import mesosphere.marathon.core.launcher.TaskOp
import mesosphere.marathon.core.matcher.base.util.OfferOperationFactory
import mesosphere.marathon.core.task.{ TaskStateOp, Task }
import mesosphere.marathon.state.DiskSource
import mesosphere.marathon.core.task.Task.LocalVolume
import mesosphere.util.state.FrameworkId
import org.apache.mesos.{ Protos => Mesos }
/**
 * Helper for building [[TaskOp]]s together with the Mesos offer operations
 * they require (launch, reserve, create-volumes).
 *
 * @param principalOpt optional framework principal used for reservations
 * @param roleOpt      optional role used for reservations
 */
class TaskOpFactoryHelper(
    private val principalOpt: Option[String],
    private val roleOpt: Option[String]) {

  private[this] val offerOperationFactory = new OfferOperationFactory(principalOpt, roleOpt)

  /** Launch a new ephemeral (non-resident) task; no prior reservation involved. */
  def launchEphemeral(
    taskInfo: Mesos.TaskInfo,
    newTask: Task.LaunchedEphemeral): TaskOp.Launch = {

    assume(newTask.taskId.mesosTaskId == taskInfo.getTaskId, "marathon task id and mesos task id must be equal")

    def createOperations = Seq(offerOperationFactory.launch(taskInfo))

    val stateOp = TaskStateOp.LaunchEphemeral(newTask)
    TaskOp.Launch(taskInfo, stateOp, oldTask = None, createOperations)
  }

  /** Launch a task on resources/volumes previously reserved for `oldTask`. */
  def launchOnReservation(
    taskInfo: Mesos.TaskInfo,
    newTask: TaskStateOp.LaunchOnReservation,
    oldTask: Task.Reserved): TaskOp.Launch = {

    def createOperations = Seq(offerOperationFactory.launch(taskInfo))

    TaskOp.Launch(taskInfo, newTask, Some(oldTask), createOperations)
  }

  /**
   * Returns a set of operations to reserve ALL resources (cpu, mem, ports, disk, etc.) and then create persistent
   * volumes against them as needed
   */
  def reserveAndCreateVolumes(
    frameworkId: FrameworkId,
    newTask: TaskStateOp.Reserve,
    resources: Iterable[Mesos.Resource],
    localVolumes: Iterable[(DiskSource, LocalVolume)]): TaskOp.ReserveAndCreateVolumes = {

    def createOperations = Seq(
      offerOperationFactory.reserve(frameworkId, newTask.taskId, resources),
      offerOperationFactory.createVolumes(
        frameworkId,
        newTask.taskId,
        localVolumes))

    TaskOp.ReserveAndCreateVolumes(newTask, resources, createOperations)
  }
}
| timcharper/marathon | src/main/scala/mesosphere/marathon/core/launcher/impl/TaskOpFactoryHelper.scala | Scala | apache-2.0 | 2,041 |
/*
* Copyright 2016 Uncharted Software Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package software.uncharted.salt.core.generation.request
import org.scalatest._
import software.uncharted.salt.core.generation.request._
import software.uncharted.salt.core.projection._
import org.apache.spark.sql.Row
/**
 * Tests for [[TileSeqRequest]]: every requested (level, index) coordinate must
 * be reported in-request, and coordinates at other levels must not.
 */
class TileSeqRequestSpec extends FunSpec {
  describe("TileSeqRequest") {
    describe("#inRequest()") {
      it("should facilitate requesting tiles using a sequence of coordinates") {
        // Repeat with random indices to cover many coordinate combinations.
        for (i <- 0 until 100) {
          // One random in-bounds index per zoom level 0..5.
          val tiles = Seq(0,1,2,3,4,5).map(a => (a, (Math.random*Math.pow(2, a)).toInt))
          val request = new TileSeqRequest(tiles)
          tiles.foreach(a => {
            assert(request.inRequest(a))
          })
          // A level-6 coordinate was never requested, so it must be rejected.
          assert(!request.inRequest((6, (Math.random*Math.pow(2, 6)).toInt)))
        }
      }
    }
  }
}
| unchartedsoftware/salt | src/test/scala/software/uncharted/salt/core/generation/request/TileSeqRequestSpec.scala | Scala | apache-2.0 | 1,378 |
/*
* spark-examples
* Copyright (C) 2015 Emmanuelle Raffenne
*
* This program is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see http://www.gnu.org/licenses/.
*/
package com.example.spark.streaming
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Duration, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
/**
 * Spark Streaming example that consumes the "test" Kafka topic via a local
 * Zookeeper quorum, prints the raw stream and per-key counts, and dumps the
 * stream to /tmp/kafka*-stream text files.
 *
 * NOTE(review): extending App *and* overriding main is redundant — App's
 * delayed-initialization body is unused here; a plain object with a main
 * method would be clearer.
 */
object KafkaConsumer extends App {
  override def main(args: Array[String]) {
    // Optional single argument selects the Spark master; defaults to local[2].
    val master = if ( args.length == 1) args(0) else "local[2]"
    val config: SparkConf = new SparkConf().setMaster(master).setAppName("KafkaConsumer")
    val sc: SparkContext = new SparkContext(config)
    // 2-second micro-batch interval.
    val ssc: StreamingContext = new StreamingContext(sc, new Duration(2000))
    val quorum = "localhost:2181"
    val group = "Consumers"
    // topic name -> number of consumer threads
    val topics = Map("test" -> 1)
    val stream = KafkaUtils.createStream(ssc, quorum, group, topics)
    // Count messages per Kafka key within each batch.
    val pairs = stream.map( t => (t._1, 1))
    val result = pairs.reduceByKey(_ + _)
    stream.print()
    result.print()
    stream.saveAsTextFiles("file:///tmp/kafka", "stream")
    ssc.start()
    // Blocks until the streaming context is stopped externally.
    ssc.awaitTermination()
  }
}
| eraffenne/spark-streaming-examples | src/main/scala/com/example/spark/streaming/KafkaConsumer.scala | Scala | gpl-3.0 | 1,684 |
package uk.co.morleydev.zander.client.test.unit.data.map
import uk.co.morleydev.zander.client.data.map.CMakeBuildModeBuildTypeMap
import uk.co.morleydev.zander.client.model.arg.BuildMode
import uk.co.morleydev.zander.client.model.arg.BuildMode.BuildMode
import uk.co.morleydev.zander.client.test.unit.UnitTest
/**
 * Verifies that [[CMakeBuildModeBuildTypeMap]] maps each Zander build mode to
 * the corresponding CMake CMAKE_BUILD_TYPE string.
 */
class CMakeBuildTypeMapTests extends UnitTest {
  // Table-driven helper: registers one spec per (mode, expected string) pair.
  private def testCase(buildMode : BuildMode, buildType : String) {
    describe("Given a Build Type Build Mode map") {
      describe("When mapping %s to %s".format(buildMode, buildType)) {
        val result = CMakeBuildModeBuildTypeMap(buildMode)
        it("Then the expected result is returned") {
          assert(result == buildType)
        }
      }
    }
  }
  testCase(BuildMode.Debug, "Debug")
  testCase(BuildMode.Release, "Release")
}
| MorleyDev/zander.client | src/test/scala/uk/co/morleydev/zander/client/test/unit/data/map/CMakeBuildTypeMapTests.scala | Scala | mit | 811 |
package com.twitter.finatra.tests.json.internal.caseclass.validation.domain
import com.twitter.finatra.validation.NotEmpty
import org.joda.time.DateTime
/**
 * Test domain object for case-class validation: `name` must be non-empty
 * (enforced by @NotEmpty); the remaining fields are defaulted/optional.
 */
case class Person(
  @NotEmpty name: String,
  nickname: String = "unknown",
  dob: Option[DateTime] = None,
  address: Option[Address] = None)
| tom-chan/finatra | jackson/src/test/scala/com/twitter/finatra/tests/json/internal/caseclass/validation/domain/Person.scala | Scala | apache-2.0 | 303 |
package io.ssc.angles.pipeline.explorers
import com.google.common.collect.{HashMultimap, SetMultimap}
import scala.collection.JavaConverters._
import scala.collection.mutable
/**
* Helper class for managing a set of clusters. Data is backed by two SetMultimaps.
*/
/**
 * Helper class for managing a set of clusters. Data is backed by two SetMultimaps:
 * one from cluster id to members, one from member to cluster ids (members may
 * belong to several clusters).
 */
class ClusterSet[T] {

  private val members: mutable.Set[T] = mutable.HashSet.empty[T]
  private val byCluster: SetMultimap[Int, T] = HashMultimap.create()
  private val byExplorer: SetMultimap[T, Int] = HashMultimap.create()

  // -1 means newCluster() has never been called.
  private var activeCluster = -1

  /** Adds an explorer to the most recently opened cluster. */
  def addExplorerToCurrentCluster(explorer: T): Unit = {
    if (activeCluster == -1) {
      throw new Exception("newCluster has to be called first")
    }
    byCluster.put(activeCluster, explorer)
    byExplorer.put(explorer, activeCluster)
    members += explorer
  }

  /** Opens a new (initially empty) cluster and makes it current. */
  def newCluster() = {
    activeCluster += 1
  }

  /** All cluster ids the given explorer was added to. */
  def getClusterIdsForExplorer(explorer: T): Set[Int] =
    byExplorer.get(explorer).asScala.toSet

  /** Snapshot of every explorer added so far. */
  def getExplorers: Set[T] =
    members.toSet

  /** Number of non-empty clusters; throws if newCluster() was never called. */
  def getNumClusters = {
    if (activeCluster == -1) {
      throw new Exception("newCluster has to be called first")
    }
    byCluster.keySet().size()
  }

  /** Members of one cluster (empty if the id is unknown or the cluster is empty). */
  def getCluster(cluster: Int): Iterable[T] =
    byCluster.asMap().get(cluster).asScala

  /** All non-empty clusters as collections of members. */
  def getClusters(): Iterable[Iterable[T]] =
    byCluster.asMap().entrySet().asScala.map(e => e.getValue.asScala).toIterable
}
| jhendess/angles | src/main/scala/io/ssc/angles/pipeline/explorers/ClusterSet.scala | Scala | gpl-3.0 | 1,502 |
/** **\\
** Copyright (c) 2012 Center for Organic and Medicinal Chemistry **
** Zurich University of Applied Sciences **
** Wädenswil, Switzerland **
\\** **/
package chemf
import scalaz.{Equal, Show}
/**
* @author Stefan Höck
*/
/** A chemical bond type: its one-character text symbol and its valence contribution
  * (aromatic bonds contribute 0). */
sealed abstract class Bond(val symbol: String, val valence: Int)

object Bond {
  case object Single extends Bond("-", 1)
  case object Double extends Bond("=", 2)
  case object Triple extends Bond("#", 3)
  case object Quadruple extends Bond("$", 4)
  case object Aromatic extends Bond(":", 0)

  // All bond variants; sealed hierarchy so this list is exhaustive.
  val values = List[Bond] (Single, Double, Triple, Quadruple, Aromatic)

  // scalaz type-class instances: structural equality and symbol-based display.
  implicit val BondEqual = Equal.equalA[Bond]

  implicit val BondShow = Show.shows[Bond](_.symbol)
}
| stefan-hoeck/chemf | src/main/scala/chemf/Bond.scala | Scala | gpl-3.0 | 927 |
package io.scalac.seed.route
import akka.pattern.ask
import akka.util.Timeout
import io.scalac.seed.domain.AggregateRoot.Removed
import io.scalac.seed.domain.VehicleAggregate
import io.scalac.seed.service.{UserAggregateManager, VehicleAggregateManager}
import VehicleAggregate.Vehicle
import VehicleAggregateManager.{GetVehicle, RegisterVehicle}
import java.util.UUID
import io.scalac.seed.service.UserAggregateManager.RegisterUser
import org.json4s.{DefaultFormats, JObject}
import org.scalatest.{BeforeAndAfterAll, Matchers, FlatSpec}
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps
import spray.http.{BasicHttpCredentials, StatusCodes}
import spray.testkit.ScalatestRouteTest
/**
 * Spray route tests for [[VehicleRoute]]: CRUD round-trips against the vehicle
 * aggregate manager, with basic-auth credentials for mutating requests.
 */
class VehicleRouteSpec extends FlatSpec with ScalatestRouteTest with Matchers with VehicleRoute with BeforeAndAfterAll {

  implicit val json4sFormats = DefaultFormats
  implicit val timeout = Timeout(2.seconds)

  def actorRefFactory = system

  val vehicleAggregateManager = system.actorOf(VehicleAggregateManager.props)
  val userAggregateManager = system.actorOf(UserAggregateManager.props)

  implicit val routeTestTimeout = RouteTestTimeout(5.seconds)

  // Credentials for the user registered in beforeAll.
  val credentials = BasicHttpCredentials("test", "test")

  // Register the test user once, so authenticated requests succeed.
  override def beforeAll: Unit = {
    val userFuture = userAggregateManager ? RegisterUser("test", "test")
    Await.result(userFuture, 5 seconds)
  }

  "VehicleRoute" should "return not found if non-existing vehicle is requested" in {
    Get("/vehicles/" + UUID.randomUUID().toString) ~> vehicleRoute ~> check {
      response.status shouldBe StatusCodes.NotFound
    }
  }

  it should "create a vehicle" in {
    val regNumber = "123"
    val color = "Cerulean"
    Post("/vehicles", Map("regNumber" -> regNumber, "color" -> color)) ~> addCredentials(credentials) ~> vehicleRoute ~> check {
      response.status shouldBe StatusCodes.Created
      // Cross-check the created aggregate via the manager, not just the HTTP response.
      val id = (responseAs[JObject] \ "id").extract[String]
      val vehicle = getVehicleFromManager(id)
      vehicle.regNumber shouldEqual regNumber
      vehicle.color shouldEqual color
    }
  }

  it should "return existing vehicle" in {
    val regNumber = "456"
    val color = "Navajo white"
    val vehicle = createVehicleInManager(regNumber, color)
    Get(s"/vehicles/" + vehicle.id) ~> vehicleRoute ~> check {
      response.status shouldBe StatusCodes.OK
      val responseJson = responseAs[JObject]
      (responseJson \ "regNumber").extract[String] shouldEqual regNumber
      (responseJson \ "color").extract[String] shouldEqual color
    }
  }

  it should "remove vehicle" in {
    val vehicle = createVehicleInManager("123", "Pastel pink")
    Delete("/vehicles/" + vehicle.id) ~> addCredentials(credentials) ~> vehicleRoute ~> check {
      response.status shouldBe StatusCodes.NoContent
      // After deletion, the aggregate manager must report the vehicle as Removed.
      val emptyVehicleFuture = (vehicleAggregateManager ? GetVehicle(vehicle.id))
      val emptyVehicle = Await.result(emptyVehicleFuture, 2.seconds)
      emptyVehicle shouldBe Removed
    }
  }

  it should "update vehicle's regNumber" in {
    val vehicle = createVehicleInManager("123", "Persian indigo")
    val newRegNumber = "456"
    Post(s"/vehicles/${vehicle.id}/regnumber", Map("value" -> newRegNumber)) ~> addCredentials(credentials) ~> vehicleRoute ~> check {
      response.status shouldBe StatusCodes.OK
      val updatedVehicle = getVehicleFromManager(vehicle.id)
      updatedVehicle.regNumber shouldEqual newRegNumber
    }
  }

  it should "update vehicle's color" in {
    val vehicle = createVehicleInManager("123", "Cherry blossom pink")
    val newColor = "Atomic tangerine"
    Post(s"/vehicles/${vehicle.id}/color", Map("value" -> newColor)) ~> addCredentials(credentials) ~> vehicleRoute ~> check {
      response.status shouldBe StatusCodes.OK
      val updatedVehicle = getVehicleFromManager(vehicle.id)
      updatedVehicle.color shouldEqual newColor
    }
  }

  // Fetches a vehicle aggregate synchronously (test helper).
  private def getVehicleFromManager(id: String) = {
    val vehicleFuture = (vehicleAggregateManager ? GetVehicle(id)).mapTo[Vehicle]
    Await.result(vehicleFuture, 2.seconds)
  }

  // Creates a vehicle aggregate synchronously (test helper).
  private def createVehicleInManager(regNumber: String, color: String) = {
    val vehicleFuture = (vehicleAggregateManager ? RegisterVehicle(regNumber, color)).mapTo[Vehicle]
    Await.result(vehicleFuture, 2.seconds)
  }
}
} | vimvim/akka-persistence-event-sourcing | src/test/scala/io/scalac/seed/route/VehicleRouteSpec.scala | Scala | apache-2.0 | 4,306 |
/* _____ _
* | ___| __ __ _ _ __ ___ (_) __ _ _ __
* | |_ | '__/ _` | '_ ` _ \\| |/ _` | '_ \\
* | _|| | | (_| | | | | | | | (_| | | | |
* |_| |_| \\__,_|_| |_| |_|_|\\__,_|_| |_|
*
* Copyright 2014 Pellucid Analytics
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package framian
package column
import spire.macros.{ Checked, ArithmeticOverflowException }
/**
 * A lazily-evaluated column backed by a row -> Cell function. Most operations
 * stay lazy (wrapping `f`); `reindex`/`force` materialize into a dense column.
 */
private[framian] case class EvalColumn[A](f: Int => Cell[A]) extends BoxedColumn[A] {
  override def apply(row: Int): Cell[A] = f(row)

  def cellMap[B](g: Cell[A] => Cell[B]): Column[B] = EvalColumn(f andThen g)

  // Materializes the selected rows eagerly into a dense column.
  def reindex(index: Array[Int]): Column[A] =
    DenseColumn.force(EvalColumn(index andThen f), index.length)

  def force(len: Int): Column[A] =
    DenseColumn.force(this, len)

  // Masked rows become NA (not available).
  def mask(mask: Mask): Column[A] = EvalColumn { row =>
    if (mask(row)) NA else f(row)
  }

  def setNA(naRow: Int): Column[A] = EvalColumn { row =>
    if (row == naRow) NA else f(row)
  }

  def memoize(optimistic: Boolean): Column[A] =
    if (optimistic) new OptimisticMemoizingColumn(f)
    else new PessimisticMemoizingColumn(f)

  // Fallback semantics: NA falls through to `that`; NM falls through only if
  // `that` has a value (otherwise stays NM, preserving "not meaningful").
  def orElse[A0 >: A](that: Column[A0]): Column[A0] =
    EvalColumn { row =>
      f(row) match {
        case NM => that(row) match {
          case NA => NM
          case cell => cell
        }
        case NA => that(row)
        case cell => cell
      }
    }

  def shift(n: Int): Column[A] = EvalColumn { row =>
    try {
      f(Checked.checked(row - n))
    } catch { case (_: ArithmeticOverflowException) =>
      // If we overflow, then it means that `row - n` overflowed and, hence,
      // wrapped around. Since `shift` is meant to just shift rows, and not
      // wrap them back around, we return an NA. So, if we have a `Column`
      // defined for all rows, and shift it forward 1 row, then
      // `Column(Int.MinValue)` should return `NA`.
      NA
    }
  }

  // Dense fast path when the other side is already dense; otherwise stay lazy.
  def zipMap[B, C](that: Column[B])(f: (A, B) => C): Column[C] = that match {
    case (that: DenseColumn[_]) =>
      DenseColumn.zipMap[A, B, C](this.force(that.values.length).asInstanceOf[DenseColumn[A]], that.asInstanceOf[DenseColumn[B]], f)
    case _ =>
      EvalColumn { row =>
        (this(row), that(row)) match {
          case (Value(a), Value(b)) => Value(f(a, b))
          case (NA, _) | (_, NA) => NA
          case _ => NM
        }
      }
  }
}
| tixxit/framian | framian/src/main/scala/framian/column/EvalColumn.scala | Scala | apache-2.0 | 2,883 |
package org.flowpaint.raster.image
import org.flowpaint.model2.RasterRenderer
import org.flowpaint.raster.channel.Channel
import org.flowpaint.raster.channel.Raster
import org.flowpaint.util.Rectangle
import org.flowpaint.raster.channel.{Channel, Raster}
import org.flowpaint.util.Rectangle
import org.flowpaint.raster.tasks.Operation
import org.flowpaint.raster.tile.{TileService, TileId}
/**
*
*/
/**
 * Operation that renders the given tiles of a source Raster into a destination
 * FastImage as packed ARGB ints, optionally compositing transparency over a
 * checkerboard pattern.
 *
 * @param tiles                 the tiles to render
 * @param source                raster providing the colour / alpha channels
 * @param destination           image whose backing int buffer is written to
 * @param renderAlphaAsCheckers when true, composite alpha over a checkerboard;
 *                              when false, store alpha in the pixel's high byte
 */
case class RenderOperation(tiles: Set[TileId],
                          source: Raster,
                          destination: FastImage,
                          renderAlphaAsCheckers: Boolean = true) extends Operation {

  // Checkerboard cell size and the two alternating background luminances.
  var alphaGridSize: Int = 16
  var alphaLuminance1 : Float = 0.5f
  var alphaLuminance2: Float = 0.7f

  // Channel names looked up in the source raster.
  var redChannel = 'red
  var greenChannel = 'green
  var blueChannel = 'blue
  var alphaChannel = 'alpha

  def description = "Rendering picture"

  def affectedTiles = tiles

  /** Renders one tile of the source raster into the destination buffer. */
  def doOperation(tileId: TileId) {
    // This algorithm isn't really a very optimal way to do this,
    // but its a bit hard to optimize due to the the block based design (which is needed for memory optimization reasons)

    val red: Channel = source.channels(redChannel)
    val green: Channel = source.channels(greenChannel)
    val blue: Channel = source.channels(blueChannel)
    val alpha: Channel = source.channels(alphaChannel)

    val rTile = red.getTile(tileId)
    // BUGFIX: the green and blue tiles were previously bound to swapped
    // variables (bTile = green.getTile, gTile = blue.getTile), which swapped
    // the green and blue components of every rendered pixel.
    val gTile = green.getTile(tileId)
    val bTile = blue.getTile(tileId)
    val aTile = alpha.getTile(tileId)

    val target = destination.buffer

    var x = 0
    var y = 0
    var i = 0
    var di = 0
    while (y < TileService.tileHeight) {
      x = 0
      // BUGFIX: start each scanline at this tile's own position for the
      // *current* row. Previously di was reset to `tileId.y1 * destination.width`
      // on every pass, so every source row overwrote the tile's first
      // destination row at column 0. (Assumes TileId exposes x1/y1 as the
      // tile's top-left pixel position, consistent with the prior y1 usage —
      // TODO confirm against TileId.)
      di = (tileId.y1 + y) * destination.width + tileId.x1

      if (renderAlphaAsCheckers)
        // Render alpha value as a solid checkerboard pattern behind visible content
        while (x < TileService.tileWidth) {
          val a = aTile(i)
          if (a >= 1f) {
            // Fully opaque: pack channels directly as 0xFFRRGGBB.
            target(di) = 0xFF000000 |
                         (rTile.getByte(i) << 16) |
                         (gTile.getByte(i) << 8) |
                         bTile.getByte(i)
          }
          else {
            // Calculate checkerboard alpha pattern if we have some transparency
            val alphaLuminance = if ((x % alphaGridSize * 2 < alphaGridSize) ==
                                     (y % alphaGridSize * 2 < alphaGridSize))
                                   alphaLuminance2
                                 else
                                   alphaLuminance1

            // Composite the source over the checkerboard background.
            val preMultipliedAlphaLuminance = (1f - a) * alphaLuminance
            val r = (255 * (preMultipliedAlphaLuminance + rTile(i) * a)).toInt
            val g = (255 * (preMultipliedAlphaLuminance + gTile(i) * a)).toInt
            val b = (255 * (preMultipliedAlphaLuminance + bTile(i) * a)).toInt
            target(di) = 0xFF000000 | (r << 16) | (g << 8) | b
          }

          i += 1
          x += 1
          di += 1
        }
      else
        // Render alpha value into highest byte
        while (x < TileService.tileWidth) {
          target(di) = (aTile.getByte(i) << 24) |
                       (rTile.getByte(i) << 16) |
                       (gTile.getByte(i) << 8) |
                       bTile.getByte(i)
          i += 1
          x += 1
          di += 1
        }

      y += 1
    }

    // TODO: Notify to allow image to repaint?
  }
}
| zzorn/flowpaint | src/main/scala/org/flowpaint/raster/image/RenderOperation.scala | Scala | gpl-2.0 | 3,407 |
package fpinscala
package monads
import parsing._
import testing._
import parallelism._
import state._
import parallelism.Par._
/** Covariant functor: supports mapping a function over values in a context F. */
trait Functor[F[_]] {
  def map[A,B](fa: F[A])(f: A => B): F[B]

  /** Splits a functor of pairs into a pair of functors ("unzip"). */
  def distribute[A,B](fab: F[(A, B)]): (F[A], F[B]) = {
    val firsts  = map(fab) { case (a, _) => a }
    val seconds = map(fab) { case (_, b) => b }
    (firsts, seconds)
  }

  /** Pushes an Either of functors into a functor of Eithers. */
  def codistribute[A,B](e: Either[F[A], F[B]]): F[Either[A, B]] =
    e.fold(fa => map(fa)(Left(_)), fb => map(fb)(Right(_)))
}
object Functor {
val listFunctor = new Functor[List] {
def map[A,B](as: List[A])(f: A => B): List[B] = as map f
}
}
trait Monad[M[_]] extends Functor[M] {
def unit[A](a: => A): M[A]
def flatMap[A,B](ma: M[A])(f: A => M[B]): M[B]
def map[A,B](ma: M[A])(f: A => B): M[B] =
flatMap(ma)(a => unit(f(a)))
def map2[A,B,C](ma: M[A], mb: M[B])(f: (A, B) => C): M[C] =
flatMap(ma)(a => map(mb)(b => f(a, b)))
def sequence[A](lma: List[M[A]]): M[List[A]] =
lma.foldRight(unit(List[A]()))((ma, mla) => map2(ma, mla)(_ :: _))
def traverse[A,B](la: List[A])(f: A => M[B]): M[List[B]] =
la.foldRight(unit(List[B]()))((a, mlb) => map2(f(a), mlb)(_ :: _))
def replicateM[A](n: Int, ma: M[A]): M[List[A]] =
if (n <= 0) unit(List[A]()) else map2(ma, replicateM(n-1, ma))(_ :: _)
def compose[A,B,C](f: A => M[B], g: B => M[C]): A => M[C] =
a => flatMap(f(a))(g)
// Implement in terms of `compose`:
def _flatMap[A,B](ma: M[A])(f: A => M[B]): M[B] =
compose((_: Unit) => ma, f)(())
def join[A](mma: M[M[A]]): M[A] = flatMap(mma)(ma => ma)
// Implement in terms of `join`:
def __flatMap[A,B](ma: M[A])(f: A => M[B]): M[B] =
join(map(ma)(f))
}
case class Reader[R, A](run: R => A)
object Monad {
  /** Monad instance for the property-testing `Gen` type. */
  val genMonad = new Monad[Gen] {
    def unit[A](a: => A): Gen[A] = Gen.unit(a)
    override def flatMap[A,B](ma: Gen[A])(f: A => Gen[B]): Gen[B] =
      ma flatMap f
  }

  // Exercise stub, intentionally unimplemented. Declared `lazy` because a
  // plain `val ... = ???` throws NotImplementedError during object
  // initialization, which would make EVERY member of this object unusable
  // (e.g. `Monad.optionMonad` would fail with ExceptionInInitializerError).
  lazy val parMonad: Monad[Par] = ???

  // Exercise stub, intentionally unimplemented (a `def`, so harmless until called).
  def parserMonad[P[+_]](p: Parsers[P]): Monad[P] = ???

  /** Monad instance for `Option`. */
  val optionMonad: Monad[Option] = new Monad[Option] {
    def unit[A](a: => A): Option[A] = Some(a)
    override def flatMap[A,B](oa: Option[A])(f: A => Option[B]): Option[B] =
      oa flatMap f
  }

  /** Monad instance for `Stream`. */
  val streamMonad: Monad[Stream] = new Monad[Stream] {
    def unit[A](a: => A): Stream[A] = Stream(a)
    override def flatMap[A,B](sa: Stream[A])(f: A => Stream[B]): Stream[B] =
      sa flatMap f
  }

  /** Monad instance for `List`. */
  val listMonad: Monad[List] = new Monad[List] {
    def unit[A](a: => A): List[A] = List(a)
    override def flatMap[A,B](la: List[A])(f: A => List[B]): List[B] =
      la flatMap f
  }

  /** Monad instance for `State[S, _]` (type-lambda fixes the state type `S`). */
  def stateMonad[S] = new Monad[({type lambda[x] = State[S,x]})#lambda] {
    def unit[A](a: => A): State[S,A] = State(s => (a, s))
    override def flatMap[A,B](st: State[S,A])(f: A => State[S,B]): State[S,B] =
      st flatMap f
  }

  /** Monad instance for the identity wrapper `Id`. */
  val idMonad: Monad[Id] = new Monad[Id] {
    def unit[A](a: => A) = Id(a)
    override def flatMap[A,B](ida: Id[A])(f: A => Id[B]): Id[B] = ida flatMap f
  }

  /** Monad instance for `Reader[R, _]`: sequenced computations all read the
    * same environment `r`. */
  def readerMonad[R] = new Monad[({type f[x] = Reader[R,x]})#f] {
    def unit[A](a: => A): Reader[R,A] = Reader(_ => a)
    override def flatMap[A,B](st: Reader[R,A])(f: A => Reader[R,B]): Reader[R,B] =
      Reader(r => f(st.run(r)).run(r))
  }
}
/** The identity monad's carrier: a bare wrapper around a single value. */
case class Id[A](value: A) {
  /** Transforms the wrapped value with `f` (expressed via `flatMap`). */
  def map[B](f: A => B): Id[B] = flatMap(a => Id(f(a)))
  /** Unwraps the value and applies the `Id`-producing function `f`. */
  def flatMap[B](f: A => Id[B]): Id[B] = f(value)
}
object Reader {
  /** A reader that simply returns the environment it is run against. */
  def ask[R]: Reader[R, R] = Reader(identity)
}
| feynmanliang/fpinscala | exercises/src/main/scala/fpinscala/monads/Monad.scala | Scala | mit | 3,392 |
package main.scala.org.cc
/** Character-code constants and small, allocation-light numeric parsers. */
object Ord {

  // Code points cached as Ints for fast range comparisons.
  val a : Int = 'a'.toInt
  val f : Int = 'f'.toInt
  val z : Int = 'z'.toInt

  val A : Int = 'A'.toInt
  val F : Int = 'F'.toInt
  val Z : Int = 'Z'.toInt

  val Zero : Int = '0'.toInt
  val Nine : Int = '9'.toInt

  val plus : Int = '+'.toInt
  val minus : Int = '-'.toInt
  val underscore : Int = '_'.toInt

  /** Parses hexadecimal digits from the iterator into an Int, accumulating
    * 4 bits per digit; characters that are not hex digits are ignored. */
  final def hex2Int(s : Iterator[Char]) : Int =
    s.foldLeft(0) { (acc, ch) =>
      val c = ch.toInt
      if (c >= A && c <= F) (acc << 4) | (c - A + 10)
      else if (c >= a && c <= f) (acc << 4) | (c - a + 10)
      else if (c >= Zero && c <= Nine) (acc << 4) | (c - Zero)
      else acc
    }

  /** Parses a decimal integer; each '-' flips the sign, any other non-digit
    * character is ignored. Input is expected to match [+-]*[0-9]*. */
  final def dec2Int(s : Iterator[Char]) : Int = {
    val (sign, magnitude) = s.foldLeft((1, 0)) { case ((sg, acc), ch) =>
      val c = ch.toInt
      if (c >= Zero && c <= Nine) (sg, 10 * acc + (c - Zero))
      else if (c == minus) (-sg, acc)
      else (sg, acc)
    }
    sign * magnitude
  }
}
| christophe-calves/alpha | src/main/scala/org/cc/Ord.scala | Scala | gpl-3.0 | 1,081 |
package scalariform.lexer
import scalariform._
import scalariform.lexer.Tokens._
import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.TestFailedException
import org.scalatest.TestPendingException
import java.io._
/**
* Test full tokeniser, including newline inferencing.
*/
/**
 * Checks that the full tokeniser (lexer + newline inferencing) inserts NEWLINE
 * tokens in the expected positions -- here, after a statement inside a match
 * case body (regression test for issue #60).
 */
class NewlineInferencerTest extends FlatSpec with ShouldMatchers {

  // Pimps a source string with `shouldProduceTokens`; the implicit parameters
  // default to strict (non-forgiving) lexing against the default Scala version.
  implicit def string2TestString(s: String)(implicit forgiveErrors: Boolean = false, scalaVersion: ScalaVersion = ScalaVersions.DEFAULT) =
    new TestString(s, forgiveErrors, scalaVersion);

  // See issue #60
  """
    a match {
      case b =>
        val c = d
      case e =>
    }""" shouldProduceTokens (
    VARID, MATCH, LBRACE,
    CASE, VARID, ARROW,
    VAL, VARID, EQUALS, VARID, NEWLINE,
    CASE, VARID, ARROW,
    RBRACE)

  class TestString(s: String, forgiveErrors: Boolean = false, scalaVersion: ScalaVersion = ScalaVersions.DEFAULT) {

    // NOTE(review): the trailing empty parameter list looks deliberate
    // (presumably to keep the infix DSL call above unambiguous) -- confirm.
    def shouldProduceTokens(toks: TokenType*)() {
      check(s.stripMargin, toks.toList)
    }

    // Registers one FlatSpec test that tokenises `s` and compares the token
    // types (minus the trailing EOF) against `expectedTokens`.
    private def check(s: String, expectedTokens: List[TokenType]) {
      it should ("tokenise >>>" + s + "<<< as >>>" + expectedTokens + "<<< forgiveErrors = " + forgiveErrors + ", scalaVersion = " + scalaVersion) in {
        val actualTokens: List[Token] = ScalaLexer.tokenise(s, forgiveErrors, scalaVersion.toString)
        val actualTokenTypes = actualTokens.map(_.tokenType)
        require(actualTokenTypes.last == EOF, "Last token must be EOF, but was " + actualTokens.last.tokenType)
        require(actualTokenTypes.count(_ == EOF) == 1, "There must only be one EOF token")
        // NOTE(review): `reconstitutedSource` is computed but never asserted
        // against the input -- possibly a leftover round-trip check; confirm.
        val reconstitutedSource = actualTokens.init.map(_.rawText).mkString
        require(actualTokenTypes.init == expectedTokens, "Tokens do not match. Expected " + expectedTokens + ", but was " + actualTokenTypes.init)
      }
    }

  }

}
| gangstead/scalariform | scalariform/src/test/scala/scalariform/lexer/NewlineInferencerTest.scala | Scala | mit | 1,879 |
package onion.tools.option
/**
* @author Kota Mizushima
*/
/** A parsed command-line parameter. */
sealed trait CommandLineParam
/** A parameter that carries an associated string value. */
case class ValuedParam(value: String) extends CommandLineParam
/** A flag-style parameter with no associated value. */
case object NoValuedParam extends CommandLineParam
| onionlang/onion | src/main/scala/onion/tools/option/CommandLineParam.scala | Scala | bsd-3-clause | 206 |
// Copyright 2011-2012 James Michael Callahan
// See LICENSE-2.0 file for licensing information.
package org.scalagfx.math
//--------------------------------------------------------------------------------------------------
// P O S 3 D
//--------------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------------
// Supported Subset of Operations:
//
// P + S -> P P - S -> P P * S -> P P / V -> P
// V + S -> V V - S -> V V * S -> V V / S -> V
//
// --- P - P -> V --- ---
// P + V -> P P - V -> P P * V -> P P / V -> P
// V + V -> V V - V -> V V * V -> V V / V -> V
//
// S = Scalar(Double), P = Position(Pos3d), V = Vector(Vec3d)
//--------------------------------------------------------------------------------------------------
/** Companion object for Pos3d. */
/** Factory and utility functions for [[Pos3d]]. */
object Pos3d {

  /** Builds a position from its three coordinates. */
  def apply(x: Double, y: Double, z: Double) = new Pos3d(x, y, z)

  /** Builds a position with all three coordinates equal to `s`. */
  def apply(s: Double) = apply(s, s, s)

  /** The origin (0, 0, 0). */
  val origin: Pos3d = apply(0.0)

  /** Whether corresponding components of `a` and `b` differ by at most `epsilon`. */
  def equiv(a: Pos3d, b: Pos3d, epsilon: Double): Boolean =
    a.equiv(b, epsilon)

  /** Whether corresponding components of `a` and `b` are within the type
    * specific epsilon. */
  def equiv(a: Pos3d, b: Pos3d): Boolean =
    a.equiv(b)

  /** Component-wise minimum of two positions. */
  def min(a: Pos3d, b: Pos3d): Pos3d =
    compwise(a, b, (u: Double, v: Double) => scala.math.min(u, v))

  /** Component-wise maximum of two positions. */
  def max(a: Pos3d, b: Pos3d): Pos3d =
    compwise(a, b, (u: Double, v: Double) => scala.math.max(u, v))

  /** Linear interpolation between `a` and `b` at parameter `t`. */
  def lerp(a: Pos3d, b: Pos3d, t: Double): Pos3d =
    compwise(a, b, Scalar.lerp(_, _, t))

  /** Smooth-step interpolation between `a` and `b` at parameter `t`. */
  def smoothlerp(a: Pos3d, b: Pos3d, t: Double): Pos3d =
    compwise(a, b, Scalar.smoothlerp(_, _, t))

  /** Combines `a` and `b` component-by-component with the binary operator `f`. */
  def compwise(a: Pos3d, b: Pos3d, f: (Double, Double) => Double): Pos3d =
    apply(f(a.x, b.x), f(a.y, b.y), f(a.z, b.z))
}
/** An immutable 3-dimensional vector of Double element type used to represent a position in
* space for use in computational geometry applications.
*
* This is not meant to be a general purpose vector, but rather to only defined the limited
* set of operations which make geometric sense. This allows Scala type checking to catch
* many of the most common errors where scalars, vectors or positions are being accidently
* used in a way that is geometrically meaningless. */
/** An immutable 3-dimensional vector of Double components representing a
  * position in space. Only the geometrically meaningful operations are
  * defined, so the type system can distinguish positions from free vectors
  * (Vec3d) and scalars. */
class Pos3d(val x: Double, val y: Double, val z: Double) extends Vector3dLike {
  type Self = Pos3d

  /** A copy with the X component replaced. */
  def newX(v: Double): Pos3d = Pos3d(v, y, z)

  /** A copy with the Y component replaced. */
  def newY(v: Double): Pos3d = Pos3d(x, v, z)

  /** A copy with the Z component replaced. */
  def newZ(v: Double): Pos3d = Pos3d(x, y, v)

  /** A copy with the component at index `i` (0=X, 1=Y, 2=Z) replaced. */
  def newComp(i: Int, v: Double) =
    i match {
      case 0 => newX(v)
      case 1 => newY(v)
      case 2 => newZ(v)
      case _ => throw new IllegalArgumentException("Invalid index (" + i + ")!")
    }

  /** This position reflected about the origin. */
  def negated: Pos3d = map(c => -c)

  /** Adds a scalar to every component. */
  def + (scalar: Double): Pos3d = map(_ + scalar)

  /** Component-wise translation by a vector. */
  def + (that: Vec3d): Pos3d = Pos3d(x + that.x, y + that.y, z + that.z)

  /** Subtracts a scalar from every component. */
  def - (scalar: Double): Pos3d = map(_ - scalar)

  /** Component-wise translation by the negation of a vector. */
  def - (that: Vec3d): Pos3d = Pos3d(x - that.x, y - that.y, z - that.z)

  /** The vector pointing from `that` position to this one. */
  def - (that: Pos3d): Vec3d = Vec3d(x - that.x, y - that.y, z - that.z)

  /** Multiplies every component by a scalar. */
  def * (scalar: Double): Pos3d = map(_ * scalar)

  /** Component-wise multiplication by a vector. */
  def * (that: Vec3d): Pos3d = Pos3d(x * that.x, y * that.y, z * that.z)

  /** Divides every component by a scalar. */
  def / (scalar: Double): Pos3d = map(_ / scalar)

  /** Component-wise division by a vector. */
  def / (that: Vec3d): Pos3d = Pos3d(x / that.x, y / that.y, z / that.z)

  /** Structural equality against other Pos3d instances only. */
  override def equals(that: Any): Boolean =
    that match {
      case p: Pos3d => (p canEqual this) && (x == p.x) && (y == p.y) && (z == p.z)
      case _        => false
    }

  /** Supports symmetric equals in the presence of subclassing. */
  def canEqual(that: Any): Boolean = that.isInstanceOf[Pos3d]

  /** Hash combining all three components (consistent with equals). */
  override def hashCode: Int =
    47 * (43 * (41 + x.##) + y.##) + z.##

  /** Whether `p` holds for all corresponding component pairs of the two positions. */
  def forall(that: Pos3d)(p: (Double, Double) => Boolean): Boolean =
    p(x, that.x) && p(y, that.y) && p(z, that.z)

  /** Whether `p` holds for any corresponding component pair of the two positions. */
  def forany(that: Pos3d)(p: (Double, Double) => Boolean): Boolean =
    p(x, that.x) || p(y, that.y) || p(z, that.z)

  /** A new position obtained by applying `f` to each component. */
  def map(f: (Double) => Double): Pos3d = Pos3d(f(x), f(y), f(z))

  /** Human-readable representation with two decimals per component. */
  override def toString() =
    "Pos3d(%.2f, %.2f, %.2f)".format(x, y, z)
}
| JimCallahan/Graphics | src/org/scalagfx/math/Pos3d.scala | Scala | apache-2.0 | 10,079 |
package com.github
import scala.collection.mutable
import scala.util.Random
import akka.actor.Props
import com.github.core.actors.SlaServiceMock
import com.github.model.Sla
/**
 * Verifies that the SLA service mock resolves the same token to the same user
 * on repeated lookups.
 */
class SlaTokenToUserTest extends ActorTestTemplate("SlaTokenToUserSystem") {

  "An SlaService" must {
    val sla = system.actorOf(Props[SlaServiceMock])

    "Must return same user on same token" in {
      val token = Random.alphanumeric.take(5).mkString
      // Collects every distinct user name seen in the responses.
      val set = mutable.Set[String]()

      // Query the service three times with the same token (was previously
      // three copy-pasted send/expect/sleep stanzas).
      (1 to 3).foreach { attempt =>
        sla ! token
        expectMsgPF() {
          case Sla(user, _) =>
            set += user
            println(attempt)
        }
        // NOTE(review): short pause between requests, presumably to let the
        // mock's internal state settle -- confirm it is still needed.
        if (attempt < 3) Thread.sleep(10)
      }

      println(set)
      // Every response must have carried the same user.
      set.size shouldEqual 1
    }
  }
}
| Kibaras/Throttling-Service | src/test/scala/com/github/SlaTokenToUserTest.scala | Scala | apache-2.0 | 942 |
package io.toolsplus.atlassian.connect.play.ws.jwt
import java.net.URI
import io.toolsplus.atlassian.connect.play.api.models.AtlassianHost
import io.toolsplus.atlassian.connect.play.auth.jwt.symmetric.JwtGenerator
import play.api.http.HeaderNames.{AUTHORIZATION, USER_AGENT}
import play.api.libs.ws.WSSignatureCalculator
import play.shaded.ahc.org.asynchttpclient.{Request, RequestBuilderBase, SignatureCalculator}
/**
 * WS signature calculator that signs outgoing requests to an Atlassian host by
 * setting a JWT `Authorization` header (and a library `User-Agent` header).
 */
class JwtSignatureCalculator(host: AtlassianHost, jwtGenerator: JwtGenerator)
    extends WSSignatureCalculator
    with SignatureCalculator {

  // Computes a JWT for the request and, on success, sets the User-Agent and
  // Authorization headers on the request being built.
  override def calculateAndAddSignature(
      request: Request,
      requestBuilder: RequestBuilderBase[_]): Unit = {
    // NOTE(review): the mapped result is discarded -- if `generateJwt` fails,
    // the request is sent WITHOUT an Authorization header and the failure is
    // silently swallowed. Confirm this is the intended behaviour.
    generateJwt(request, host).map { jwt =>
      request.getHeaders
        .set(USER_AGENT, JwtSignatureCalculator.userAgent)
        .set(AUTHORIZATION, s"JWT $jwt")
    }
  }

  // Delegates token creation to the injected generator, keyed on the request
  // method and canonical URL plus the target host.
  private def generateJwt(request: Request, host: AtlassianHost) = {
    jwtGenerator.createJwtToken(request.getMethod,
                                URI.create(request.getUrl),
                                host)
  }
}
object JwtSignatureCalculator {

  /** User-Agent header value attached to signed requests. */
  val userAgent = "atlassian-connect-play"

  /** Creates a signature calculator for the implicitly available host. */
  def apply(jwtGenerator: JwtGenerator)(
      implicit host: AtlassianHost): JwtSignatureCalculator =
    new JwtSignatureCalculator(host, jwtGenerator)
}
| toolsplus/atlassian-connect-play | modules/core/app/io/toolsplus/atlassian/connect/play/ws/jwt/JwtSignatureCalculator.scala | Scala | apache-2.0 | 1,325 |
package de.unihamburg.vsis.sddf.visualisation.model
import org.apache.spark.rdd.RDD
import de.unihamburg.vsis.sddf.reading.SymPair
import de.unihamburg.vsis.sddf.reading.Tuple
/** Holds the goldstandard pair RDD for analysis/visualisation purposes. */
class GoldstandardModel extends BasicAnalysable {

  // Backing store; None until a goldstandard has been assigned.
  var _goldstandard: Option[RDD[SymPair[Tuple]]] = None

  /** The goldstandard, if one has been set. */
  def goldstandard = _goldstandard

  /** Assigns the goldstandard (null is mapped to None via Option). */
  def goldstandard_=(goldstandard: RDD[SymPair[Tuple]]) = _goldstandard = Option(goldstandard)

  // Number of goldstandard pairs; the RDD is counted once on first access.
  lazy val goldstandardSize = _goldstandard match {
    case Some(gs) => gs.count()
    case None     => throw new Exception("No goldstandard present")
  }
}
| numbnut/sddf | src/main/scala/de/unihamburg/vsis/sddf/visualisation/model/GoldstandardModel.scala | Scala | gpl-3.0 | 593 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.sources
import org.apache.spark.sql.{Row, QueryTest}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.util.Utils
/**
 * Verifies that `DataFrameWriter.partitionBy` can write out many distinct
 * partition directories and that the data round-trips through a read.
 */
class PartitionedWriteSuite extends QueryTest with SharedSQLContext {
  import testImplicits._

  test("write many partitions") {
    val path = Utils.createTempDir()
    // createTempDir creates the directory; remove it so save() can create it.
    path.delete()

    // 100 distinct ids => 100 partition directories keyed by `id`.
    val df = ctx.range(100).select($"id", lit(1).as("data"))
    df.write.partitionBy("id").save(path.getCanonicalPath)

    // Expected rows are (data, id): the partition column comes back after the
    // data column on read.
    checkAnswer(
      ctx.read.load(path.getCanonicalPath),
      (0 to 99).map(Row(1, _)).toSeq)

    Utils.deleteRecursively(path)
  }

  test("write many partitions with repeats") {
    val path = Utils.createTempDir()
    path.delete()

    // Duplicate every id so each partition holds two identical rows.
    val base = ctx.range(100)
    val df = base.unionAll(base).select($"id", lit(1).as("data"))
    df.write.partitionBy("id").save(path.getCanonicalPath)

    checkAnswer(
      ctx.read.load(path.getCanonicalPath),
      (0 to 99).map(Row(1, _)).toSeq ++ (0 to 99).map(Row(1, _)).toSeq)

    Utils.deleteRecursively(path)
  }
}
| ArvinDevel/onlineAggregationOnSparkV2 | sql/core/src/test/scala/org/apache/spark/sql/sources/PartitionedWriteSuite.scala | Scala | apache-2.0 | 1,890 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.helptosavefrontend.models.eligibility
import play.api.libs.json.{Format, Json}
/** Outcome of a Help-to-Save eligibility check; every outcome carries the raw
  * response returned by the eligibility-check service. */
sealed trait EligibilityCheckResultType {
  // The underlying service response this outcome was derived from.
  val value: EligibilityCheckResponse
}
object EligibilityCheckResultType {

  /** The applicant is eligible for a Help-to-Save account. */
  case class Eligible(value: EligibilityCheckResponse) extends EligibilityCheckResultType

  /** The applicant is not eligible. */
  case class Ineligible(value: EligibilityCheckResponse) extends EligibilityCheckResultType

  /** The applicant already holds an account. */
  case class AlreadyHasAccount(value: EligibilityCheckResponse) extends EligibilityCheckResultType

  object Eligible {
    implicit val format: Format[Eligible] = Json.format[Eligible]
  }

  object Ineligible {
    implicit val format: Format[Ineligible] = Json.format[Ineligible]
  }

  // NOTE(review): unlike Eligible/Ineligible, AlreadyHasAccount has no JSON
  // Format instance -- confirm it is never (de)serialised on its own.

  /** Exhaustive fold over the three possible outcomes. */
  implicit class Ops(val result: EligibilityCheckResultType) extends AnyVal {

    def fold[A](
      ifEligible: EligibilityCheckResponse ⇒ A,
      ifIneligible: EligibilityCheckResponse ⇒ A,
      ifAlreadyHasAccount: EligibilityCheckResponse ⇒ A
    ): A = result match {
      case Eligible(reason)          ⇒ ifEligible(reason)
      case Ineligible(reason)        ⇒ ifIneligible(reason)
      case AlreadyHasAccount(reason) ⇒ ifAlreadyHasAccount(reason)
    }
  }

}
| hmrc/help-to-save-frontend | app/uk/gov/hmrc/helptosavefrontend/models/eligibility/EligibilityCheckResultType.scala | Scala | apache-2.0 | 1,767 |
package io.youi.drawable
/** Something that can paint itself into a drawing [[Context]] at an offset. */
trait Drawable extends Modifiable {
  // Draws this drawable at position (x, y) within the given context.
  def draw(context: Context, x: Double, y: Double): Unit
}
object Drawable {
  /** Null-object drawable that paints nothing. */
  object None extends Drawable {
    override def draw(context: Context, x: Double, y: Double): Unit = {}
  }
}
/*
* Copyright 2014 Andrey Kutyrev
*
* Licensed under the the GNU Public License v3.0;
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.gnu.org/licenses/gpl.html
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ========================================================================
*/
package squ1b3r.thingummies.proxy
import java.io.File
import cpw.mods.fml.common.Loader
import cpw.mods.fml.common.Mod.EventHandler
import cpw.mods.fml.common.event.{FMLInitializationEvent, FMLPreInitializationEvent, FMLPostInitializationEvent}
import net.minecraftforge.common.config.Configuration
import squ1b3r.thingummies.blocks.{BlockRecipes, ModBlocks}
import squ1b3r.thingummies.handler.ConfigurationHandler
import squ1b3r.thingummies.integration.Chisel.ThingummiesChisel
import squ1b3r.thingummies.integration.FMP.ThingummiesFMP
import squ1b3r.thingummies.items.{ItemRecipes, ModItems}
import squ1b3r.thingummies.reference.Reference
/** Sided proxy shared by client and server: wires up configuration, block and
  * item registration, recipes, and optional cross-mod integrations. */
class CommonProxy {

  /**
   * FML preInit: loads the mod configuration from
   * `<configDir>/<ModID>/main.cfg` and pre-initialises blocks and items.
   *
   * @param event FML event
   */
  @EventHandler
  def preInit(event: FMLPreInitializationEvent): Unit = {

    ConfigurationHandler.loadConfiguration(
      new Configuration(new File(event.getModConfigurationDirectory, Reference.ModID + "/main.cfg"))
    )

    ModBlocks.preInit()
    ModItems.preInit()
  }

  /**
   * FML Init: registers recipes, then conditionally registers item recipes
   * and block integrations for each optional companion mod that is loaded.
   *
   * @param event FML event
   */
  @EventHandler
  def init(event: FMLInitializationEvent): Unit = {

    // Recipes
    BlockRecipes.init()

    // Item recipes depend on Redstone Arsenal being present.
    if (Loader.isModLoaded(Reference.dependentMods.RedstoneArsenalID))
      ItemRecipes.init()

    if (Loader.isModLoaded(Reference.dependentMods.ForgeMultipartID))
      ThingummiesFMP.registerBlocks()

    if (Loader.isModLoaded(Reference.dependentMods.ChiselID))
      ThingummiesChisel.registerBlocks()
  }

  /**
   * FML postInit: intentionally empty (nothing to do after other mods load).
   *
   * @param event FML event
   */
  @EventHandler
  def postInit(event: FMLPostInitializationEvent): Unit = {}
}
| squ1b3r/Thingummies | src/main/scala/squ1b3r/thingummies/proxy/CommonProxy.scala | Scala | gpl-3.0 | 2,283 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon
package synthesis
package rules
import purescala.Expressions._
import purescala.ExprOps._
import purescala.Constructors._
import purescala.TypeOps._
/** Synthesis rule that drops output variables the specification `phi` places
  * no constraint on: the reduced sub-problem is solved without them and the
  * dropped outputs are filled in with their type's simplest value. Only fires
  * when the problem has no output tests (which could constrain them). */
case object UnconstrainedOutput extends NormalizingRule("Unconstr.Output") {
  def instantiateOn(implicit hctx: SearchContext, p: Problem): Traversable[RuleInstantiation] = {
    if (!p.hasOutputTests) {
      // Output vars not mentioned in phi, restricted to types whose
      // simplestValue is an actual constructible expression.
      val unconstr = (p.xs.toSet -- variablesOf(p.phi)).filter { x =>
        isRealExpr(simplestValue(x.getType))
      }

      if (unconstr.nonEmpty) {
        // Sub-problem without the unconstrained outputs; the example bank
        // drops the corresponding output columns as well.
        val sub = p.copy(xs = p.xs.filterNot(unconstr), eb = p.qeb.removeOuts(unconstr))

        // Rebuild the full output tuple: solved values for constrained vars,
        // simplestValue placeholders for the dropped ones.
        val onSuccess: List[Solution] => Option[Solution] = {
          case List(s) =>
            val term = letTuple(sub.xs, s.term, tupleWrap(p.xs.map(id => if (unconstr(id)) simplestValue(id.getType) else Variable(id))))
            Some(Solution(s.pre, s.defs, term, s.isTrusted))
          case _ =>
            None
        }

        Some(decomp(List(sub), onSuccess, s"Unconst. out ${p.xs.filter(unconstr).mkString(", ")}"))
      } else {
        None
      }
    } else {
      None
    }
  }
}
| regb/leon | src/main/scala/leon/synthesis/rules/UnconstrainedOutput.scala | Scala | gpl-3.0 | 1,164 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package expr
import com.intellij.lang.ASTNode
import com.intellij.psi.scope.PsiScopeProcessor
import com.intellij.psi.tree.TokenSet
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.psi.{PsiElement, ResolveState}
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.base.ScFieldId
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.{ScBindingPattern, ScCaseClause, ScCaseClauses}
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScParameter
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScDeclaredElementsHolder, ScFunction, ScTypeAlias}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypedDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.impl.{ScalaPsiElementFactory, ScalaPsiManager}
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api._
import org.jetbrains.plugins.scala.lang.psi.types.api.designator.{ScDesignatorType, ScProjectionType}
import org.jetbrains.plugins.scala.lang.psi.types.result.{Failure, Success, TypeResult, TypingContext}
import scala.collection.immutable.HashSet
import scala.collection.mutable
/**
* Author: ilyas, alefas
*/
trait ScBlock extends ScExpression with ScDeclarationSequenceHolder with ScImportsHolder {
protected override def innerType(ctx: TypingContext): TypeResult[ScType] = {
if (hasCaseClauses) {
val caseClauses = findChildByClassScala(classOf[ScCaseClauses])
val clauses: Seq[ScCaseClause] = caseClauses.caseClauses
val clausesType = clauses.foldLeft(Nothing: ScType)((tp, clause) => tp.lub(clause.expr match {
case Some(expr) => expr.getType(TypingContext.empty).getOrNothing
case _ => Nothing
}))
getContext match {
case c: ScCatchBlock =>
val manager = ScalaPsiManager.instance(getProject)
val funs = manager.getCachedClasses(getResolveScope, "scala.PartialFunction")
val fun = funs.find(_.isInstanceOf[ScTrait]).getOrElse(return Failure("Cannot find PartialFunction class", Some(this)))
val throwable = manager.getCachedClass(getResolveScope, "java.lang.Throwable").orNull
if (throwable == null) return Failure("Cannot find Throwable class", Some(this))
return Success(ScParameterizedType(ScDesignatorType(fun), Seq(ScDesignatorType(throwable), clausesType)), Some(this))
case _ =>
val et = expectedType(fromUnderscore = false).getOrElse(return Failure("Cannot infer type without expected type", Some(this)))
def removeVarianceAbstracts(scType: ScType) = {
var index = 0
scType.recursiveVarianceUpdate((tp: ScType, i: Int) => {
tp match {
case ScAbstractType(_, lower, upper) =>
i match {
case -1 => (true, lower)
case 1 => (true, upper)
case 0 => (true, ScExistentialArgument(s"_$$${index += 1; index}", Nil, lower, upper))
}
case _ => (false, tp)
}
}, 1).unpackedType
}
return et match {
case f@FunctionType(_, params) =>
Success(FunctionType(clausesType, params.map(removeVarianceAbstracts))
(getProject, getResolveScope), Some(this))
case f@PartialFunctionType(_, param) =>
Success(PartialFunctionType(clausesType, removeVarianceAbstracts(param))
(getProject, getResolveScope), Some(this))
case _ =>
Failure("Cannot infer type without expected type of scala.FunctionN or scala.PartialFunction", Some(this))
}
}
}
val inner = lastExpr match {
case None =>
ScalaPsiUtil.fileContext(this) match {
case scalaFile: ScalaFile if scalaFile.isCompiled => Nothing
case _ => Unit
}
case Some(e) =>
val m = new mutable.HashMap[String, ScExistentialArgument]
def existize(t: ScType, visited: HashSet[ScType]): ScType = {
if (visited.contains(t)) return t
val visitedWithT = visited + t
t match {
case ScDesignatorType(p: ScParameter) if p.owner.isInstanceOf[ScFunctionExpr] && p.owner.asInstanceOf[ScFunctionExpr].result.contains(this) =>
val t = existize(p.getType(TypingContext.empty).getOrAny, visitedWithT)
val ex = new ScExistentialArgument(p.name, Nil, t, t)
m.put(p.name, ex)
ex
case ScDesignatorType(typed: ScBindingPattern) if typed.nameContext.isInstanceOf[ScCaseClause] &&
typed.nameContext.asInstanceOf[ScCaseClause].expr.contains(this) =>
val t = existize(typed.getType(TypingContext.empty).getOrAny, visitedWithT)
val ex = new ScExistentialArgument(typed.name, Nil, t, t)
m.put(typed.name, ex)
ex
case ScDesignatorType(des) if PsiTreeUtil.isContextAncestor(this, des, true) => des match {
case obj: ScObject =>
val t = existize(leastClassType(obj), visitedWithT)
val ex = new ScExistentialArgument(obj.name, Nil, t, t)
m.put(obj.name, ex)
ex
case clazz: ScTypeDefinition =>
val t = existize(leastClassType(clazz), visitedWithT)
val vars = clazz.typeParameters.map(TypeParameterType(_, None)).toList
val ex = new ScExistentialArgument(clazz.name, vars, t, t)
m.put(clazz.name, ex)
ex
case typed: ScTypedDefinition =>
val t = existize(typed.getType(TypingContext.empty).getOrAny, visitedWithT)
val ex = new ScExistentialArgument(typed.name, Nil, t, t)
m.put(typed.name, ex)
ex
case _ => t
}
case proj@ScProjectionType(p, elem, s) => ScProjectionType(existize(p, visitedWithT), elem, s)
case ScCompoundType(comps, signatureMap, typesMap) =>
new ScCompoundType(comps.map(existize(_, visitedWithT)), signatureMap.map {
case (s: Signature, tp) =>
def updateTypeParam: TypeParameter => TypeParameter = {
case TypeParameter(typeParameters, lowerType, upperType, psiTypeParameter) =>
TypeParameter(typeParameters.map(updateTypeParam),
new Suspension(existize(lowerType.v, visitedWithT)),
new Suspension(existize(upperType.v, visitedWithT)),
psiTypeParameter)
}
val pTypes: List[Seq[() => ScType]] =
s.substitutedTypes.map(_.map(f => () => existize(f(), visitedWithT)))
val tParams = s.typeParams.subst(updateTypeParam)
val rt: ScType = existize(tp, visitedWithT)
(new Signature(s.name, pTypes, s.paramLength, tParams,
ScSubstitutor.empty, s.namedElement match {
case fun: ScFunction =>
ScFunction.getCompoundCopy(pTypes.map(_.map(_()).toList), tParams.toList, rt, fun)
case b: ScBindingPattern => ScBindingPattern.getCompoundCopy(rt, b)
case f: ScFieldId => ScFieldId.getCompoundCopy(rt, f)
case named => named
}, s.hasRepeatedParam), rt)
}, typesMap.map {
case (s, sign) => (s, sign.updateTypes(existize(_, visitedWithT)))
})
case JavaArrayType(argument) => JavaArrayType(existize(argument, visitedWithT))
case ParameterizedType(des, typeArgs) =>
ScParameterizedType(existize(des, visitedWithT), typeArgs.map(existize(_, visitedWithT)))
case ex@ScExistentialType(q, wildcards) =>
new ScExistentialType(existize(q, visitedWithT), wildcards.map {
ex => new ScExistentialArgument(ex.name, ex.args, existize(ex.lower, visitedWithT), existize(ex.upper, visitedWithT))
})
case _ => t
}
}
val t = existize(e.getType(TypingContext.empty).getOrAny, HashSet.empty)
if (m.isEmpty) t else new ScExistentialType(t, m.values.toList).simplify()
}
Success(inner, Some(this))
}
  /** Computes the least (most specific) denotable type for the template definition `t`:
   *  a compound type of all supertypes plus the members/aliases declared in the template
   *  body, or just the single supertype when the body contributes nothing.
   */
  private def leastClassType(t : ScTemplateDefinition): ScType = {
    val (holders, aliases): (Seq[ScDeclaredElementsHolder], Seq[ScTypeAlias]) = t.extendsBlock.templateBody match {
      case Some(b: ScTemplateBody) =>
        // jzaugg: Without these type annotations, a class cast exception occured above. I'm not entirely sure why.
        (b.holders: Seq[ScDeclaredElementsHolder], b.aliases: Seq[ScTypeAlias])
      case None => (Seq.empty, Seq.empty)
    }
    val superTypes = t.extendsBlock.superTypes
    if (superTypes.length > 1 || holders.nonEmpty || aliases.nonEmpty) {
      ScCompoundType.fromPsi(superTypes, holders.toList, aliases.toList)
    // NOTE(review): `superTypes.head` throws if superTypes is empty — presumably an
    // extends block always yields at least one supertype here; confirm.
    } else superTypes.head
  }
  // Overridden elsewhere for blocks consisting of `case` clauses; false for a plain block.
  def hasCaseClauses: Boolean = false
  def isInCatchBlock: Boolean = getContext.isInstanceOf[ScCatchBlock]
  // A block of case clauses outside a catch block is sugar for an anonymous partial function.
  def isAnonymousFunction = hasCaseClauses && !isInCatchBlock
  def exprs: Seq[ScExpression] = findChildrenByClassScala(classOf[ScExpression]).toSeq
  def statements: Seq[ScBlockStatement] = findChildrenByClassScala(classOf[ScBlockStatement]).toSeq
  // A block node owns at most one closing-brace token.
  def hasRBrace: Boolean = getNode.getChildren(TokenSet.create(ScalaTokenTypes.tRBRACE)).length == 1
  def getRBrace: Option[ASTNode] = getNode.getChildren(TokenSet.create(ScalaTokenTypes.tRBRACE)) match {
    case Array(node) => Some(node)
    case _ => None
  }
  def lastExpr = findLastChild(classOf[ScExpression])
  def lastStatement = findLastChild(classOf[ScBlockStatement])
  /** Inserts `decl`, followed by a newline node, into this block's AST immediately
   *  before `before`. Always returns true.
   */
  def addDefinition(decl: ScMember, before: PsiElement): Boolean = {
    getNode.addChild(decl.getNode,before.getNode)
    getNode.addChild(ScalaPsiElementFactory.createNewLineNode(getManager), before.getNode)
    true
  }
  /** Resolves names visible in this block: first the block's own declaration
   *  sequence, then its import statements. Short-circuits as soon as either
   *  super implementation reports the processor is done (returns false). */
  override def processDeclarations(processor: PsiScopeProcessor,
                                  state : ResolveState,
                                  lastParent: PsiElement,
                                  place: PsiElement): Boolean =
    super[ScDeclarationSequenceHolder].processDeclarations(processor, state, lastParent, place) &&
    super[ScImportsHolder].processDeclarations(processor, state, lastParent, place)
  def needCheckExpectedType = true
}
object ScBlock {
  /** Extractor enabling `case ScBlock(statements @ _*)` pattern matching on a block's statements. */
  def unapplySeq(block: ScBlock): Option[Seq[ScBlockStatement]] = Option(block.statements)
} | whorbowicz/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/api/expr/ScBlock.scala | Scala | apache-2.0 | 10,846 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.testsuite.javalib.net
import java.net.{URI, URISyntaxException}
import org.junit.Assert._
import org.junit.Test
import org.scalajs.testsuite.utils.AssertThrows._
import org.scalajs.testsuite.utils.Platform.executingInJVM
class URITest {
  /** Asserts every accessor of `uri` against the expected component values.
   *
   *  The second parameter list carries the decoded components (`null` / `-1`
   *  meaning "undefined"); the third carries the raw, still percent-encoded
   *  components, defaulting to the decoded values when nothing is escaped.
   */
  def expectURI(uri: URI, isAbsolute: Boolean, isOpaque: Boolean)(
      authority: String = null, fragment: String = null,
      host: String = null, path: String = null, port: Int = -1,
      query: String = null, scheme: String = null, userInfo: String = null,
      schemeSpecificPart: String = null)(rawAuthority: String = authority,
      rawFragment: String = fragment, rawPath: String = path,
      rawQuery: String = query, rawUserInfo: String = userInfo,
      rawSchemeSpecificPart: String = schemeSpecificPart): Unit = {
    assertEquals(authority, uri.getAuthority())
    assertEquals(fragment, uri.getFragment())
    assertEquals(host, uri.getHost())
    assertEquals(path, uri.getPath())
    assertEquals(port, uri.getPort())
    assertEquals(query, uri.getQuery())
    assertEquals(rawAuthority, uri.getRawAuthority())
    assertEquals(rawFragment, uri.getRawFragment())
    assertEquals(rawPath, uri.getRawPath())
    assertEquals(rawQuery, uri.getRawQuery())
    assertEquals(rawSchemeSpecificPart, uri.getRawSchemeSpecificPart())
    assertEquals(rawUserInfo, uri.getRawUserInfo())
    assertEquals(scheme, uri.getScheme())
    assertEquals(schemeSpecificPart, uri.getSchemeSpecificPart())
    assertEquals(userInfo, uri.getUserInfo())
    assertEquals(isAbsolute, uri.isAbsolute())
    assertEquals(isOpaque, uri.isOpaque())
  }
  // --- Parsing: absolute, opaque and relative URIs --------------------------
  @Test def should_parse_vanilla_absolute_URIs(): Unit = {
    expectURI(new URI("http://java.sun.com/j2se/1.3/"), true, false)(
        scheme = "http",
        host = "java.sun.com",
        path = "/j2se/1.3/",
        authority = "java.sun.com",
        schemeSpecificPart = "//java.sun.com/j2se/1.3/")()
  }
  @Test def should_parse_absolute_URIs_with_empty_path(): Unit = {
    // "foo:bar" is parsed as a registry-based authority, not host "foo" port "bar".
    expectURI(new URI("http://foo:bar"), true, false)(
        authority = "foo:bar",
        path = "",
        scheme = "http",
        schemeSpecificPart = "//foo:bar")()
  }
  @Test def should_parse_absolute_URIs_with_IPv6(): Unit = {
    val uri = new URI("http://hans@[ffff::0:128.4.5.3]:345/~hans/")
    expectURI(uri, true, false)(
        scheme = "http",
        host = "[ffff::0:128.4.5.3]",
        userInfo = "hans",
        port = 345,
        path = "/~hans/",
        authority = "hans@[ffff::0:128.4.5.3]:345",
        schemeSpecificPart = "//hans@[ffff::0:128.4.5.3]:345/~hans/")()
  }
  @Test def should_parse_absolute_URIs_without_authority(): Unit = {
    expectURI(new URI("file:/~/calendar"), true, false)(
        scheme = "file",
        path = "/~/calendar",
        schemeSpecificPart = "/~/calendar")()
  }
  @Test def should_parse_absolute_URIs_with_empty_authority(): Unit = {
    expectURI(new URI("file:///~/calendar"), true, false)(
        scheme = "file",
        path = "/~/calendar",
        schemeSpecificPart = "///~/calendar")()
  }
  @Test def should_parse_opaque_URIs(): Unit = {
    // Opaque URIs (scheme + non-"/"-starting SSP) expose no authority/host/path.
    expectURI(new URI("mailto:java-net@java.sun.com"), true, true)(
        scheme = "mailto",
        schemeSpecificPart = "java-net@java.sun.com")()
    expectURI(new URI("news:comp.lang.java"), true, true)(
        scheme = "news",
        schemeSpecificPart = "comp.lang.java")()
    expectURI(new URI("urn:isbn:096139210x"), true, true)(
        scheme = "urn",
        schemeSpecificPart = "isbn:096139210x")()
  }
  @Test def should_parse_relative_URIs(): Unit = {
    expectURI(new URI("docs/guide/collections/designfaq.html#28"), false, false)(
        path = "docs/guide/collections/designfaq.html",
        fragment = "28",
        schemeSpecificPart = "docs/guide/collections/designfaq.html")()
    expectURI(new URI("../../../demo/jfc/SwingSet2/src/SwingSet2.java"), false, false)(
        path = "../../../demo/jfc/SwingSet2/src/SwingSet2.java",
        schemeSpecificPart = "../../../demo/jfc/SwingSet2/src/SwingSet2.java")()
  }
  @Test def should_parse_relative_URIs_with_IPv4(): Unit = {
    expectURI(new URI("//123.5.6.3:45/bar"), false, false)(
        authority = "123.5.6.3:45",
        host = "123.5.6.3",
        port = 45,
        path = "/bar",
        schemeSpecificPart = "//123.5.6.3:45/bar")()
  }
  @Test def should_parse_relative_URIs_with_registry_based_authority(): Unit = {
    expectURI(new URI("//foo:bar"), false, false)(
        authority = "foo:bar",
        path = "",
        schemeSpecificPart = "//foo:bar")()
  }
  @Test def should_parse_relative_URIs_with_escapes(): Unit = {
    // %5d decodes to ']' in the decoded accessors; the raw accessors keep the escape.
    expectURI(new URI("//ma%5dx:secret@example.com:8000/foo"), false, false)(
        authority = "ma]x:secret@example.com:8000",
        userInfo = "ma]x:secret",
        host = "example.com",
        port = 8000,
        path = "/foo",
        schemeSpecificPart = "//ma]x:secret@example.com:8000/foo")(
        rawUserInfo = "ma%5dx:secret",
        rawAuthority = "ma%5dx:secret@example.com:8000",
        rawSchemeSpecificPart = "//ma%5dx:secret@example.com:8000/foo")
  }
  @Test def should_parse_relative_URIs_with_fragment_only(): Unit = {
    expectURI(new URI("#foo"), false, false)(
        fragment = "foo",
        path = "",
        schemeSpecificPart = "")()
  }
  @Test def should_parse_relative_URIs_with_query_and_fragment(): Unit = {
    // The fragment is not part of the scheme-specific part.
    expectURI(new URI("?query=1#foo"), false, false)(
        query = "query=1",
        fragment = "foo",
        path = "",
        schemeSpecificPart = "?query=1")()
  }
  // Note the asymmetry pinned below: compareTo distinguishes the case of escape
  // hex digits ("%6a" vs "%6A"), while equals treats such escapes as equal.
  @Test def should_provide_compareTo(): Unit = {
    val x = new URI("http://example.com/asdf%6a")
    val y = new URI("http://example.com/asdf%6A")
    val z = new URI("http://example.com/asdfj")
    val rel = new URI("/foo/bar")
    assertTrue(x.compareTo(y) > 0)
    assertTrue(x.compareTo(z) < 0)
    assertTrue(y.compareTo(z) < 0)
    assertEquals(0, x.compareTo(x))
    assertEquals(0, y.compareTo(y))
    assertEquals(0, z.compareTo(z))
    // Absolute URIs order after relative ones.
    assertTrue(x.compareTo(rel) > 0)
    assertTrue(y.compareTo(rel) > 0)
    assertTrue(z.compareTo(rel) > 0)
    assertEquals(0, rel.compareTo(rel))
  }
  @Test def should_provide_equals(): Unit = {
    val x = new URI("http://example.com/asdf%6a")
    val y = new URI("http://example.com/asdf%6A")
    val z = new URI("http://example.com/asdfj")
    assertTrue(x == y)
    assertFalse(x == z)
    assertFalse(y == z)
    assertTrue(x == x)
    assertTrue(y == y)
    assertTrue(z == z)
    // Different escaped octets (%6b%6C vs %6C%6b) are genuinely different URIs.
    assertNotEquals(new URI("foo:helloWorld%6b%6C"), new URI("foo:helloWorld%6C%6b"))
  }
  @Test def should_provide_normalize(): Unit = {
    // Leading ".." segments that cannot be collapsed are preserved.
    expectURI(new URI("http://example.com/../asef/../../").normalize, true, false)(
        scheme = "http",
        host = "example.com",
        authority = "example.com",
        path = "/../../",
        schemeSpecificPart = "//example.com/../../")()
    expectURI(new URI("http://example.com/../as/./ef/foo/../../").normalize, true, false)(
        scheme = "http",
        host = "example.com",
        authority = "example.com",
        path = "/../as/",
        schemeSpecificPart = "//example.com/../as/")()
    // A "./" is prepended when the first remaining segment contains ':'.
    expectURI(new URI("bar/../fo:o/./bar").normalize, false, false)(
        path = "./fo:o/bar",
        schemeSpecificPart = "./fo:o/bar")()
    expectURI(new URI("bar/..//fo:o//./bar").normalize, false, false)(
        path = "./fo:o/bar",
        schemeSpecificPart = "./fo:o/bar")()
    expectURI(new URI("").normalize, false, false)(
        path = "",
        schemeSpecificPart = "")()
    // An already-normalized URI is returned as the same instance.
    val x = new URI("http://www.example.com/foo/bar")
    assertTrue(x.normalize eq x)
  }
  @Test def should_provide_resolve__JavaDoc_examples(): Unit = {
    val base = "http://java.sun.com/j2se/1.3/"
    val relative1 = "docs/guide/collections/designfaq.html#28"
    val resolved1 =
      "http://java.sun.com/j2se/1.3/docs/guide/collections/designfaq.html#28"
    val relative2 = "../../../demo/jfc/SwingSet2/src/SwingSet2.java"
    val resolved2 =
      "http://java.sun.com/j2se/1.3/demo/jfc/SwingSet2/src/SwingSet2.java"
    assertEquals(resolved1, new URI(base).resolve(relative1).toString)
    assertEquals(resolved2, new URI(resolved1).resolve(relative2).toString)
    assertEquals("/a/", new URI("").resolve("/a/").toString)
    assertEquals("/a/", new URI("/a/").resolve("").toString)
  }
  // Resolution examples straight from RFC 2396, Appendix C.
  @Test def should_provide_resolve_RFC2396_examples(): Unit = {
    val base = new URI("http://a/b/c/d;p?q")
    def resTest(ref: String, trg: String): Unit =
      assertEquals(trg, base.resolve(ref).toString)
    // Normal examples
    resTest("g:h", "g:h")
    resTest("g", "http://a/b/c/g")
    resTest("./g", "http://a/b/c/g")
    resTest("g/", "http://a/b/c/g/")
    resTest("/g", "http://a/g")
    resTest("//g", "http://g")
    resTest("?y", "http://a/b/c/?y")
    resTest("g?y", "http://a/b/c/g?y")
    resTest("#s", "http://a/b/c/d;p?q#s")
    resTest("g#s", "http://a/b/c/g#s")
    resTest("g?y#s", "http://a/b/c/g?y#s")
    resTest(";x", "http://a/b/c/;x")
    resTest("g;x", "http://a/b/c/g;x")
    resTest("g;x?y#s", "http://a/b/c/g;x?y#s")
    resTest(".", "http://a/b/c/")
    resTest("./", "http://a/b/c/")
    resTest("..", "http://a/b/")
    resTest("../", "http://a/b/")
    resTest("../g", "http://a/b/g")
    resTest("../..", "http://a/")
    resTest("../../", "http://a/")
    resTest("../../g", "http://a/g")
    // Abnormal examples
    resTest("../../../g", "http://a/../g")
    resTest("../../../../g", "http://a/../../g")
    resTest("/./g", "http://a/./g")
    resTest("/../g", "http://a/../g")
    resTest("g.", "http://a/b/c/g.")
    resTest(".g", "http://a/b/c/.g")
    resTest("g..", "http://a/b/c/g..")
    resTest("..g", "http://a/b/c/..g")
    resTest("./../g", "http://a/b/g")
    resTest("./g/.", "http://a/b/c/g/")
    resTest("g/./h", "http://a/b/c/g/h")
    resTest("g/../h", "http://a/b/c/h")
    resTest("g;x=1/./y", "http://a/b/c/g;x=1/y")
    resTest("g;x=1/../y", "http://a/b/c/y")
    resTest("g?y/./x", "http://a/b/c/g?y/./x")
    resTest("g?y/../x", "http://a/b/c/g?y/../x")
    resTest("g#s/./x", "http://a/b/c/g#s/./x")
    resTest("g#s/../x", "http://a/b/c/g#s/../x")
    resTest("http:g", "http:g")
  }
  @Test def should_provide_resolve_when_authority_is_empty__issue_2048(): Unit = {
    val base = new URI("http://foo/a")
    def resTest(ref: String, trg: String): Unit =
      assertEquals(trg, base.resolve(ref).toString)
    resTest("///a", "http://foo/a")
    resTest("/b", "http://foo/b")
    resTest("/b/../d", "http://foo/b/../d")
  }
  @Test def should_provide_normalize__examples_derived_from_RFC_relativize(): Unit = {
    expectURI(new URI("http://a/b/c/..").normalize, true, false)(
        scheme = "http",
        host = "a",
        authority = "a",
        path = "/b/",
        schemeSpecificPart = "//a/b/")()
    expectURI(new URI("http://a/b/c/.").normalize, true, false)(
        scheme = "http",
        host = "a",
        authority = "a",
        path = "/b/c/",
        schemeSpecificPart = "//a/b/c/")()
  }
  @Test def should_provide_relativize(): Unit = {
    val x = new URI("http://f%4Aoo@asdf/a")
    val y = new URI("http://fJoo@asdf/a/b/")
    val z = new URI("http://f%4aoo@asdf/a/b/")
    // Relativization fails (returns the argument itself) when user infos differ in raw form.
    assertTrue(x.relativize(y) eq y)
    assertEquals("b/", x.relativize(z).toString())
    def relTest(base: String, trg: String, exp: String): Unit =
      assertEquals(exp, new URI(base).relativize(new URI(trg)).toString())
    relTest("http://a.ch/a", "http://a.ch/a/b", "b")
    relTest("http://a.ch/a/", "http://a.ch/a/b", "b")
    relTest("https://a.ch/a", "http://a.ch/a/b", "http://a.ch/a/b")
    relTest("/a/b/c", "/a/b/c/d/e", "d/e")
    relTest("/a/b/c/", "/a/b/c/d/e", "d/e")
    relTest("/a/b/c/", "/a/b/c/foo:e/d", "foo:e/d") // see bug JDK-7037120
    relTest("../a/b", "../a/b/c", "c")
    relTest("../a/b", "", "")
    relTest("", "../a/b", "../a/b")
    relTest("file:///a", "file:///a/b/", "b/")
    relTest("file:/c", "file:///c/d/", "d/")
  }
  @Test def should_provide_hashCode(): Unit = {
    if (!executingInJVM) { // Fails on JDK6 and JDK7
      assertEquals(new URI("http://example.com/asdf%6a").hashCode,
          new URI("http://example.com/asdf%6A").hashCode)
    }
  }
  @Test def should_allow_non_ASCII_characters(): Unit = {
    expectURI(new URI("http://cs.dbpedia.org/resource/Víno"), true, false)(
        scheme = "http",
        host = "cs.dbpedia.org",
        path = "/resource/Víno",
        authority = "cs.dbpedia.org",
        schemeSpecificPart = "//cs.dbpedia.org/resource/Víno")()
  }
  @Test def should_decode_UTF_8(): Unit = {
    // Percent-escapes decode as UTF-8 byte sequences; raw accessors keep the escapes.
    expectURI(new URI("http://cs.dbpedia.org/resource/V%C3%ADno"), true, false)(
        scheme = "http",
        host = "cs.dbpedia.org",
        path = "/resource/Víno",
        authority = "cs.dbpedia.org",
        schemeSpecificPart = "//cs.dbpedia.org/resource/Víno")(
        rawPath = "/resource/V%C3%ADno",
        rawSchemeSpecificPart = "//cs.dbpedia.org/resource/V%C3%ADno")
    expectURI(new URI("%e3%81%93a%e3%82%93%e3%81%AB%e3%81%a1%e3%81%af"), false, false)(
        path = "こaんにちは",
        schemeSpecificPart = "こaんにちは")(
        rawPath = "%e3%81%93a%e3%82%93%e3%81%AB%e3%81%a1%e3%81%af",
        rawSchemeSpecificPart = "%e3%81%93a%e3%82%93%e3%81%AB%e3%81%a1%e3%81%af")
  }
  @Test def should_support_toASCIIString(): Unit = {
    def cmp(base: String, encoded: String): Unit =
      assertEquals(encoded, new URI(base).toASCIIString())
    cmp("http://cs.dbpedia.org/resource/Víno",
        "http://cs.dbpedia.org/resource/V%C3%ADno")
    cmp("http://こaんにちは/",
        "http://%E3%81%93a%E3%82%93%E3%81%AB%E3%81%A1%E3%81%AF/")
    cmp("foo://bar/\\uD800\\uDCF5/",
        "foo://bar/%F0%90%83%B5/")
  }
  // Malformed percent-encoded UTF-8 decodes to U+FFFD replacement characters.
  @Test def should_replace_when_bad_surrogates_are_present(): Unit = {
    expectURI(new URI("http://booh/%E3a"), true, false)(
        scheme = "http",
        host = "booh",
        path = "/�a",
        authority = "booh",
        schemeSpecificPart = "//booh/�a")(
        rawPath = "/%E3a",
        rawSchemeSpecificPart = "//booh/%E3a")
    // lowercase e is kept
    expectURI(new URI("http://booh/%e3a"), true, false)(
        scheme = "http",
        host = "booh",
        path = "/�a",
        authority = "booh",
        schemeSpecificPart = "//booh/�a")(
        rawPath = "/%e3a",
        rawSchemeSpecificPart = "//booh/%e3a")
    // %E3%81 is considered as 1 malformed
    expectURI(new URI("http://booh/%E3%81a"), true, false)(
        scheme = "http",
        host = "booh",
        path = "/�a",
        authority = "booh",
        schemeSpecificPart = "//booh/�a")(
        rawPath = "/%E3%81a",
        rawSchemeSpecificPart = "//booh/%E3%81a")
    if (!executingInJVM) { // Fails on JDK6 and JDK7
      // %E3%E3 is considered as 2 malformed
      expectURI(new URI("http://booh/%E3%E3a"), true, false)(
          scheme = "http",
          host = "booh",
          path = "/��a",
          authority = "booh",
          schemeSpecificPart = "//booh/��a")(
          rawPath = "/%E3%E3a",
          rawSchemeSpecificPart = "//booh/%E3%E3a")
    }
  }
  @Test def should_throw_on_bad_escape_sequences(): Unit = {
    // "%" must be followed by exactly two hex digits.
    expectThrows(classOf[URISyntaxException], new URI("http://booh/%E"))
    expectThrows(classOf[URISyntaxException], new URI("http://booh/%Ep"))
  }
}
| SebsLittleHelpers/scala-js | test-suite/shared/src/test/scala/org/scalajs/testsuite/javalib/net/URITest.scala | Scala | apache-2.0 | 15,682 |
/**
*
* Copyright (C) 2017 University of Bamberg, Software Technologies Research Group
* <https://www.uni-bamberg.de/>, <http://www.swt-bamberg.de/>
*
* This file is part of the Data Structure Investigator (DSI) project, which received financial support by the
* German Research Foundation (DFG) under grant no. LU 1748/4-1, see
* <http://www.swt-bamberg.de/dsi/>.
*
* DSI is licensed under the GNU GENERAL PUBLIC LICENSE (Version 3), see
* the LICENSE file at the project's top-level directory for details or consult <http://www.gnu.org/licenses/>.
*
* DSI is free software: you can redistribute it and/or modify it under the
* terms of the GNU General Public License as published by the Free Software
* Foundation, either version 3 of the License, or any later version.
*
* DSI is a RESEARCH PROTOTYPE and distributed WITHOUT ANY
* WARRANTY, without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* The following people contributed to the conception and realization of the present DSI distribution (in
* alphabetic order by surname):
*
* - Jan H. Boockmann
* - Gerald Lüttgen
* - Thomas Rupprecht
* - David H. White
*
*/
/**
* @author DSI
*
* DsOliLValue.scala created on Sep 23, 2014
*
* Description: Represents the l-value of
* assignment in the event trace
*/
package event
/**
* @author DSI
*
*/
/** Immutable value object for the l-value of an assignment in the event trace. */
class DsOliLValue(
    val address: Long, // presumably the memory address of the l-value — TODO confirm
    val typeString: String, // presumably the textual type of the l-value — TODO confirm
    val codeFragment: String) { // presumably the source fragment denoting the l-value — TODO confirm
}
/*
* Copyright 2014-2015 Sphonic Ltd. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.websudos.phantom.connectors
import com.datastax.driver.core.{Cluster, Session}
import scala.collection.concurrent.TrieMap
/**
* The default SessionProvider implementation, which should be sufficient
* for the most use cases.
*
* This implementation caches `Session` instances per keySpace.
*/
class DefaultSessionProvider(builder: ClusterBuilder) extends SessionProvider {
  // One Session instance cached per keySpace name.
  private val sessionCache = new Cache[String, Session]
  /** The single Cluster instance, configured by `builder` and built lazily on first use. */
  lazy val cluster: Cluster = {
    // TODO - the original phantom modules had .withoutJMXReporting().withoutMetrics() as defaults, discuss best choices
    val cb = Cluster.builder
    builder(cb).build
  }
  /**
   * Initializes the keySpace with the given name on
   * the specified Session.
   */
  protected def initKeySpace(session: Session, keySpace: String): Session = {
    // NOTE(review): keySpace is interpolated into the CQL string unescaped —
    // safe only as long as keyspace names come from trusted configuration.
    session.execute(s"CREATE KEYSPACE IF NOT EXISTS $keySpace WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1};")
    session
  }
  /**
   * Creates a new Session for the specified keySpace.
   */
  protected[this] def createSession(keySpace: String): Session = {
    // NOTE(review): the session is connected without a keyspace; only the CREATE
    // KEYSPACE statement is issued — presumably callers qualify table names or
    // issue USE themselves; confirm.
    val session = cluster.connect
    initKeySpace(session, keySpace)
  }
  /** Returns the cached Session for `keySpace`, creating (and caching) it on first request. */
  def getSession(keySpace: String): Session = {
    sessionCache.getOrElseUpdate(keySpace, createSession(keySpace))
  }
}
/**
 * Thread-safe cache implementation.
 *
 * Intended for maps holding at most a handful of entries that are read
 * infrequently; the emphasis is on thread-safety rather than raw speed.
 */
class Cache[K, V] {

  /* TrieMap.getOrElseUpdate is not atomic, so entries are wrapped in
   * lazily-forced cells and installed atomically via putIfAbsent; only the
   * cell that actually ends up in the map is ever forced. */
  private[this] class Lazy(thunk: => V) {
    lazy val get: V = thunk
  }

  private[this] val map = TrieMap.empty[K, Lazy]

  /**
   * Returns the value already cached under `key`, or associates the key
   * with the lazily evaluated `op` and returns it.
   *
   * `op` is evaluated at most once, and only when this call wins the race
   * to install an entry for `key`.
   *
   * @return the value previously associated with the key, or (if no value
   *         had been previously set) the newly computed one.
   */
  def getOrElseUpdate(key: K, op: => V): V = {
    val fresh = new Lazy(op)
    map.putIfAbsent(key, fresh) match {
      case Some(existing) =>
        // Lost the race (or key already present): keep the old value; `op` is never forced.
        existing.get
      case None =>
        fresh.get
    }
  }
}
package com.karasiq.shadowcloud.test.storage
import java.nio.file.Files
import akka.stream.scaladsl.{Keep, Source}
import akka.stream.testkit.scaladsl.{TestSink, TestSource}
import akka.util.ByteString
import com.karasiq.shadowcloud.model.Path
import com.karasiq.shadowcloud.storage._
import com.karasiq.shadowcloud.storage.repository.{KeyValueRepository, PathTreeRepository, RepositoryKeys}
import com.karasiq.shadowcloud.streams.utils.ByteStreams
import com.karasiq.shadowcloud.test.utils.{CoreTestUtils, SCExtensionSpec, TestUtils}
import org.scalatest.FlatSpecLike
/** Lifecycle tests for key-value repository backends (in-memory and directory-based). */
class RepositoryTest extends SCExtensionSpec with FlatSpecLike {
  "In-memory repository" should "store chunk" in {
    testRepository(Repositories.inMemory)
  }
  "File repository" should "store chunk" in {
    testRepository(PathTreeRepository.toKeyValue(Repositories.fromDirectory(Files.createTempDirectory("crp-test")), Path.root / "default"))
  }
  /* it should "validate path" in {
    intercept[IllegalArgumentException](Repositories.fromDirectory(Files.createTempFile("crp-test", "file")))
  } */
  /** Exercises a repository's full lifecycle with one random chunk keyed by its
   *  hash: write, key enumeration, read-back, rejected overwrite, and delete. */
  private[this] def testRepository(repository: KeyValueRepository): Unit = {
    val chunk = CoreTestUtils.randomChunk
    val testRepository = RepositoryKeys.toHexString(repository)
    // Write chunk
    val (write, writeResult) = TestSource.probe[ByteString]
      .toMat(testRepository.write(chunk.checksum.hash))(Keep.both)
      .run()
    write.sendNext(chunk.data.plain)
    write.sendComplete()
    whenReady(writeResult) { result ⇒
      result.isSuccess shouldBe true
      result.count should not be 0
    }
    // Enumerate chunks
    val keys = testRepository.keys.runWith(TestSink.probe)
    keys.requestNext(chunk.checksum.hash)
    keys.request(1)
    keys.expectComplete()
    // Read chunk
    val read = testRepository.read(chunk.checksum.hash)
      .via(ByteStreams.concat)
      .runWith(TestSink.probe)
    read.requestNext(chunk.data.plain)
    read.expectComplete()
    // Rewrite error: writing to an existing key must fail
    val rewriteBytes = TestUtils.randomBytes(chunk.data.plain.length)
    val rewriteResult = Source.single(rewriteBytes)
      .runWith(testRepository.write(chunk.checksum.hash))
    whenReady(rewriteResult) { result ⇒
      result.isFailure shouldBe true
    }
    // Delete: reports the number of bytes removed and leaves no keys behind
    val deleteResult = Source.single(chunk.checksum.hash).runWith(testRepository.delete)
    whenReady(deleteResult) { result ⇒
      val StorageIOResult.Success(_, count) = result
      count shouldBe chunk.data.plain.length
      val keys = testRepository.keys.runWith(TestSink.probe)
      keys.request(1)
      keys.expectComplete()
    }
  }
}
| Karasiq/shadowcloud | core/assembly/src/test/scala/com/karasiq/shadowcloud/test/storage/RepositoryTest.scala | Scala | apache-2.0 | 2,632 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.