code stringlengths 3 1.01M | repo_name stringlengths 5 116 | path stringlengths 3 311 | language stringclasses 30 values | license stringclasses 15 values | size int64 3 1.01M |
|---|---|---|---|---|---|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <vrpn_Connection.h>
#include <vrpn_Mutex.h>
// Stand-alone vrpn mutex server: serves the mutex named by argv[1] on an
// optional port (argv[2], defaulting to vrpn_DEFAULT_LISTEN_PORT_NO).
// Runs forever; never returns under normal operation.
int main (int argc, char ** argv) {
  vrpn_Connection * c;
  vrpn_Mutex_Server * me;
  int portno = vrpn_DEFAULT_LISTEN_PORT_NO;

  // argv[1] (the mutex name) is required; print usage instead of passing a
  // NULL pointer into vrpn_Mutex_Server below.
  if (argc < 2) {
    fprintf(stderr, "Usage: %s mutex_name [port]\n", argv[0]);
    return 1;
  }
  if (argc > 2) {
    portno = atoi(argv[2]);
  }

  // Build the "host:port" connection name; snprintf guards against overflow.
  char con_name[512];
  snprintf(con_name, sizeof(con_name), "localhost:%d", portno);

  c = vrpn_create_server_connection(con_name);
  me = new vrpn_Mutex_Server(argv[1], c);

  // Service mutex requests forever.
  while (1) {
    me->mainloop();
  }
}
| ccccjason/my_osvr | vendor/vrpn/server_src/test_mutexServer.C | C++ | apache-2.0 | 479 |
/*
* Copyright 2016 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Collects and reports the enumerable property names of the global `window`. */
class WindowInfo {
  constructor() {
    /** @type {!Array<string>} Property names recorded so far. */
    this.props = [];
  }

  /** Records every enumerable property name found on `window`. */
  propList() {
    for (const key in window) {
      this.props.push(key);
    }
  }

  /** Logs the recorded property names as a comma-separated list. */
  list() {
    log(this.props.join(', '));
  }
}

(new WindowInfo()).list();
| google/closure-compiler-npm | packages/google-closure-compiler/test/fixtures/two.js | JavaScript | apache-2.0 | 839 |
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Example - example-ng-if-jquery</title>
<link href="animations.css" rel="stylesheet" type="text/css">
<script src="../../components/jquery-3.1.1/jquery.js"></script>
<script src="../../../angular.js"></script>
<script src="../../../angular-animate.js"></script>
</head>
<body ng-app="ngAnimate">
<label>Click me: <input type="checkbox" ng-model="checked" ng-init="checked=true" /></label><br/>
Show when checked:
<span ng-if="checked" class="animate-if">
This is removed when the checkbox is unchecked.
</span>
</body>
</html> | chdyi/Angular.js | BookStore/app/framework/angular-1.6.2/docs/examples/example-ng-if/index-jquery.html | HTML | apache-2.0 | 622 |
/**
* Copyright 2005-2014 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version
* 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package io.fabric8.commands;
import java.io.PrintStream;
import java.util.List;
import io.fabric8.api.FabricService;
import io.fabric8.api.Profile;
import io.fabric8.api.ProfileService;
import io.fabric8.api.Version;
import io.fabric8.utils.Strings;
import io.fabric8.utils.TablePrinter;
import org.apache.felix.gogo.commands.Command;
import org.apache.felix.gogo.commands.Option;
import org.apache.karaf.shell.console.AbstractAction;
import static io.fabric8.commands.support.CommandUtils.sortProfiles;
@Command(name = ProfileList.FUNCTION_VALUE, scope = ProfileList.SCOPE_VALUE, description = ProfileList.DESCRIPTION)
public class ProfileListAction extends AbstractAction {

    @Option(name = "--version", description = "Specifies the version of the profiles to list. Defaults to the current default version.")
    private String versionId;

    @Option(name = "--hidden", description = "Display hidden profiles")
    private boolean hidden;

    private final FabricService fabricService;

    ProfileListAction(FabricService fabricService) {
        this.fabricService = fabricService;
    }

    public FabricService getFabricService() {
        return fabricService;
    }

    /**
     * Resolves the requested (or default) version, sorts its profiles and
     * prints them as a table to stdout.
     */
    @Override
    protected Object doExecute() throws Exception {
        ProfileService profileService = fabricService.adapt(ProfileService.class);
        Version version = (versionId == null)
                ? fabricService.getRequiredDefaultVersion()
                : profileService.getRequiredVersion(versionId);
        printProfiles(profileService, sortProfiles(version.getProfiles()), System.out);
        return null;
    }

    /**
     * Renders one table row per profile that still exists and is visible
     * (hidden profiles are shown only with --hidden).
     */
    protected void printProfiles(ProfileService profileService, List<Profile> profiles, PrintStream out) {
        TablePrinter table = new TablePrinter();
        table.columns("id", "# containers", "parents");
        for (Profile profile : profiles) {
            String version = profile.getVersion();
            String id = profile.getId();
            // Skip profiles that were deleted since the listing was taken,
            // and hidden profiles unless explicitly requested.
            boolean exists = profileService.hasProfile(version, id);
            boolean visible = hidden || !profile.isHidden();
            if (!exists || !visible) {
                continue;
            }
            int active = fabricService.getAssociatedContainers(version, id).length;
            String parents = Strings.join(profile.getParentIds(), " ");
            table.row(id, activeContainerCountText(active), parents);
        }
        table.print();
    }

    /** Formats a container count: the number when positive, otherwise blank. */
    public static String activeContainerCountText(int active) {
        return active > 0 ? Integer.toString(active) : "";
    }
}
| hekonsek/fabric8 | sandbox/fabric/fabric-commands/src/main/java/io/fabric8/commands/ProfileListAction.java | Java | apache-2.0 | 3,254 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark.executor
import java.io.{File, FileOutputStream}
import java.net.{URI, URL, URLClassLoader}
import java.util.concurrent._
import org.apache.hadoop.fs.FileUtil
import scala.collection.mutable.{ArrayBuffer, Map, HashMap}
import spark.broadcast._
import spark.scheduler._
import spark._
import java.nio.ByteBuffer
/**
* The Mesos executor for Spark.
*/
private[spark] class Executor(executorId: String, slaveHostname: String, properties: Seq[(String, String)]) extends Logging {

  // Application dependencies (added through SparkContext) that we've fetched so far on this node.
  // Each map holds the master's timestamp for the version of that file or JAR we got.
  private val currentFiles: HashMap[String, Long] = new HashMap[String, Long]()
  private val currentJars: HashMap[String, Long] = new HashMap[String, Long]()

  // Sentinel payload sent with RUNNING status updates, which carry no data.
  private val EMPTY_BYTE_BUFFER = ByteBuffer.wrap(new Array[Byte](0))

  initLogging()

  // No ip or host:port - just hostname
  Utils.checkHost(slaveHostname, "Expected executed slave to be a hostname")
  // must not have port specified.
  assert (0 == Utils.parseHostPort(slaveHostname)._2)

  // Make sure the local hostname we report matches the cluster scheduler's name for this host
  Utils.setCustomHostname(slaveHostname)

  // Set spark.* system properties from executor arg
  for ((key, value) <- properties) {
    System.setProperty(key, value)
  }

  // Create our ClassLoader and set it on this thread
  private val urlClassLoader = createClassLoader()
  private val replClassLoader = addReplClassLoaderIfNeeded(urlClassLoader)
  Thread.currentThread.setContextClassLoader(replClassLoader)

  // Make any thread terminations due to uncaught exceptions kill the entire
  // executor process to avoid surprising stalls.
  Thread.setDefaultUncaughtExceptionHandler(
    new Thread.UncaughtExceptionHandler {
      override def uncaughtException(thread: Thread, exception: Throwable) {
        try {
          logError("Uncaught exception in thread " + thread, exception)

          // We may have been called from a shutdown hook. If so, we must not call System.exit().
          // (If we do, we will deadlock.)
          if (!Utils.inShutdown()) {
            if (exception.isInstanceOf[OutOfMemoryError]) {
              System.exit(ExecutorExitCode.OOM)
            } else {
              System.exit(ExecutorExitCode.UNCAUGHT_EXCEPTION)
            }
          }
        } catch {
          // halt() rather than exit(): if the handler itself fails we must not
          // re-enter shutdown hooks.
          case oom: OutOfMemoryError => Runtime.getRuntime.halt(ExecutorExitCode.OOM)
          case t: Throwable => Runtime.getRuntime.halt(ExecutorExitCode.UNCAUGHT_EXCEPTION_TWICE)
        }
      }
    }
  )

  val executorSource = new ExecutorSource(this)

  // Initialize Spark environment (using system properties read above)
  val env = SparkEnv.createFromSystemProperties(executorId, slaveHostname, 0, false, false)
  SparkEnv.set(env)
  env.metricsSystem.registerSource(executorSource)

  // Largest serialized task result (bytes) that can be shipped back in one Akka message.
  private val akkaFrameSize = env.actorSystem.settings.config.getBytes("akka.remote.netty.message-frame-size")

  // Start worker thread pool
  val threadPool = new ThreadPoolExecutor(
    1, 128, 600, TimeUnit.SECONDS, new SynchronousQueue[Runnable])

  /** Submits a serialized task for asynchronous execution on the worker pool. */
  def launchTask(context: ExecutorBackend, taskId: Long, serializedTask: ByteBuffer) {
    threadPool.execute(new TaskRunner(context, taskId, serializedTask))
  }

  /**
   * Deserializes and runs one task, reporting RUNNING / FINISHED / FAILED
   * state (plus the serialized result or failure reason) through `context`.
   */
  class TaskRunner(context: ExecutorBackend, taskId: Long, serializedTask: ByteBuffer)
    extends Runnable {

    override def run() {
      val startTime = System.currentTimeMillis()
      SparkEnv.set(env)
      Thread.currentThread.setContextClassLoader(replClassLoader)
      val ser = SparkEnv.get.closureSerializer.newInstance()
      logInfo("Running task ID " + taskId)
      context.statusUpdate(taskId, TaskState.RUNNING, EMPTY_BYTE_BUFFER)
      var attemptedTask: Option[Task[Any]] = None
      var taskStart: Long = 0
      try {
        SparkEnv.set(env)
        Accumulators.clear()
        // Fetch any files/jars this task depends on before deserializing its body.
        val (taskFiles, taskJars, taskBytes) = Task.deserializeWithDependencies(serializedTask)
        updateDependencies(taskFiles, taskJars)
        val task = ser.deserialize[Task[Any]](taskBytes, Thread.currentThread.getContextClassLoader)
        attemptedTask = Some(task)
        logInfo("Its generation is " + task.generation)
        env.mapOutputTracker.updateGeneration(task.generation)
        taskStart = System.currentTimeMillis()
        val value = task.run(taskId.toInt)
        val taskFinish = System.currentTimeMillis()
        task.metrics.foreach{ m =>
          m.hostname = Utils.localHostName
          m.executorDeserializeTime = (taskStart - startTime).toInt
          m.executorRunTime = (taskFinish - taskStart).toInt
        }
        //TODO I'd also like to track the time it takes to serialize the task results, but that is huge headache, b/c
        // we need to serialize the task metrics first.  If TaskMetrics had a custom serialized format, we could
        // just change the relevants bytes in the byte buffer
        val accumUpdates = Accumulators.values
        val result = new TaskResult(value, accumUpdates, task.metrics.getOrElse(null))
        val serializedResult = ser.serialize(result)
        logInfo("Serialized size of result for " + taskId + " is " + serializedResult.limit)
        // Results larger than the Akka frame (minus 1KB headroom) cannot be
        // shipped back; fail the task explicitly instead of losing the message.
        if (serializedResult.limit >= (akkaFrameSize - 1024)) {
          context.statusUpdate(taskId, TaskState.FAILED, ser.serialize(TaskResultTooBigFailure()))
          return
        }
        context.statusUpdate(taskId, TaskState.FINISHED, serializedResult)
        logInfo("Finished task ID " + taskId)
      } catch {
        case ffe: FetchFailedException => {
          val reason = ffe.toTaskEndReason
          context.statusUpdate(taskId, TaskState.FAILED, ser.serialize(reason))
        }

        case t: Throwable => {
          val serviceTime = (System.currentTimeMillis() - taskStart).toInt
          val metrics = attemptedTask.flatMap(t => t.metrics)
          metrics.foreach{m => m.executorRunTime = serviceTime}
          val reason = ExceptionFailure(t.getClass.getName, t.toString, t.getStackTrace, metrics)
          context.statusUpdate(taskId, TaskState.FAILED, ser.serialize(reason))

          // TODO: Should we exit the whole executor here? On the one hand, the failed task may
          // have left some weird state around depending on when the exception was thrown, but on
          // the other hand, maybe we could detect that when future tasks fail and exit then.
          logError("Exception in task ID " + taskId, t)
          //System.exit(1)
        }
      }
    }
  }

  /**
   * Create a ClassLoader for use in tasks, adding any JARs specified by the user or any classes
   * created by the interpreter to the search path
   */
  private def createClassLoader(): ExecutorURLClassLoader = {
    var loader = this.getClass.getClassLoader

    // For each of the jars in the jarSet, add them to the class loader.
    // We assume each of the files has already been fetched.
    val urls = currentJars.keySet.map { uri =>
      new File(uri.split("/").last).toURI.toURL
    }.toArray
    new ExecutorURLClassLoader(urls, loader)
  }

  /**
   * If the REPL is in use, add another ClassLoader that will read
   * new classes defined by the REPL as the user types code
   */
  private def addReplClassLoaderIfNeeded(parent: ClassLoader): ClassLoader = {
    val classUri = System.getProperty("spark.repl.class.uri")
    if (classUri != null) {
      logInfo("Using REPL class URI: " + classUri)
      try {
        // Loaded reflectively so the core module has no compile-time
        // dependency on the REPL module.
        val klass = Class.forName("spark.repl.ExecutorClassLoader")
          .asInstanceOf[Class[_ <: ClassLoader]]
        val constructor = klass.getConstructor(classOf[String], classOf[ClassLoader])
        return constructor.newInstance(classUri, parent)
      } catch {
        case _: ClassNotFoundException =>
          logError("Could not find spark.repl.ExecutorClassLoader on classpath!")
          System.exit(1)
          null
      }
    } else {
      return parent
    }
  }

  /**
   * Download any missing dependencies if we receive a new set of files and JARs from the
   * SparkContext. Also adds any new JARs we fetched to the class loader.
   */
  private def updateDependencies(newFiles: HashMap[String, Long], newJars: HashMap[String, Long]) {
    // Synchronized: multiple TaskRunner threads may race to fetch the same files.
    synchronized {
      // Fetch missing dependencies
      for ((name, timestamp) <- newFiles if currentFiles.getOrElse(name, -1L) < timestamp) {
        logInfo("Fetching " + name + " with timestamp " + timestamp)
        Utils.fetchFile(name, new File(SparkFiles.getRootDirectory))
        currentFiles(name) = timestamp
      }
      for ((name, timestamp) <- newJars if currentJars.getOrElse(name, -1L) < timestamp) {
        logInfo("Fetching " + name + " with timestamp " + timestamp)
        Utils.fetchFile(name, new File(SparkFiles.getRootDirectory))
        currentJars(name) = timestamp
        // Add it to our class loader
        val localName = name.split("/").last
        val url = new File(SparkFiles.getRootDirectory, localName).toURI.toURL
        if (!urlClassLoader.getURLs.contains(url)) {
          logInfo("Adding " + url + " to class loader")
          urlClassLoader.addURL(url)
        }
      }
    }
  }
}
| rjpower/spark | core/src/main/scala/spark/executor/Executor.scala | Scala | apache-2.0 | 10,042 |
<ag-grid-angular
id="transaction-table"
class="ag-theme-balham"
style="width:100% ;height:100%"
[gridOptions]="gridOptions"
[rowData]="rowData"
(gridReady)="onGridReady($event)"
(gridSizeChanged)="onGridSizeChanged($event)"
(firstDataRendered)="onRendered()">
</ag-grid-angular>
| suraj-raturi/pinpoint | web/src/main/angular/src/app/core/components/transaction-table-grid/transaction-table-grid.component.html | HTML | apache-2.0 | 311 |
/*
* Copyright (C) 2014 The Android Open Source Project
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.util.jar;
import java.io.*;
import java.net.URL;
import java.util.*;
import java.security.*;
import java.security.cert.CertificateException;
import java.util.zip.ZipEntry;
/* J2ObjC removed.
import sun.misc.JarIndex;
*/
import sun.security.util.ManifestDigester;
import sun.security.util.ManifestEntryVerifier;
import sun.security.util.SignatureFileVerifier;
import sun.security.util.Debug;
/**
*
* @author Roland Schemers
*/
class JarVerifier {
/* J2ObjC added: copied from sun.misc.JarIndex */
public static final String INDEX_NAME = "META-INF/INDEX.LIST";
/* Are we debugging ? */
static final Debug debug = Debug.getInstance("jar");
/* a table mapping names to code signers, for jar entries that have
had their actual hashes verified */
private Hashtable<String, CodeSigner[]> verifiedSigners;
/* a table mapping names to code signers, for jar entries that have
passed the .SF/.DSA/.EC -> MANIFEST check */
private Hashtable<String, CodeSigner[]> sigFileSigners;
/* a hash table to hold .SF bytes */
private Hashtable<String, byte[]> sigFileData;
/** "queue" of pending PKCS7 blocks that we couldn't parse
* until we parsed the .SF file */
private ArrayList<SignatureFileVerifier> pendingBlocks;
/* cache of CodeSigner objects */
private ArrayList<CodeSigner[]> signerCache;
/* Are we parsing a block? */
private boolean parsingBlockOrSF = false;
/* Are we done parsing META-INF entries? */
private boolean parsingMeta = true;
/* Are there are files to verify? */
private boolean anyToVerify = true;
/* The output stream to use when keeping track of files we are interested
in */
private ByteArrayOutputStream baos;
/** The ManifestDigester object */
private volatile ManifestDigester manDig;
/** the bytes for the manDig object */
byte manifestRawBytes[] = null;
/** controls eager signature validation */
boolean eagerValidation;
/** makes code source singleton instances unique to us */
private Object csdomain = new Object();
/** collect -DIGEST-MANIFEST values for blacklist */
private List<Object> manifestDigests;
/**
 * Creates a verifier for a jar whose manifest has the given raw bytes.
 *
 * @param rawBytes the raw bytes of META-INF/MANIFEST.MF; digested lazily
 *                 the first time a signature block file is processed
 */
public JarVerifier(byte rawBytes[]) {
    manifestRawBytes = rawBytes;
    sigFileSigners = new Hashtable<>();
    verifiedSigners = new Hashtable<>();
    sigFileData = new Hashtable<>(11);
    pendingBlocks = new ArrayList<>();
    baos = new ByteArrayOutputStream();
    manifestDigests = new ArrayList<>();
}
/**
 * This method scans to see which entry we're parsing and
 * keeps various state information depending on what type of
 * file is being parsed.
 *
 * @param je  the entry about to be read; null is a no-op
 * @param mev the per-entry verifier configured here; it is given a null
 *            entry name when the entry's bytes need no digesting
 */
public void beginEntry(JarEntry je, ManifestEntryVerifier mev)
    throws IOException
{
    if (je == null)
        return;

    if (debug != null) {
        debug.println("beginEntry "+je.getName());
    }

    String name = je.getName();

    /*
     * Assumptions:
     * 1. The manifest should be the first entry in the META-INF directory.
     * 2. The .SF/.DSA/.EC files follow the manifest, before any normal entries
     * 3. Any of the following will throw a SecurityException:
     *    a. digest mismatch between a manifest section and
     *       the SF section.
     *    b. digest mismatch between the actual jar entry and the manifest
     */
    if (parsingMeta) {
        String uname = name.toUpperCase(Locale.ENGLISH);
        if ((uname.startsWith("META-INF/") ||
             uname.startsWith("/META-INF/"))) {

            if (je.isDirectory()) {
                mev.setEntry(null, je);
                return;
            }

            /* J2ObjC modified: platform-specific implementation to avoid importing JarIndex */
            // The manifest itself and the index file are never digested here.
            if (uname.equals(JarFile.MANIFEST_NAME) ||
                uname.equals(INDEX_NAME)) {
                return;
            }

            if (SignatureFileVerifier.isBlockOrSF(uname)) {
                /* We parse only DSA, RSA or EC PKCS7 blocks. */
                // Buffer the raw bytes into baos instead of digesting them.
                parsingBlockOrSF = true;
                baos.reset();
                mev.setEntry(null, je);
                return;
            }

            // If a META-INF entry is not MF or block or SF, they should
            // be normal entries. According to 2 above, no more block or
            // SF will appear. Let's doneWithMeta.
        }
    }

    if (parsingMeta) {
        doneWithMeta();
    }

    if (je.isDirectory()) {
        mev.setEntry(null, je);
        return;
    }

    // be liberal in what you accept. If the name starts with ./, remove
    // it as we internally canonicalize it with out the ./.
    if (name.startsWith("./"))
        name = name.substring(2);

    // be liberal in what you accept. If the name starts with /, remove
    // it as we internally canonicalize it with out the /.
    if (name.startsWith("/"))
        name = name.substring(1);

    // only set the jev object for entries that have a signature
    // (either verified or not)
    if (sigFileSigners.get(name) != null ||
        verifiedSigners.get(name) != null) {
        mev.setEntry(name, je);
        return;
    }

    // don't compute the digest for this entry
    mev.setEntry(null, je);

    return;
}
/**
 * update a single byte.
 *
 * @param b   the byte just read, or -1 at end of entry, which triggers
 *            final verification of the current entry
 * @param mev the verifier tracking the current entry's digest
 */
public void update(int b, ManifestEntryVerifier mev)
    throws IOException
{
    if (b != -1) {
        if (parsingBlockOrSF) {
            // Signature-related bytes are buffered; parsed later in processEntry.
            baos.write(b);
        } else {
            mev.update((byte)b);
        }
    } else {
        processEntry(mev);
    }
}
/**
 * update an array of bytes.
 *
 * @param n   number of bytes just read, or -1 at end of entry
 * @param b   the buffer holding the bytes
 * @param off offset of the first valid byte
 * @param len requested read length (only the first n bytes are consumed)
 * @param mev the verifier tracking the current entry's digest
 */
public void update(int n, byte[] b, int off, int len,
                   ManifestEntryVerifier mev)
    throws IOException
{
    if (n != -1) {
        if (parsingBlockOrSF) {
            // Signature-related bytes are buffered; parsed later in processEntry.
            baos.write(b, off, n);
        } else {
            mev.update(b, off, n);
        }
    } else {
        processEntry(mev);
    }
}
/**
 * called when we reach the end of entry in one of the read() methods.
 *
 * For a normal entry: verifies its digest against the manifest and records
 * its signers on the JarEntry. For a buffered .SF or signature-block file:
 * parses it, possibly resolving blocks queued in pendingBlocks. Parse
 * failures are deliberately swallowed and the entry treated as unsigned.
 */
private void processEntry(ManifestEntryVerifier mev)
    throws IOException
{
    if (!parsingBlockOrSF) {
        JarEntry je = mev.getEntry();
        if ((je != null) && (je.signers == null)) {
            je.signers = mev.verify(verifiedSigners, sigFileSigners);
            je.certs = mapSignersToCertArray(je.signers);
        }
    } else {
        try {
            parsingBlockOrSF = false;

            if (debug != null) {
                debug.println("processEntry: processing block");
            }

            String uname = mev.getEntry().getName()
                                         .toUpperCase(Locale.ENGLISH);

            if (uname.endsWith(".SF")) {
                String key = uname.substring(0, uname.length()-3);
                byte bytes[] = baos.toByteArray();
                // add to sigFileData in case future blocks need it
                sigFileData.put(key, bytes);
                // check pending blocks, we can now process
                // anyone waiting for this .SF file
                Iterator<SignatureFileVerifier> it = pendingBlocks.iterator();
                while (it.hasNext()) {
                    SignatureFileVerifier sfv = it.next();
                    if (sfv.needSignatureFile(key)) {
                        if (debug != null) {
                            debug.println(
                                "processEntry: processing pending block");
                        }
                        sfv.setSignatureFile(bytes);
                        sfv.process(sigFileSigners, manifestDigests);
                    }
                }
                return;
            }

            // now we are parsing a signature block file
            String key = uname.substring(0, uname.lastIndexOf("."));

            if (signerCache == null)
                signerCache = new ArrayList<>();

            if (manDig == null) {
                // Lazily digest the manifest once; double-checked locking is
                // safe here because manDig is declared volatile.
                synchronized(manifestRawBytes) {
                    if (manDig == null) {
                        manDig = new ManifestDigester(manifestRawBytes);
                        manifestRawBytes = null;
                    }
                }
            }

            SignatureFileVerifier sfv =
                new SignatureFileVerifier(signerCache,
                                          manDig, uname, baos.toByteArray());

            if (sfv.needSignatureFileBytes()) {
                // see if we have already parsed an external .SF file
                byte[] bytes = sigFileData.get(key);

                if (bytes == null) {
                    // put this block on queue for later processing
                    // since we don't have the .SF bytes yet
                    // (uname, block);
                    if (debug != null) {
                        debug.println("adding pending block");
                    }
                    pendingBlocks.add(sfv);
                    return;
                } else {
                    sfv.setSignatureFile(bytes);
                }
            }
            sfv.process(sigFileSigners, manifestDigests);

        } catch (IOException ioe) {
            // e.g. sun.security.pkcs.ParsingException
            if (debug != null) debug.println("processEntry caught: "+ioe);
            // ignore and treat as unsigned
        } catch (SignatureException se) {
            if (debug != null) debug.println("processEntry caught: "+se);
            // ignore and treat as unsigned
        } catch (NoSuchAlgorithmException nsae) {
            if (debug != null) debug.println("processEntry caught: "+nsae);
            // ignore and treat as unsigned
        } catch (CertificateException ce) {
            if (debug != null) debug.println("processEntry caught: "+ce);
            // ignore and treat as unsigned
        }
    }
}
// Android-changed: @deprecated tag needs a description. http://b/110781661
/**
 * Return an array of java.security.cert.Certificate objects for
 * the given file in the jar.
 *
 * @deprecated Deprecated. Use {@link #getCodeSigners(String)} instead.
 */
@Deprecated
public java.security.cert.Certificate[] getCerts(String name)
{
    return mapSignersToCertArray(getCodeSigners(name));
}
/**
 * Returns the certificates for a jar entry, going through
 * {@link #getCodeSigners(JarFile, JarEntry)} so eager validation
 * (when enabled) can force the entry to be read first.
 */
public java.security.cert.Certificate[] getCerts(JarFile jar, JarEntry entry)
{
    return mapSignersToCertArray(getCodeSigners(jar, entry));
}
/**
 * return an array of CodeSigner objects for
 * the given file in the jar. this array is not cloned.
 */
public CodeSigner[] getCodeSigners(String name)
{
    // Only entries whose actual digests have been verified appear here.
    return verifiedSigners.get(name);
}
/**
 * Returns the code signers for an entry. When eager validation is on and
 * the entry has asserted-but-unverified signers, the entry's bytes are
 * read in full first so its verification hash gets computed.
 */
public CodeSigner[] getCodeSigners(JarFile jar, JarEntry entry)
{
    String name = entry.getName();
    if (eagerValidation && sigFileSigners.get(name) != null) {
        /*
         * Force a read of the entry data to generate the
         * verification hash.
         */
        try {
            InputStream s = jar.getInputStream(entry);
            byte[] buffer = new byte[1024];
            int n = buffer.length;
            while (n != -1) {
                n = s.read(buffer, 0, buffer.length);
            }
            s.close();
        } catch (IOException e) {
            // Ignored: a failed read simply leaves the entry unverified.
        }
    }
    return getCodeSigners(name);
}
/*
 * Convert an array of signers into an array of concatenated certificate
 * arrays.
 */
private static java.security.cert.Certificate[] mapSignersToCertArray(
        CodeSigner[] signers) {

    if (signers == null) {
        return null;
    }
    // Flatten every signer's certificate chain into one combined array.
    List<java.security.cert.Certificate> chain = new ArrayList<>();
    for (CodeSigner signer : signers) {
        chain.addAll(signer.getSignerCertPath().getCertificates());
    }
    return chain.toArray(new java.security.cert.Certificate[chain.size()]);
}
/**
 * returns true if there no files to verify.
 * should only be called after all the META-INF entries
 * have been processed.
 */
boolean nothingToVerify()
{
    return !anyToVerify;
}
/**
 * called to let us know we have processed all the
 * META-INF entries, and if we re-read one of them, don't
 * re-process it. Also gets rid of any data structures
 * we needed when parsing META-INF entries.
 */
void doneWithMeta()
{
    parsingMeta = false;
    anyToVerify = !sigFileSigners.isEmpty();
    // Release parse-time state; it is no longer needed and can be large.
    baos = null;
    sigFileData = null;
    pendingBlocks = null;
    signerCache = null;
    manDig = null;
    // MANIFEST.MF is always treated as signed and verified,
    // move its signers from sigFileSigners to verifiedSigners.
    if (sigFileSigners.containsKey(JarFile.MANIFEST_NAME)) {
        CodeSigner[] codeSigners = sigFileSigners.remove(JarFile.MANIFEST_NAME);
        verifiedSigners.put(JarFile.MANIFEST_NAME, codeSigners);
    }
}
/**
 * An InputStream wrapper that feeds every byte read from a jar entry
 * through the JarVerifier, so that when the entry's declared size has
 * been consumed its digest can be checked against the manifest.
 */
static class VerifierStream extends java.io.InputStream {

    private InputStream is;           // null once closed (see close())
    private JarVerifier jv;
    private ManifestEntryVerifier mev;
    private long numLeft;             // bytes still expected, per JarEntry.getSize()

    VerifierStream(Manifest man,
                   JarEntry je,
                   InputStream is,
                   JarVerifier jv) throws IOException
    {
        // BEGIN Android-added: Throw IOE, not NPE, if stream is closed. http://b/110695212
        // To know that null signals that the stream has been closed, we disallow
        // it in the constructor. There's no need for anyone to pass null into this
        // constructor, anyway.
        if (is == null) {
            throw new NullPointerException("is == null");
        }
        // END Android-added: Throw IOE, not NPE, if stream is closed. http://b/110695212
        this.is = is;
        this.jv = jv;
        this.mev = new ManifestEntryVerifier(man);
        this.jv.beginEntry(je, mev);
        this.numLeft = je.getSize();
        // Zero-length entries are verified immediately.
        if (this.numLeft == 0)
            this.jv.update(-1, this.mev);
    }

    public int read() throws IOException
    {
        // BEGIN Android-added: Throw IOE, not NPE, if stream is closed. http://b/110695212
        if (is == null) {
            throw new IOException("stream closed");
        }
        // END Android-added: Throw IOE, not NPE, if stream is closed. http://b/110695212
        if (numLeft > 0) {
            int b = is.read();
            jv.update(b, mev);
            numLeft--;
            if (numLeft == 0)
                jv.update(-1, mev);
            return b;
        } else {
            return -1;
        }
    }

    public int read(byte b[], int off, int len) throws IOException {
        // BEGIN Android-added: Throw IOE, not NPE, if stream is closed. http://b/110695212
        if (is == null) {
            throw new IOException("stream closed");
        }
        // END Android-added: Throw IOE, not NPE, if stream is closed. http://b/110695212
        // Never read past the end of the entry.
        if ((numLeft > 0) && (numLeft < len)) {
            len = (int)numLeft;
        }

        if (numLeft > 0) {
            int n = is.read(b, off, len);
            // NOTE(review): if the underlying stream hits EOF early (n == -1),
            // numLeft -= n actually increases numLeft. This matches the
            // upstream JDK source; confirm before changing.
            jv.update(n, b, off, len, mev);
            numLeft -= n;
            if (numLeft == 0)
                jv.update(-1, b, off, len, mev);
            return n;
        } else {
            return -1;
        }
    }

    public void close()
        throws IOException
    {
        if (is != null)
            is.close();
        // A null 'is' signals "closed" to the read methods above.
        is = null;
        mev = null;
        jv = null;
    }

    public int available() throws IOException {
        // BEGIN Android-added: Throw IOE, not NPE, if stream is closed. http://b/110695212
        if (is == null) {
            throw new IOException("stream closed");
        }
        // END Android-added: Throw IOE, not NPE, if stream is closed. http://b/110695212
        return is.available();
    }
}
// Extended JavaUtilJarAccess CodeSource API Support
private Map<URL, Map<CodeSigner[], CodeSource>> urlToCodeSourceMap = new HashMap<>();
private Map<CodeSigner[], CodeSource> signerToCodeSource = new HashMap<>();
private URL lastURL;
private Map<CodeSigner[], CodeSource> lastURLMap;
/*
 * Create a unique mapping from codeSigner cache entries to CodeSource.
 * In theory, multiple URLs origins could map to a single locally cached
 * and shared JAR file although in practice there will be a single URL in use.
 */
private synchronized CodeSource mapSignersToCodeSource(URL url, CodeSigner[] signers) {
    Map<CodeSigner[], CodeSource> map;
    // One-slot memo of the most recent URL avoids a hash lookup on the hot path.
    if (url == lastURL) {
        map = lastURLMap;
    } else {
        map = urlToCodeSourceMap.get(url);
        if (map == null) {
            map = new HashMap<>();
            urlToCodeSourceMap.put(url, map);
        }
        lastURLMap = map;
        lastURL = url;
    }
    CodeSource cs = map.get(signers);
    if (cs == null) {
        cs = new VerifierCodeSource(csdomain, url, signers);
        // NOTE(review): the new CodeSource is cached in signerToCodeSource but
        // never put into 'map', so map.get(signers) above can never hit.
        // This mirrors the upstream JDK source; confirm before "fixing".
        signerToCodeSource.put(signers, cs);
    }
    return cs;
}
/**
 * Builds one CodeSource per signer group for the given URL, optionally
 * appending an unsigned (null-signer) CodeSource at the end.
 */
private CodeSource[] mapSignersToCodeSources(URL url, List<CodeSigner[]> signers, boolean unsigned) {
    List<CodeSource> sources = new ArrayList<>();

    for (CodeSigner[] group : signers) {
        sources.add(mapSignersToCodeSource(url, group));
    }
    if (unsigned) {
        sources.add(mapSignersToCodeSource(url, null));
    }
    return sources.toArray(new CodeSource[sources.size()]);
}
private CodeSigner[] emptySigner = new CodeSigner[0];
/*
 * Match CodeSource to a CodeSigner[] in the signer cache.
 *
 * Returns the cached signer array for a matching source, the shared empty
 * array for a matched-but-unsigned source, or null when nothing matches.
 */
private CodeSigner[] findMatchingSigners(CodeSource cs) {
    // Fast path: our own VerifierCodeSource instances expose their signers directly.
    if (cs instanceof VerifierCodeSource) {
        VerifierCodeSource vcs = (VerifierCodeSource) cs;
        if (vcs.isSameDomain(csdomain)) {
            return ((VerifierCodeSource) cs).getPrivateSigners();
        }
    }

    /*
     * In practice signers should always be optimized above
     * but this handles a CodeSource of any type, just in case.
     */
    CodeSource[] sources = mapSignersToCodeSources(cs.getLocation(), getJarCodeSigners(), true);
    List<CodeSource> sourceList = new ArrayList<>();
    for (int i = 0; i < sources.length; i++) {
        sourceList.add(sources[i]);
    }
    int j = sourceList.indexOf(cs);
    if (j != -1) {
        CodeSigner[] match;
        match = ((VerifierCodeSource) sourceList.get(j)).getPrivateSigners();
        if (match == null) {
            // Empty array (not null) distinguishes "matched, unsigned" from "no match".
            match = emptySigner;
        }
        return match;
    }
    return null;
}
/*
 * Instances of this class hold uncopied references to internal
 * signing data that can be compared by object reference identity.
 */
private static class VerifierCodeSource extends CodeSource {
    private static final long serialVersionUID = -9047366145967768825L;

    URL vlocation;
    CodeSigner[] vsigners;
    java.security.cert.Certificate[] vcerts;
    Object csdomain; // identity token tying this instance to one JarVerifier

    VerifierCodeSource(Object csdomain, URL location, CodeSigner[] signers) {
        super(location, signers);
        this.csdomain = csdomain;
        vlocation = location;
        vsigners = signers; // from signerCache
    }

    VerifierCodeSource(Object csdomain, URL location, java.security.cert.Certificate[] certs) {
        super(location, certs);
        this.csdomain = csdomain;
        vlocation = location;
        vcerts = certs; // from signerCache
    }

    /*
     * All VerifierCodeSource instances are constructed based on
     * singleton signerCache or signerCacheCert entries for each unique signer.
     * No CodeSigner<->Certificate[] conversion is required.
     * We use these assumptions to optimize equality comparisons.
     *
     * NOTE(review): equals is overridden without hashCode; presumably the
     * inherited CodeSource.hashCode stays consistent because equal instances
     * share the same super state — confirm before relying on hashing.
     */
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }
        if (obj instanceof VerifierCodeSource) {
            VerifierCodeSource that = (VerifierCodeSource) obj;

            /*
             * Only compare against other per-signer singletons constructed
             * on behalf of the same JarFile instance. Otherwise, compare
             * things the slower way.
             */
            if (isSameDomain(that.csdomain)) {
                // Reference comparison is valid because signer arrays are singletons.
                if (that.vsigners != this.vsigners
                        || that.vcerts != this.vcerts) {
                    return false;
                }
                if (that.vlocation != null) {
                    return that.vlocation.equals(this.vlocation);
                } else if (this.vlocation != null) {
                    return this.vlocation.equals(that.vlocation);
                } else { // both null
                    return true;
                }
            }
        }
        return super.equals(obj);
    }

    boolean isSameDomain(Object csdomain) {
        return this.csdomain == csdomain;
    }

    private CodeSigner[] getPrivateSigners() {
        return vsigners;
    }

    private java.security.cert.Certificate[] getPrivateCertificates() {
        return vcerts;
    }
}
private Map<String, CodeSigner[]> signerMap;

/*
 * Lazily builds (and caches) a snapshot of the asserted signers per entry
 * name, so later verification-state changes do not affect us. Validity of
 * the signatures themselves is checked through the JarEntry APIs.
 */
private synchronized Map<String, CodeSigner[]> signerMap() {
    if (signerMap == null) {
        Map<String, CodeSigner[]> snapshot =
                new HashMap<>(verifiedSigners.size() + sigFileSigners.size());
        snapshot.putAll(verifiedSigners);
        snapshot.putAll(sigFileSigners);
        signerMap = snapshot;
    }
    return signerMap;
}
/**
 * Enumerates the names of jar entries whose asserted signers match one of
 * the given CodeSources. If any given source corresponds to "unsigned",
 * the names of unsigned entries are appended to the enumeration.
 */
public synchronized Enumeration<String> entryNames(JarFile jar, final CodeSource[] cs) {
    final Map<String, CodeSigner[]> map = signerMap();
    final Iterator<Map.Entry<String, CodeSigner[]>> itor = map.entrySet().iterator();
    boolean matchUnsigned = false;

    /*
     * Grab a single copy of the CodeSigner arrays. Check
     * to see if we can optimize CodeSigner equality test.
     */
    List<CodeSigner[]> req = new ArrayList<>(cs.length);
    for (int i = 0; i < cs.length; i++) {
        CodeSigner[] match = findMatchingSigners(cs[i]);
        if (match != null) {
            if (match.length > 0) {
                req.add(match);
            } else {
                // Empty signer array: this source matches unsigned entries.
                matchUnsigned = true;
            }
        } else {
            // CodeSource unknown to this jar: fall back to unsigned matching.
            matchUnsigned = true;
        }
    }

    final List<CodeSigner[]> signersReq = req;
    // Second stream of names: unsigned entries, only when requested above.
    final Enumeration<String> enum2 = (matchUnsigned) ? unsignedEntryNames(jar) : emptyEnumeration;

    // Lazy concatenation of matching signed names followed by enum2, using
    // "name" as a one-element look-ahead buffer.
    return new Enumeration<String>() {
        String name;

        public boolean hasMoreElements() {
            if (name != null) {
                return true;
            }

            while (itor.hasNext()) {
                Map.Entry<String, CodeSigner[]> e = itor.next();
                if (signersReq.contains(e.getValue())) {
                    name = e.getKey();
                    return true;
                }
            }
            // Effectively an "if": the loop body always returns.
            while (enum2.hasMoreElements()) {
                name = enum2.nextElement();
                return true;
            }
            return false;
        }

        public String nextElement() {
            if (hasMoreElements()) {
                String value = name;
                name = null;
                return value;
            }
            throw new NoSuchElementException();
        }
    };
}
/*
 * Like entries() but screens out internal JAR mechanism entries
 * and includes signed entries with no ZIP data.
 */
public Enumeration<JarEntry> entries2(final JarFile jar, Enumeration<? extends ZipEntry> e) {
    // Private copy of the signer map: names are removed as they are
    // returned, so signed names without ZIP data are emitted exactly once
    // in the second pass below.
    final Map<String, CodeSigner[]> map = new HashMap<>();
    map.putAll(signerMap());
    final Enumeration<? extends ZipEntry> enum_ = e;

    return new Enumeration<JarEntry>() {
        Enumeration<String> signers = null; // started after ZIP data is exhausted
        JarEntry entry;                     // one-element look-ahead buffer

        public boolean hasMoreElements() {
            if (entry != null) {
                return true;
            }

            // First pass: real ZIP entries, minus signing metadata.
            while (enum_.hasMoreElements()) {
                ZipEntry ze = enum_.nextElement();
                if (JarVerifier.isSigningRelated(ze.getName())) {
                    continue;
                }
                entry = jar.newEntry(ze);
                return true;
            }

            // Second pass: signed names that had no ZIP data left in "map".
            if (signers == null) {
                signers = Collections.enumeration(map.keySet());
            }
            while (signers.hasMoreElements()) {
                String name = signers.nextElement();
                entry = jar.newEntry(new ZipEntry(name));
                return true;
            }

            // Any map entries left?
            return false;
        }

        public JarEntry nextElement() {
            if (hasMoreElements()) {
                JarEntry je = entry;
                // Consume the name so the second pass skips it.
                map.remove(je.getName());
                entry = null;
                return je;
            }
            throw new NoSuchElementException();
        }
    };
}
// Shared empty enumeration. Collections.emptyEnumeration() has exactly the
// contract of the previous hand-rolled anonymous class: hasMoreElements()
// is false and nextElement() throws NoSuchElementException.
private Enumeration<String> emptyEnumeration = Collections.emptyEnumeration();
/**
 * Returns true if {@code name} denotes a file that is part of the
 * signature mechanism itself; delegates to SignatureFileVerifier.
 */
static boolean isSigningRelated(String name) {
    return SignatureFileVerifier.isSigningRelated(name);
}
/**
 * Enumerates the names of entries that carry no asserted signature,
 * skipping directories and signing metadata.
 */
private Enumeration<String> unsignedEntryNames(JarFile jar) {
    final Map<String, CodeSigner[]> map = signerMap();
    final Enumeration<JarEntry> entries = jar.entries();
    return new Enumeration<String>() {
        String name; // one-element look-ahead buffer

        /*
         * Grab entries from ZIP directory but screen out
         * metadata.
         */
        public boolean hasMoreElements() {
            if (name != null) {
                return true;
            }
            while (entries.hasMoreElements()) {
                String value;
                ZipEntry e = entries.nextElement();
                value = e.getName();
                if (e.isDirectory() || isSigningRelated(value)) {
                    continue;
                }
                // Unsigned == absent from the signer map.
                if (map.get(value) == null) {
                    name = value;
                    return true;
                }
            }
            return false;
        }

        public String nextElement() {
            if (hasMoreElements()) {
                String value = name;
                name = null;
                return value;
            }
            throw new NoSuchElementException();
        }
    };
}
private List<CodeSigner[]> jarCodeSigners;

/**
 * Returns the list of distinct CodeSigner arrays asserted anywhere in this
 * jar, computed lazily from the signer map. Distinctness is by array
 * identity, matching the per-signer singletons in the signer cache.
 */
private synchronized List<CodeSigner[]> getJarCodeSigners() {
    if (jarCodeSigners == null) {
        // Deduplicate via a set, then cache as a list. (The previous
        // revision declared an unused local "CodeSigner[] signers".)
        HashSet<CodeSigner[]> set = new HashSet<>(signerMap().values());
        jarCodeSigners = new ArrayList<>(set);
    }
    return jarCodeSigners;
}
/**
 * Returns every CodeSource represented in this jar for the given URL,
 * appending an "unsigned" CodeSource when some entries carry no signature.
 */
public synchronized CodeSource[] getCodeSources(JarFile jar, URL url) {
    boolean includeUnsigned = unsignedEntryNames(jar).hasMoreElements();
    return mapSignersToCodeSources(url, getJarCodeSigners(), includeUnsigned);
}
/**
 * Returns the CodeSource for the named entry, mapping its asserted signers
 * (null for an unsigned entry) onto a cached per-signer CodeSource.
 */
public CodeSource getCodeSource(URL url, String name) {
    return mapSignersToCodeSource(url, signerMap().get(name));
}
/**
 * Returns the CodeSource for the given jar entry, mapping the entry's code
 * signers (if any) onto a cached per-signer CodeSource. (The previous
 * revision declared an unused local "CodeSigner[] signers".)
 */
public CodeSource getCodeSource(URL url, JarFile jar, JarEntry je) {
    return mapSignersToCodeSource(url, getCodeSigners(jar, je));
}
/** Enables or disables eager signature validation for this jar. */
public void setEagerValidation(boolean eager) {
    eagerValidation = eager;
}

/** Returns an unmodifiable view of the manifest digests collected so far. */
public synchronized List<Object> getManifestDigests() {
    return Collections.unmodifiableList(manifestDigests);
}

/** Builds the "unsigned" CodeSource (no signers, no certificates) for a URL. */
static CodeSource getUnsignedCS(URL url) {
    return new VerifierCodeSource(null, url, (java.security.cert.Certificate[]) null);
}
}
| mirego/j2objc | jre_emul/android/platform/libcore/ojluni/src/main/java/java/util/jar/JarVerifier.java | Java | apache-2.0 | 31,476 |
/** @file
*
* Copyright (c) 2012-2014, ARM Limited. All rights reserved.
*
* This program and the accompanying materials
* are licensed and made available under the terms and conditions of the BSD License
* which accompanies this distribution. The full text of the license may be found at
* http://opensource.org/licenses/bsd-license.php
*
* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
*
**/
#include "Lan9118Dxe.h"
STATIC EFI_MAC_ADDRESS mZeroMac = { { 0 } };
/**
This internal function reverses bits for 32bit data.
@param Value The data to be reversed.
@return Data reversed.
**/
UINT32
ReverseBits (
UINT32 Value
)
{
UINTN Index;
UINT32 NewValue;
NewValue = 0;
for (Index = 0; Index < 32; Index++) {
if ((Value & (1 << Index)) != 0) {
NewValue = NewValue | (1 << (31 - Index));
}
}
return NewValue;
}
/*
** Create Ethernet CRC
**
** INFO USED:
** 1: http://en.wikipedia.org/wiki/Cyclic_redundancy_check
**
** 2: http://www.erg.abdn.ac.uk/~gorry/eg3567/dl-pages/crc.html
**
** 3: http://en.wikipedia.org/wiki/Computation_of_CRC
**
** Computes the LSB-first (reflected) CRC over the first AddrLen bytes of
** Mac, seeded with 0xFFFFFFFF and with no final XOR, then bit-reverses the
** 32-bit remainder before returning it.
*/
UINT32
GenEtherCrc32 (
  IN    EFI_MAC_ADDRESS *Mac,
  IN    UINT32 AddrLen
  )
{
  INT32 Iter;
  UINT32 Remainder;
  UINT8 *Ptr;

  Iter = 0;
  Remainder = 0xFFFFFFFF; // 0xFFFFFFFF is standard seed for Ethernet

  // Convert Mac Address to array of bytes
  Ptr = (UINT8*)Mac;

  // Generate the Crc bit-by-bit (LSB first)
  while (AddrLen--) {
    Remainder ^= *Ptr++;
    for (Iter = 0;Iter < 8;Iter++) {
      // Check if exponent is set
      if (Remainder & 1) {
        Remainder = (Remainder >> 1) ^ CRC_POLYNOMIAL;
      } else {
        Remainder = (Remainder >> 1) ^ 0;
      }
    }
  }

  // Reverse the bits before returning (to Big Endian)
  //TODO: Need to be reviewed. Do we want to do a bit reverse or a byte reverse (in this case use SwapBytes32())
  return ReverseBits (Remainder);
}
// Function to read from MAC indirect registers (CSR index 0..12).
// NOTE(review): all CSR busy-waits in this file spin with no timeout; a
// wedged controller will hang the boot — consider bounding these loops.
UINT32
IndirectMACRead32 (
  UINT32 Index
  )
{
  UINT32 MacCSR;

  // Check index is in the range
  ASSERT(Index <= 12);

  // Wait until CSR busy bit is cleared
  while ((Lan9118MmioRead32 (LAN9118_MAC_CSR_CMD) & MAC_CSR_BUSY) == MAC_CSR_BUSY);

  // Set CSR busy bit to ensure read will occur
  // Set the R/W bit to indicate we are reading
  // Set the index of CSR Address to access desired register
  MacCSR = MAC_CSR_BUSY | MAC_CSR_READ | MAC_CSR_ADDR(Index);

  // Write to the register
  Lan9118MmioWrite32 (LAN9118_MAC_CSR_CMD, MacCSR);

  // Wait until CSR busy bit is cleared
  while ((Lan9118MmioRead32 (LAN9118_MAC_CSR_CMD) & MAC_CSR_BUSY) == MAC_CSR_BUSY);

  // Now read from data register to get read value
  return Lan9118MmioRead32 (LAN9118_MAC_CSR_DATA);
}
/*
 * LAN9118 chips have special restrictions on some back-to-back Write/Read or
 * Read/Read pairs of accesses. After a read or write that changes the state
 * of the device, there is a period in which stale values may be returned in
 * response to a read; its length depends on the registers accessed.
 *
 * Prior reads must therefore be delayed, either with timers or by performing
 * dummy reads of the BYTE_TEST register (the recommended read counts are
 * given in the LAN9118 data sheet), in addition to any memory barriers.
 *
 * This helper issues Count dummy reads of BYTE_TEST as the building block
 * for those delays.
 */
VOID
WaitDummyReads (
  UINTN Count
  )
{
  while (Count != 0) {
    MmioRead32 (LAN9118_BYTE_TEST);
    Count--;
  }
}
// MMIO read followed by "Delay" dummy BYTE_TEST reads (see WaitDummyReads);
// returns the value read from Address.
UINT32
Lan9118RawMmioRead32(
  UINTN Address,
  UINTN Delay
  )
{
  UINT32 Value;

  Value = MmioRead32(Address);
  WaitDummyReads(Delay);
  return Value;
}
// MMIO write followed by "Delay" dummy BYTE_TEST reads (see WaitDummyReads);
// returns the value that was written.
UINT32
Lan9118RawMmioWrite32(
  UINTN Address,
  UINT32 Value,
  UINTN Delay
  )
{
  MmioWrite32(Address, Value);
  WaitDummyReads(Delay);
  return Value;
}
// Function to write to MAC indirect registers (CSR index 0..12).
// Returns the value written. Busy-waits are unbounded (see IndirectMACRead32).
UINT32
IndirectMACWrite32 (
  UINT32 Index,
  UINT32 Value
  )
{
  UINT32 ValueWritten;
  UINT32 MacCSR;

  // Check index is in the range
  ASSERT(Index <= 12);

  // Wait until CSR busy bit is cleared
  while ((Lan9118MmioRead32 (LAN9118_MAC_CSR_CMD) & MAC_CSR_BUSY) == MAC_CSR_BUSY);

  // Set CSR busy bit to ensure read will occur
  // Set the R/W bit to indicate we are writing
  // Set the index of CSR Address to access desired register
  MacCSR = MAC_CSR_BUSY | MAC_CSR_WRITE | MAC_CSR_ADDR(Index);

  // Now write the value to the register before issuing the write command
  ValueWritten = Lan9118MmioWrite32 (LAN9118_MAC_CSR_DATA, Value);

  // Write the config to the register
  Lan9118MmioWrite32 (LAN9118_MAC_CSR_CMD, MacCSR);

  // Wait until CSR busy bit is cleared
  while ((Lan9118MmioRead32 (LAN9118_MAC_CSR_CMD) & MAC_CSR_BUSY) == MAC_CSR_BUSY);

  return ValueWritten;
}
// Function to read from MII register (PHY Access).
// NOTE(review): the ASSERT excludes register 31 even though MII register
// indices are 5 bits (0..31) — confirm whether "Index <= 31" was intended.
UINT32
IndirectPHYRead32 (
  UINT32 Index
  )
{
  UINT32 ValueRead;
  UINT32 MiiAcc;

  // Check it is a valid index
  ASSERT(Index < 31);

  // Wait for busy bit to clear
  while ((IndirectMACRead32 (INDIRECT_MAC_INDEX_MII_ACC) & MII_ACC_MII_BUSY) == MII_ACC_MII_BUSY);

  // Clear the R/W bit to indicate we are reading
  // Set the index of the MII register
  // Set the PHY Address
  // Set the MII busy bit to allow read
  MiiAcc = MII_ACC_MII_READ | MII_ACC_MII_REG_INDEX(Index) | MII_ACC_PHY_VALUE | MII_ACC_MII_BUSY;

  // Now write this config to register
  IndirectMACWrite32 (INDIRECT_MAC_INDEX_MII_ACC, MiiAcc & 0xFFFF);

  // Wait for busy bit to clear
  while ((IndirectMACRead32 (INDIRECT_MAC_INDEX_MII_ACC) & MII_ACC_MII_BUSY) == MII_ACC_MII_BUSY);

  // Now read the value of the register
  ValueRead = (IndirectMACRead32 (INDIRECT_MAC_INDEX_MII_DATA) & 0xFFFF); // only lower 16 bits are valid for any PHY register

  return ValueRead;
}
// Function to write to the MII register (PHY Access); returns the value
// written. (Comments below previously said "reading" — this is the write path.)
UINT32
IndirectPHYWrite32 (
  UINT32 Index,
  UINT32 Value
  )
{
  UINT32 MiiAcc;
  UINT32 ValueWritten;

  // Check it is a valid index
  ASSERT(Index < 31);

  // Wait for busy bit to clear
  while ((IndirectMACRead32 (INDIRECT_MAC_INDEX_MII_ACC) & MII_ACC_MII_BUSY) == MII_ACC_MII_BUSY);

  // Set the R/W bit to indicate we are writing
  // Set the index of the MII register
  // Set the PHY Address
  // Set the MII busy bit to start the operation
  MiiAcc = MII_ACC_MII_WRITE | MII_ACC_MII_REG_INDEX(Index) | MII_ACC_PHY_VALUE | MII_ACC_MII_BUSY;

  // Write the desired value to the register first
  ValueWritten = IndirectMACWrite32 (INDIRECT_MAC_INDEX_MII_DATA, (Value & 0xFFFF));

  // Now write the config to register
  IndirectMACWrite32 (INDIRECT_MAC_INDEX_MII_ACC, MiiAcc & 0xFFFF);

  // Wait for operation to terminate
  while ((IndirectMACRead32 (INDIRECT_MAC_INDEX_MII_ACC) & MII_ACC_MII_BUSY) == MII_ACC_MII_BUSY);

  return ValueWritten;
}
/* ---------------- EEPROM Operations ------------------ */


// Function to read from EEPROM memory; returns 0 if the controller reports
// a command timeout.
UINT32
IndirectEEPROMRead32 (
  UINT32 Index
  )
{
  UINT32 EepromCmd;

  // Set the busy bit to ensure read will occur
  EepromCmd = E2P_EPC_BUSY | E2P_EPC_CMD_READ;

  // Set the index to access desired EEPROM memory location
  EepromCmd |= E2P_EPC_ADDRESS(Index);

  // Write to Eeprom command register
  Lan9118MmioWrite32 (LAN9118_E2P_CMD, EepromCmd);

  // Wait until operation has completed
  while (Lan9118MmioRead32 (LAN9118_E2P_CMD) & E2P_EPC_BUSY);

  // Check that operation didn't time out
  if (Lan9118MmioRead32 (LAN9118_E2P_CMD) & E2P_EPC_TIMEOUT) {
    DEBUG ((EFI_D_ERROR, "EEPROM Operation Timed out: Read command on index %x\n",Index));
    return 0;
  }

  // Wait until operation has completed
  while (Lan9118MmioRead32 (LAN9118_E2P_CMD) & E2P_EPC_BUSY);

  // Finally read the value
  return Lan9118MmioRead32 (LAN9118_E2P_DATA);
}
// Function to write to EEPROM memory; returns the value written, or 0 if the
// controller reports a command timeout.
UINT32
IndirectEEPROMWrite32 (
  UINT32 Index,
  UINT32 Value
  )
{
  UINT32 ValueWritten;
  UINT32 EepromCmd;

  ValueWritten = 0;

  // Read-modify-write the EEPROM command register so unrelated bits are kept.
  EepromCmd = Lan9118MmioRead32 (LAN9118_E2P_CMD);

  // Set the busy bit so the controller executes the command (same bit the
  // read path polls via E2P_EPC_BUSY).
  EepromCmd |= E2P_EPC_BUSY;

  // Select the WRITE command (0b011) in the 3-bit command field [30:28].
  EepromCmd &= ~(7 << 28); // Clear the command field first
  EepromCmd |= (3 << 28);  // Write 011

  // Set the index to access the desired EEPROM memory location. Use the same
  // address macro as the read path; the previous "Index & 0xF" silently
  // truncated addresses above 15.
  EepromCmd |= E2P_EPC_ADDRESS(Index);

  // Write the value to the data register first
  ValueWritten = Lan9118MmioWrite32 (LAN9118_E2P_DATA, Value);

  // Write to Eeprom command register
  Lan9118MmioWrite32 (LAN9118_E2P_CMD, EepromCmd);

  // Wait until operation has completed
  while (Lan9118MmioRead32 (LAN9118_E2P_CMD) & E2P_EPC_BUSY);

  // Check that operation didn't time out
  if (Lan9118MmioRead32 (LAN9118_E2P_CMD) & E2P_EPC_TIMEOUT) {
    DEBUG ((EFI_D_ERROR, "EEPROM Operation Timed out: Write command at memloc 0x%x, with value 0x%x\n",Index, Value));
    return 0;
  }

  // Wait until operation has completed
  while (Lan9118MmioRead32 (LAN9118_E2P_CMD) & E2P_EPC_BUSY);

  return ValueWritten;
}
/* ---------------- General Operations ----------------- */

// Program the station (MAC) address into the controller: bytes 0-3 go to
// ADDRL, bytes 4-5 to ADDRH. All shifts are performed in UINT32: with plain
// int arithmetic, "(Addr[3] & 0xFF) << 24" shifts into the sign bit for byte
// values >= 0x80, which is undefined behavior in C.
VOID
Lan9118SetMacAddress (
  EFI_MAC_ADDRESS *Mac,
  EFI_SIMPLE_NETWORK_PROTOCOL *Snp
  )
{
  IndirectMACWrite32 (INDIRECT_MAC_INDEX_ADDRL,
                       (UINT32)(Mac->Addr[0] & 0xFF) |
                      ((UINT32)(Mac->Addr[1] & 0xFF) << 8) |
                      ((UINT32)(Mac->Addr[2] & 0xFF) << 16) |
                      ((UINT32)(Mac->Addr[3] & 0xFF) << 24)
                     );

  IndirectMACWrite32 (INDIRECT_MAC_INDEX_ADDRH,
                       (UINT32)(Mac->Addr[4] & 0xFF) |
                      ((UINT32)(Mac->Addr[5] & 0xFF) << 8)
                     );
}
// Read the station (MAC) address back from the controller into MacAddress;
// unused trailing bytes of the EFI_MAC_ADDRESS are zeroed.
VOID
Lan9118ReadMacAddress (
  OUT EFI_MAC_ADDRESS *MacAddress
  )
{
  UINT32 MacAddrHighValue;
  UINT32 MacAddrLowValue;

  // Read the Mac Addr high register (only low 16 bits are valid)
  MacAddrHighValue = (IndirectMACRead32 (INDIRECT_MAC_INDEX_ADDRH) & 0xFFFF);
  // Read the Mac Addr low register
  MacAddrLowValue = IndirectMACRead32 (INDIRECT_MAC_INDEX_ADDRL);

  SetMem (MacAddress, sizeof(*MacAddress), 0);
  MacAddress->Addr[0] = (MacAddrLowValue & 0xFF);
  MacAddress->Addr[1] = (MacAddrLowValue & 0xFF00) >> 8;
  MacAddress->Addr[2] = (MacAddrLowValue & 0xFF0000) >> 16;
  MacAddress->Addr[3] = (MacAddrLowValue & 0xFF000000) >> 24;
  MacAddress->Addr[4] = (MacAddrHighValue & 0xFF);
  MacAddress->Addr[5] = (MacAddrHighValue & 0xFF00) >> 8;
}
/*
 * Power up the 9118 and find its MAC address.
 *
 * This operation can be carried out when the LAN9118 is in any power state.
 * Returns EFI_TIMEOUT if the device or the EEPROM controller does not
 * become ready, EFI_SUCCESS otherwise.
 */
EFI_STATUS
Lan9118Initialize (
  IN  EFI_SIMPLE_NETWORK_PROTOCOL *Snp
  )
{
  UINTN  Retries;
  UINT64 DefaultMacAddress;

  // Attempt to wake-up the device if it is in a lower power state
  if (((Lan9118MmioRead32 (LAN9118_PMT_CTRL) & MPTCTRL_PM_MODE_MASK) >> 12) != 0) {
    DEBUG ((DEBUG_NET, "Waking from reduced power state.\n"));
    Lan9118MmioWrite32 (LAN9118_BYTE_TEST, 0xFFFFFFFF);
  }

  // Check that device is active (bounded poll: 20 * LAN9118_STALL us)
  Retries = 20;
  while ((Lan9118MmioRead32 (LAN9118_PMT_CTRL) & MPTCTRL_READY) == 0 && --Retries) {
    gBS->Stall (LAN9118_STALL);
  }
  if (!Retries) {
    return EFI_TIMEOUT;
  }

  // Check that EEPROM isn't active (bounded poll as above)
  Retries = 20;
  while ((Lan9118MmioRead32 (LAN9118_E2P_CMD) & E2P_EPC_BUSY) && --Retries){
    gBS->Stall (LAN9118_STALL);
  }
  if (!Retries) {
    return EFI_TIMEOUT;
  }

  // Check if a MAC address was loaded from EEPROM, and if it was, set it as the
  // current address.
  if ((Lan9118MmioRead32 (LAN9118_E2P_CMD) & E2P_EPC_MAC_ADDRESS_LOADED) == 0) {
    DEBUG ((EFI_D_ERROR, "Warning: There was an error detecting EEPROM or loading the MAC Address.\n"));

    // If we had an address before (set by StationAddess), continue to use it
    if (CompareMem (&Snp->Mode->CurrentAddress, &mZeroMac, NET_ETHER_ADDR_LEN)) {
      Lan9118SetMacAddress (&Snp->Mode->CurrentAddress, Snp);
    } else {
      // If there are no cached addresses, then fall back to a default
      DEBUG ((EFI_D_WARN, "Warning: using driver-default MAC address\n"));
      DefaultMacAddress = FixedPcdGet64 (PcdLan9118DefaultMacAddress);
      Lan9118SetMacAddress((EFI_MAC_ADDRESS *) &DefaultMacAddress, Snp);
      CopyMem (&Snp->Mode->CurrentAddress, &DefaultMacAddress, NET_ETHER_ADDR_LEN);
    }
  } else {
    // Store the MAC address that was loaded from EEPROM
    Lan9118ReadMacAddress (&Snp->Mode->CurrentAddress);
    CopyMem (&Snp->Mode->PermanentAddress, &Snp->Mode->CurrentAddress, NET_ETHER_ADDR_LEN);
  }

  // Clear and acknowledge interrupts
  Lan9118MmioWrite32 (LAN9118_INT_EN, 0);
  Lan9118MmioWrite32 (LAN9118_IRQ_CFG, 0);
  Lan9118MmioWrite32 (LAN9118_INT_STS, 0xFFFFFFFF);

  // Do self tests here?

  return EFI_SUCCESS;
}
// Perform software reset on the LAN9118.
// Returns EFI_SUCCESS on success, EFI_TIMEOUT if the reset does not complete
// (the previous header comment claimed "0 / -1", which did not match the
// EFI_STATUS return type).
EFI_STATUS
SoftReset (
  UINT32 Flags,
  EFI_SIMPLE_NETWORK_PROTOCOL *Snp
  )
{
  UINT32 HwConf;
  UINT32 ResetTime;

  // Initialize variable
  ResetTime = 0;

  // Stop Rx and Tx
  StopTx (STOP_TX_MAC | STOP_TX_CFG | STOP_TX_CLEAR, Snp);
  StopRx (STOP_RX_CLEAR, Snp); // Clear receiver FIFO

  // Prepare the soft-reset request. Use the named bit rather than a bare
  // "1": the completion poll below already relies on HWCFG_SRST being this
  // self-clearing reset bit.
  HwConf = Lan9118MmioRead32 (LAN9118_HW_CFG);
  HwConf |= HWCFG_SRST;

  // Set the Must Be One (MBO) bit
  if (((HwConf & HWCFG_MBO) >> 20) == 0) {
    HwConf |= HWCFG_MBO;
  }

  // Check that EEPROM isn't active
  while (Lan9118MmioRead32 (LAN9118_E2P_CMD) & E2P_EPC_BUSY);

  // Write the configuration; this issues the reset
  Lan9118MmioWrite32 (LAN9118_HW_CFG, HwConf);

  // Wait for reset to complete (SRST self-clears)
  while (Lan9118MmioRead32 (LAN9118_HW_CFG) & HWCFG_SRST) {

    gBS->Stall (LAN9118_STALL);
    ResetTime += 1;

    // Give up after 1000 polls (1000 * LAN9118_STALL microseconds)
    if (ResetTime > 1000) {
      Snp->Mode->State = EfiSimpleNetworkStopped;
      return EFI_TIMEOUT;
    }
  }

  // Check that EEPROM isn't active
  while (Lan9118MmioRead32 (LAN9118_E2P_CMD) & E2P_EPC_BUSY);

  // TODO we probably need to re-set the mac address here.

  // Clear and acknowledge all interrupts
  if (Flags & SOFT_RESET_CLEAR_INT) {
    Lan9118MmioWrite32 (LAN9118_INT_EN, 0);
    Lan9118MmioWrite32 (LAN9118_IRQ_CFG, 0);
    Lan9118MmioWrite32 (LAN9118_INT_STS, 0xFFFFFFFF);
  }

  // Do self tests here?
  if (Flags & SOFT_RESET_SELF_TEST) {
  }

  return EFI_SUCCESS;
}
// Perform PHY software reset, either through the PMT_CTRL register or the
// PHY Basic Control Register (PMT takes precedence when both flags are set).
EFI_STATUS
PhySoftReset (
  UINT32 Flags,
  EFI_SIMPLE_NETWORK_PROTOCOL *Snp
  )
{
  UINT32 PmtCtrl = 0;

  // PMT PHY reset takes precedence over BCR
  if (Flags & PHY_RESET_PMT) {
    PmtCtrl = Lan9118MmioRead32 (LAN9118_PMT_CTRL);
    PmtCtrl |= MPTCTRL_PHY_RST;
    Lan9118MmioWrite32 (LAN9118_PMT_CTRL,PmtCtrl);

    // Wait for completion (unbounded; the bit self-clears)
    while (Lan9118MmioRead32 (LAN9118_PMT_CTRL) & MPTCTRL_PHY_RST) {
      gBS->Stall (LAN9118_STALL);
    }
  // PHY Basic Control Register reset
  } else if (Flags & PHY_RESET_BCR) {
    IndirectPHYWrite32 (PHY_INDEX_BASIC_CTRL, PHYCR_RESET);

    // Wait for completion (unbounded; the bit self-clears)
    while (IndirectPHYRead32 (PHY_INDEX_BASIC_CTRL) & PHYCR_RESET) {
      gBS->Stall (LAN9118_STALL);
    }
  }

  // Clear and acknowledge all interrupts
  if (Flags & PHY_SOFT_RESET_CLEAR_INT) {
    Lan9118MmioWrite32 (LAN9118_INT_EN, 0);
    Lan9118MmioWrite32 (LAN9118_IRQ_CFG, 0);
    Lan9118MmioWrite32 (LAN9118_INT_STS, 0xFFFFFFFF);
  }

  return EFI_SUCCESS;
}
// Configure hardware for LAN9118. Currently only configures the GPIO pins
// as push-pull LED outputs when HW_CONF_USE_LEDS is set.
EFI_STATUS
ConfigureHardware (
  UINT32 Flags,
  EFI_SIMPLE_NETWORK_PROTOCOL *Snp
  )
{
  UINT32 GpioConf;

  // Check if we want to use LEDs on GPIO
  if (Flags & HW_CONF_USE_LEDS) {
    GpioConf = Lan9118MmioRead32 (LAN9118_GPIO_CFG);

    // Enable GPIO as LEDs and Config as Push-Pull driver
    GpioConf |= GPIO_GPIO0_PUSH_PULL | GPIO_GPIO1_PUSH_PULL | GPIO_GPIO2_PUSH_PULL |
                GPIO_LED1_ENABLE | GPIO_LED2_ENABLE | GPIO_LED3_ENABLE;

    // Write the configuration
    Lan9118MmioWrite32 (LAN9118_GPIO_CFG, GpioConf);
  }

  return EFI_SUCCESS;
}
// Configure flow control.
// NOTE(review): this is an unimplemented stub — all parameters are ignored
// and it unconditionally reports success.
EFI_STATUS
ConfigureFlow (
  UINT32 Flags,
  UINT32 HighTrig,
  UINT32 LowTrig,
  UINT32 BPDuration,
  EFI_SIMPLE_NETWORK_PROTOCOL *Snp
  )
{
  return EFI_SUCCESS;
}
// Do auto-negotiation: wait for link, advertise the configured abilities,
// then (re)start auto-negotiation and wait for it to complete.
// Returns EFI_DEVICE_ERROR if the PHY cannot auto-negotiate, EFI_TIMEOUT if
// the link does not come up within the configured window.
EFI_STATUS
AutoNegotiate (
  UINT32 Flags,
  EFI_SIMPLE_NETWORK_PROTOCOL *Snp
  )
{
  UINT32 PhyControl;
  UINT32 PhyStatus;
  UINT32 Features;
  UINT32 Retries;

  // First check that auto-negotiation is supported
  PhyStatus = IndirectPHYRead32 (PHY_INDEX_BASIC_STATUS);
  if ((PhyStatus & PHYSTS_AUTO_CAP) == 0) {
    DEBUG ((EFI_D_ERROR, "Auto-negotiation not supported.\n"));
    return EFI_DEVICE_ERROR;
  }

  // Check that link is up first
  if ((PhyStatus & PHYSTS_LINK_STS) == 0) {
    // Wait until it is up or until Time Out (poll period is LAN9118_STALL us)
    Retries = FixedPcdGet32 (PcdLan9118DefaultNegotiationTimeout) / LAN9118_STALL;
    while ((IndirectPHYRead32 (PHY_INDEX_BASIC_STATUS) & PHYSTS_LINK_STS) == 0) {
      gBS->Stall (LAN9118_STALL);
      Retries--;
      if (!Retries) {
        DEBUG ((EFI_D_ERROR, "Link timeout in auto-negotiation.\n"));
        return EFI_TIMEOUT;
      }
    }
  }

  // Configure features to advertise
  Features = IndirectPHYRead32 (PHY_INDEX_AUTO_NEG_ADVERT);
  if ((Flags & AUTO_NEGOTIATE_ADVERTISE_ALL) > 0) {
    // Link speed capabilities
    Features |= (PHYANA_10BASET | PHYANA_10BASETFD | PHYANA_100BASETX | PHYANA_100BASETXFD);

    // Pause frame capabilities: set both bits of the pause-operation field
    Features &= ~(PHYANA_PAUSE_OP_MASK);
    Features |= 3 << 10;
  }
  // Restrict advertisement to what the platform allows
  Features &= FixedPcdGet32 (PcdLan9118NegotiationFeatureMask);

  // Write the features
  IndirectPHYWrite32 (PHY_INDEX_AUTO_NEG_ADVERT, Features);

  // Read control register
  PhyControl = IndirectPHYRead32 (PHY_INDEX_BASIC_CTRL);

  // Enable Auto-Negotiation
  if ((PhyControl & PHYCR_AUTO_EN) == 0) {
    PhyControl |= PHYCR_AUTO_EN;
  }

  // Restart auto-negotiation
  PhyControl |= PHYCR_RST_AUTO;

  // Enable collision test if required to do so
  if (Flags & AUTO_NEGOTIATE_COLLISION_TEST) {
    PhyControl |= PHYCR_COLL_TEST;
  } else {
    PhyControl &= ~ PHYCR_COLL_TEST;
  }

  // Write this configuration
  IndirectPHYWrite32 (PHY_INDEX_BASIC_CTRL, PhyControl);

  // Wait until process has completed
  // NOTE(review): this final wait is unbounded, unlike the link wait above.
  while ((IndirectPHYRead32 (PHY_INDEX_BASIC_STATUS) & PHYSTS_AUTO_COMP) == 0);

  return EFI_SUCCESS;
}
// Report whether the PHY currently has an established link:
// EFI_SUCCESS when up, EFI_DEVICE_ERROR otherwise.
EFI_STATUS
CheckLinkStatus (
  UINT32 Flags,
  EFI_SIMPLE_NETWORK_PROTOCOL *Snp
  )
{
  if (IndirectPHYRead32 (PHY_INDEX_BASIC_STATUS) & PHYSTS_LINK_STS) {
    return EFI_SUCCESS;
  }
  return EFI_DEVICE_ERROR;
}
// Stop the transmitter. Flags select which parts to act on:
// STOP_TX_CLEAR dumps the TX status/data FIFOs, STOP_TX_MAC disables the
// MAC-level transmitter, STOP_TX_CFG stops the TX_CFG path and waits for
// in-flight transmission to drain.
EFI_STATUS
StopTx (
  UINT32 Flags,
  EFI_SIMPLE_NETWORK_PROTOCOL *Snp
  )
{
  UINT32 MacCsr;
  UINT32 TxCfg;

  MacCsr = 0;
  TxCfg = 0;

  // Check if we want to clear tx
  if (Flags & STOP_TX_CLEAR) {
    TxCfg = Lan9118MmioRead32 (LAN9118_TX_CFG);
    TxCfg |= TXCFG_TXS_DUMP | TXCFG_TXD_DUMP;
    Lan9118MmioWrite32 (LAN9118_TX_CFG, TxCfg);
  }

  // Check if already stopped
  if (Flags & STOP_TX_MAC) {
    MacCsr = IndirectMACRead32 (INDIRECT_MAC_INDEX_CR);

    if (MacCsr & MACCR_TX_EN) {
      MacCsr &= ~MACCR_TX_EN;
      IndirectMACWrite32 (INDIRECT_MAC_INDEX_CR, MacCsr);
    }
  }

  if (Flags & STOP_TX_CFG) {
    TxCfg = Lan9118MmioRead32 (LAN9118_TX_CFG);

    if (TxCfg & TXCFG_TX_ON) {
      TxCfg |= TXCFG_STOP_TX;
      Lan9118MmioWrite32 (LAN9118_TX_CFG, TxCfg);

      // Wait for Tx to finish transmitting (unbounded; STOP_TX self-clears)
      while (Lan9118MmioRead32 (LAN9118_TX_CFG) & TXCFG_STOP_TX);
    }
  }

  return EFI_SUCCESS;
}
// Stop the receiver by disabling it at the MAC level; STOP_RX_CLEAR
// additionally dumps the receiver FIFOs and waits for the dump to finish.
EFI_STATUS
StopRx (
  UINT32 Flags,
  EFI_SIMPLE_NETWORK_PROTOCOL *Snp
  )
{
  UINT32 MacCsr;
  UINT32 RxCfg;

  RxCfg = 0;

  // Check if already stopped
  MacCsr = IndirectMACRead32 (INDIRECT_MAC_INDEX_CR);

  if (MacCsr & MACCR_RX_EN) {
    MacCsr &= ~ MACCR_RX_EN;
    IndirectMACWrite32 (INDIRECT_MAC_INDEX_CR, MacCsr);
  }

  // Check if we want to clear receiver FIFOs
  if (Flags & STOP_RX_CLEAR) {
    RxCfg = Lan9118MmioRead32 (LAN9118_RX_CFG);
    RxCfg |= RXCFG_RX_DUMP;
    Lan9118MmioWrite32 (LAN9118_RX_CFG, RxCfg);

    // RX_DUMP self-clears when the dump completes (unbounded wait)
    while (Lan9118MmioRead32 (LAN9118_RX_CFG) & RXCFG_RX_DUMP);
  }

  return EFI_SUCCESS;
}
// Start the transmitter. START_TX_CLEAR dumps the TX FIFOs first;
// START_TX_MAC enables the MAC-level transmitter; START_TX_CFG enables the
// TX_CFG data path. Each step is skipped when already in the desired state.
EFI_STATUS
StartTx (
  UINT32 Flags,
  EFI_SIMPLE_NETWORK_PROTOCOL *Snp
  )
{
  UINT32 MacCsr;
  UINT32 TxCfg;

  MacCsr = 0;
  TxCfg = 0;

  // Check if we want to clear tx
  if (Flags & START_TX_CLEAR) {
    TxCfg = Lan9118MmioRead32 (LAN9118_TX_CFG);
    TxCfg |= TXCFG_TXS_DUMP | TXCFG_TXD_DUMP;
    Lan9118MmioWrite32 (LAN9118_TX_CFG, TxCfg);
  }

  // Check if tx was started from MAC and enable if not
  if (Flags & START_TX_MAC) {
    MacCsr = IndirectMACRead32 (INDIRECT_MAC_INDEX_CR);

    if ((MacCsr & MACCR_TX_EN) == 0) {
      MacCsr |= MACCR_TX_EN;
      IndirectMACWrite32 (INDIRECT_MAC_INDEX_CR, MacCsr);
    }
  }

  // Check if tx was started from TX_CFG and enable if not
  if (Flags & START_TX_CFG) {
    TxCfg = Lan9118MmioRead32 (LAN9118_TX_CFG);

    if ((TxCfg & TXCFG_TX_ON) == 0) {
      TxCfg |= TXCFG_TX_ON;
      Lan9118MmioWrite32 (LAN9118_TX_CFG, TxCfg);
    }
  }

  // Set the tx data trigger level

  return EFI_SUCCESS;
}
// Start the receiver (no-op when already enabled at the MAC level).
// START_RX_CLEAR dumps the receiver FIFOs before enabling.
EFI_STATUS
StartRx (
  UINT32 Flags,
  EFI_SIMPLE_NETWORK_PROTOCOL *Snp
  )
{
  UINT32 MacCsr;
  UINT32 RxCfg;

  RxCfg = 0;

  // Check if already started
  MacCsr = IndirectMACRead32 (INDIRECT_MAC_INDEX_CR);

  if ((MacCsr & MACCR_RX_EN) == 0) {
    // Check if we want to clear receiver FIFOs before starting
    if (Flags & START_RX_CLEAR) {
      RxCfg = Lan9118MmioRead32 (LAN9118_RX_CFG);
      RxCfg |= RXCFG_RX_DUMP;
      Lan9118MmioWrite32 (LAN9118_RX_CFG, RxCfg);

      // RX_DUMP self-clears when the dump completes (unbounded wait)
      while (Lan9118MmioRead32 (LAN9118_RX_CFG) & RXCFG_RX_DUMP);
    }

    MacCsr |= MACCR_RX_EN;
    IndirectMACWrite32 (INDIRECT_MAC_INDEX_CR, MacCsr);
  }

  return EFI_SUCCESS;
}
// Free space in the TX data FIFO, in bytes (reported directly by hardware).
UINT32
TxDataFreeSpace (
  UINT32 Flags,
  EFI_SIMPLE_NETWORK_PROTOCOL *Snp
  )
{
  UINT32 TxInf;

  TxInf = Lan9118MmioRead32 (LAN9118_TX_FIFO_INF);
  return TxInf & TXFIFOINF_TDFREE_MASK;
}

// Used space in the TX status FIFO, in bytes (the TXSUSED field counts
// DWORDs, so scale by 4).
UINT32
TxStatusUsedSpace (
  UINT32 Flags,
  EFI_SIMPLE_NETWORK_PROTOCOL *Snp
  )
{
  UINT32 TxInf;

  TxInf = Lan9118MmioRead32 (LAN9118_TX_FIFO_INF);
  return ((TxInf & TXFIFOINF_TXSUSED_MASK) >> 16) << 2;
}

// Used space in the RX data FIFO, in bytes (rounded up to the nearest DWORD
// by the hardware).
UINT32
RxDataUsedSpace (
  UINT32 Flags,
  EFI_SIMPLE_NETWORK_PROTOCOL *Snp
  )
{
  UINT32 RxInf;

  RxInf = Lan9118MmioRead32 (LAN9118_RX_FIFO_INF);
  return RxInf & RXFIFOINF_RXDUSED_MASK;
}

// Used space in the RX status FIFO, in bytes (the RXSUSED field counts
// DWORDs, so scale by 4).
UINT32
RxStatusUsedSpace (
  UINT32 Flags,
  EFI_SIMPLE_NETWORK_PROTOCOL *Snp
  )
{
  UINT32 RxInf;

  RxInf = Lan9118MmioRead32 (LAN9118_RX_FIFO_INF);
  return ((RxInf & RXFIFOINF_RXSUSED_MASK) >> 16) << 2;
}
// Change the allocation of FIFOs between TX data, RX data and RX status by
// selecting one of the hardware's fixed TX FIFO size options (TX_FIO_SIZE
// field of HW_CFG). The requested *TxDataSize is rounded up to the next
// supported option and the companion RX sizes are reported back through the
// OUT parameters.
EFI_STATUS
ChangeFifoAllocation (
  IN      UINT32 Flags,
  IN  OUT UINTN  *TxDataSize    OPTIONAL,
  IN  OUT UINTN  *RxDataSize    OPTIONAL,
  IN  OUT UINT32 *TxStatusSize  OPTIONAL,
  IN  OUT UINT32 *RxStatusSize  OPTIONAL,
  IN  OUT EFI_SIMPLE_NETWORK_PROTOCOL *Snp
  )
{
  UINT32 HwConf;
  UINT32 TxFifoOption;

  // Check that desired sizes don't exceed limits
  if (*TxDataSize > TX_FIFO_MAX_SIZE)
    return EFI_INVALID_PARAMETER;

#if defined(RX_FIFO_MIN_SIZE) && defined(RX_FIFO_MAX_SIZE)
  if (*RxDataSize > RX_FIFO_MAX_SIZE) {
    return EFI_INVALID_PARAMETER;
  }
#endif

  if (Flags & ALLOC_USE_DEFAULT) {
    return EFI_SUCCESS;
  }

  // If we use the FIFOs (always use this first)
  if (Flags & ALLOC_USE_FIFOS) {
    // Read the current value of allocation
    HwConf = Lan9118MmioRead32 (LAN9118_HW_CFG);
    TxFifoOption = (HwConf >> 16) & 0xF;

    // Choose the correct size (always use larger than requested if possible)
    // NOTE(review): a request of exactly TX_FIFO_MIN_SIZE matches neither
    // the "<" branch nor the following ">" branch, leaving the sizes and
    // option unchanged — confirm whether "<=" was intended here.
    if (*TxDataSize < TX_FIFO_MIN_SIZE) {
      *TxDataSize = TX_FIFO_MIN_SIZE;
      *RxDataSize = 13440;
      *RxStatusSize = 896;
      TxFifoOption = 2;
    } else if ((*TxDataSize > TX_FIFO_MIN_SIZE) && (*TxDataSize <= 2560)) {
      *TxDataSize = 2560;
      *RxDataSize = 12480;
      *RxStatusSize = 832;
      TxFifoOption = 3;
    } else if ((*TxDataSize > 2560) && (*TxDataSize <= 3584)) {
      *TxDataSize = 3584;
      *RxDataSize = 11520;
      *RxStatusSize = 768;
      TxFifoOption = 4;
    } else if ((*TxDataSize > 3584) && (*TxDataSize <= 4608)) { // default option
      *TxDataSize = 4608;
      *RxDataSize = 10560;
      *RxStatusSize = 704;
      TxFifoOption = 5;
    } else if ((*TxDataSize > 4608) && (*TxDataSize <= 5632)) {
      *TxDataSize = 5632;
      *RxDataSize = 9600;
      *RxStatusSize = 640;
      TxFifoOption = 6;
    } else if ((*TxDataSize > 5632) && (*TxDataSize <= 6656)) {
      *TxDataSize = 6656;
      *RxDataSize = 8640;
      *RxStatusSize = 576;
      TxFifoOption = 7;
    } else if ((*TxDataSize > 6656) && (*TxDataSize <= 7680)) {
      *TxDataSize = 7680;
      *RxDataSize = 7680;
      *RxStatusSize = 512;
      TxFifoOption = 8;
    } else if ((*TxDataSize > 7680) && (*TxDataSize <= 8704)) {
      *TxDataSize = 8704;
      *RxDataSize = 6720;
      *RxStatusSize = 448;
      TxFifoOption = 9;
    } else if ((*TxDataSize > 8704) && (*TxDataSize <= 9728)) {
      *TxDataSize = 9728;
      *RxDataSize = 5760;
      *RxStatusSize = 384;
      TxFifoOption = 10;
    } else if ((*TxDataSize > 9728) && (*TxDataSize <= 10752)) {
      *TxDataSize = 10752;
      *RxDataSize = 4800;
      *RxStatusSize = 320;
      TxFifoOption = 11;
    } else if ((*TxDataSize > 10752) && (*TxDataSize <= 11776)) {
      *TxDataSize = 11776;
      *RxDataSize = 3840;
      *RxStatusSize = 256;
      TxFifoOption = 12;
    } else if ((*TxDataSize > 11776) && (*TxDataSize <= 12800)) {
      *TxDataSize = 12800;
      *RxDataSize = 2880;
      *RxStatusSize = 192;
      TxFifoOption = 13;
    } else if ((*TxDataSize > 12800) && (*TxDataSize <= 13824)) {
      *TxDataSize = 13824;
      *RxDataSize = 1920;
      *RxStatusSize = 128;
      TxFifoOption = 14;
    }
  } else {
    ASSERT(0); // Untested code path
    HwConf = 0;
    TxFifoOption = 0;
  }

  // Do we need DMA?
  if (Flags & ALLOC_USE_DMA) {
    return EFI_UNSUPPORTED; // Unsupported as of now
  }

  // Clear and assign the new size option (TX_FIO_SIZE field, bits 19:16)
  HwConf &= ~(0xF0000);
  HwConf |= ((TxFifoOption & 0xF) << 16);
  Lan9118MmioWrite32 (LAN9118_HW_CFG, HwConf);

  return EFI_SUCCESS;
}
| google/google-ctf | third_party/edk2/EmbeddedPkg/Drivers/Lan9118Dxe/Lan9118DxeUtil.c | C | apache-2.0 | 28,050 |
/*
* Copyright 2016 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thoughtworks.go.config.materials.git;
import com.googlecode.junit.ext.JunitExtRunner;
import com.thoughtworks.go.domain.materials.Modification;
import com.thoughtworks.go.domain.materials.RevisionContext;
import com.thoughtworks.go.domain.materials.TestSubprocessExecutionContext;
import com.thoughtworks.go.domain.materials.git.GitCommand;
import com.thoughtworks.go.domain.materials.git.GitTestRepo;
import com.thoughtworks.go.domain.materials.mercurial.StringRevision;
import com.thoughtworks.go.helper.TestRepo;
import com.thoughtworks.go.util.SystemEnvironment;
import com.thoughtworks.go.util.TestFileUtil;
import org.hamcrest.Matchers;
import org.hamcrest.core.Is;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static com.thoughtworks.go.domain.materials.git.GitTestRepo.*;
import static com.thoughtworks.go.util.command.ProcessOutputStreamConsumer.inMemoryConsumer;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@RunWith(JunitExtRunner.class)
public class GitMaterialShallowCloneTest {
    // Fixture: a disposable upstream git repository plus a local working
    // directory the material clones into; both are recreated for every test.
    private GitTestRepo repo;
    private File workingDir;
    @Before
    public void setup() throws Exception {
        repo = new GitTestRepo();
        workingDir = TestFileUtil.createUniqueTempFolder("working");
    }
    @After
    public void teardown() throws Exception {
        // Removes the repositories registered by GitTestRepo/TestRepo.
        TestRepo.internalTearDown();
    }
    // Shallow cloning must be opt-in, whichever constructor or config is used.
    @Test
    public void defaultShallowFlagIsOff() throws Exception {
        assertThat(new GitMaterial(repo.projectRepositoryUrl()).isShallowClone(), is(false));
        assertThat(new GitMaterial(repo.projectRepositoryUrl(), null).isShallowClone(), is(false));
        assertThat(new GitMaterial(repo.projectRepositoryUrl(), true).isShallowClone(), is(true));
        assertThat(new GitMaterial(new GitMaterialConfig(repo.projectRepositoryUrl())).isShallowClone(), is(false));
        assertThat(new GitMaterial(new GitMaterialConfig(repo.projectRepositoryUrl(), GitMaterialConfig.DEFAULT_BRANCH, true)).isShallowClone(), is(true));
        assertThat(new GitMaterial(new GitMaterialConfig(repo.projectRepositoryUrl(), GitMaterialConfig.DEFAULT_BRANCH, false)).isShallowClone(), is(false));
        TestRepo.internalTearDown();
    }
    // A shallow clone (depth 1) should still report the latest modification,
    // while older revisions (e.g. REVISION_0) are absent from the local repo.
    @Test
    public void shouldGetLatestModificationWithShallowClone() throws IOException {
        GitMaterial material = new GitMaterial(repo.projectRepositoryUrl(), true);
        List<Modification> mods = material.latestModification(workingDir, context());
        assertThat(mods.size(), is(1));
        assertThat(mods.get(0).getComment(), Matchers.is("Added 'run-till-file-exists' ant target"));
        assertThat(localRepoFor(material).isShallow(), is(true));
        assertThat(localRepoFor(material).containsRevisionInBranch(REVISION_0), is(false));
        assertThat(localRepoFor(material).currentRevision(), is(REVISION_4.getRevision()));
    }
    // Asking for modifications since a revision that the shallow clone did not
    // initially fetch should still produce the full history back to it,
    // newest first.
    @Test
    public void shouldGetModificationSinceANotInitiallyClonedRevision() {
        GitMaterial material = new GitMaterial(repo.projectRepositoryUrl(), true);
        List<Modification> modifications = material.modificationsSince(workingDir, REVISION_0, context());
        assertThat(modifications.size(), is(4));
        assertThat(modifications.get(0).getRevision(), is(REVISION_4.getRevision()));
        assertThat(modifications.get(0).getComment(), is("Added 'run-till-file-exists' ant target"));
        assertThat(modifications.get(1).getRevision(), is(REVISION_3.getRevision()));
        assertThat(modifications.get(1).getComment(), is("adding build.xml"));
        assertThat(modifications.get(2).getRevision(), is(REVISION_2.getRevision()));
        assertThat(modifications.get(2).getComment(), is("Created second.txt from first.txt"));
        assertThat(modifications.get(3).getRevision(), is(REVISION_1.getRevision()));
        assertThat(modifications.get(3).getComment(), is("Added second line"));
    }
    // updateTo must deepen the clone just enough to reach a revision that the
    // initial shallow fetch did not include.
    @Test
    public void shouldBeAbleToUpdateToRevisionNotFetched() {
        GitMaterial material = new GitMaterial(repo.projectRepositoryUrl(), true);
        material.updateTo(inMemoryConsumer(), workingDir, new RevisionContext(REVISION_3, REVISION_2, 2), context());
        assertThat(localRepoFor(material).currentRevision(), is(REVISION_3.getRevision()));
        assertThat(localRepoFor(material).containsRevisionInBranch(REVISION_2), is(true));
        assertThat(localRepoFor(material).containsRevisionInBranch(REVISION_3), is(true));
    }
    // The shallow flag must round-trip through the material's config object.
    @Test
    public void configShouldIncludesShallowFlag() {
        GitMaterialConfig shallowConfig = (GitMaterialConfig) new GitMaterial(repo.projectRepositoryUrl(), true).config();
        assertThat(shallowConfig.isShallowClone(), is(true));
        GitMaterialConfig normalConfig = (GitMaterialConfig) new GitMaterial(repo.projectRepositoryUrl(), null).config();
        assertThat(normalConfig.isShallowClone(), is(false));
    }
    // The shallow flag must be exposed in the XML attribute map.
    @Test
    public void xmlAttributesShouldIncludesShallowFlag() {
        GitMaterial material = new GitMaterial(repo.projectRepositoryUrl(), true);
        assertThat(material.getAttributesForXml().get("shallowClone"), Is.<Object>is(true));
    }
    // ... and in the generic attribute map, nested under "git-configuration".
    @Test
    public void attributesShouldIncludeShallowFlag() {
        GitMaterial material = new GitMaterial(repo.projectRepositoryUrl(), true);
        Map gitConfig = (Map) (material.getAttributes(false).get("git-configuration"));
        assertThat(gitConfig.get("shallow-clone"), Is.<Object>is(true));
    }
    // Turning the shallow flag off for a material whose working copy is
    // already a shallow clone should unshallow it on the next poll.
    @Test
    public void shouldConvertExistingRepoToFullRepoWhenShallowCloneIsOff() {
        GitMaterial material = new GitMaterial(repo.projectRepositoryUrl(), true);
        material.latestModification(workingDir, context());
        assertThat(localRepoFor(material).isShallow(), is(true));
        material = new GitMaterial(repo.projectRepositoryUrl(), false);
        material.latestModification(workingDir, context());
        assertThat(localRepoFor(material).isShallow(), is(false));
    }
    // withShallowClone returns a copy with the flag overridden; the original
    // material is left untouched.
    @Test
    public void withShallowCloneShouldGenerateANewMaterialWithOverriddenShallowConfig() {
        GitMaterial original = new GitMaterial(repo.projectRepositoryUrl(), false);
        assertThat(original.withShallowClone(true).isShallowClone(), is(true));
        assertThat(original.withShallowClone(false).isShallowClone(), is(false));
        assertThat(original.isShallowClone(), is(false));
    }
    // Updating a shallow working copy to a newly pushed revision should fetch
    // the new commit without converting the clone into a full one.
    @Test
    public void updateToANewRevisionShouldNotResultInUnshallowing() throws IOException {
        GitMaterial material = new GitMaterial(repo.projectRepositoryUrl(), true);
        material.updateTo(inMemoryConsumer(), workingDir, new RevisionContext(REVISION_4, REVISION_4, 1), context());
        assertThat(localRepoFor(material).isShallow(), is(true));
        List<Modification> modifications = repo.addFileAndPush("newfile", "add new file");
        StringRevision newRevision = new StringRevision(modifications.get(0).getRevision());
        material.updateTo(inMemoryConsumer(), workingDir, new RevisionContext(newRevision, newRevision, 1), context());
        assertThat(new File(workingDir, "newfile").exists(), is(true));
        assertThat(localRepoFor(material).isShallow(), is(true));
    }
    // On the server side, polling with GO_SERVER_SHALLOW_CLONE=false must
    // fully unshallow the repo before computing modifications.
    @Test
    public void shouldUnshallowServerSideRepoCompletelyOnRetrievingModificationsSincePreviousRevision() {
        SystemEnvironment mockSystemEnvironment = mock(SystemEnvironment.class);
        GitMaterial material = new GitMaterial(repo.projectRepositoryUrl(), true);
        when(mockSystemEnvironment.get(SystemEnvironment.GO_SERVER_SHALLOW_CLONE)).thenReturn(false);
        material.modificationsSince(workingDir, REVISION_4, new TestSubprocessExecutionContext(mockSystemEnvironment, true));
        assertThat(localRepoFor(material).isShallow(), is(false));
    }
    // Conversely, with GO_SERVER_SHALLOW_CLONE=true an already-shallow repo
    // must stay shallow.
    @Test
    public void shouldNotUnshallowOnServerSideIfShallowClonePropertyIsOnAndRepoIsAlreadyShallow() {
        SystemEnvironment mockSystemEnvironment = mock(SystemEnvironment.class);
        GitMaterial material = new GitMaterial(repo.projectRepositoryUrl(), true);
        when(mockSystemEnvironment.get(SystemEnvironment.GO_SERVER_SHALLOW_CLONE)).thenReturn(true);
        material.modificationsSince(workingDir, REVISION_4, new TestSubprocessExecutionContext(mockSystemEnvironment, false));
        assertThat(localRepoFor(material).isShallow(), is(true));
    }
    // Fresh default execution context for each call site.
    private TestSubprocessExecutionContext context() {
        return new TestSubprocessExecutionContext();
    }
    // Wraps the on-disk working copy in a GitCommand so tests can inspect its
    // shallowness and revision state directly.
    private GitCommand localRepoFor(GitMaterial material) {
        return new GitCommand(material.getFingerprint(), workingDir, GitMaterialConfig.DEFAULT_BRANCH, false, new HashMap<String, String>());
    }
}
| aj-jaswanth/gocd | common/test/unit/com/thoughtworks/go/config/materials/git/GitMaterialShallowCloneTest.java | Java | apache-2.0 | 9,551 |
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.logging.log4j.core.layout;
import static org.junit.Assert.assertEquals;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
import org.apache.commons.csv.CSVFormat;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.categories.Layouts;
import org.apache.logging.log4j.core.Appender;
import org.apache.logging.log4j.core.BasicConfigurationFactory;
import org.apache.logging.log4j.core.Logger;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.ConfigurationFactory;
import org.apache.logging.log4j.junit.ThreadContextRule;
import org.apache.logging.log4j.test.appender.ListAppender;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
 * Tests {@link AbstractCsvLayout}.
 *
 * @since 2.4
 */
@Category(Layouts.Csv.class)
public class CsvLogEventLayoutTest {
    // Bare-bones configuration factory installed for the whole class so that
    // layouts are built against a predictable, minimal configuration.
    static ConfigurationFactory cf = new BasicConfigurationFactory();
    @Rule
    public final ThreadContextRule threadContextRule = new ThreadContextRule();
    @AfterClass
    public static void cleanupClass() {
        ConfigurationFactory.removeConfigurationFactory(cf);
    }
    @BeforeClass
    public static void setupClass() {
        ConfigurationFactory.setConfigurationFactory(cf);
        final LoggerContext ctx = LoggerContext.getContext();
        // Force the context to pick up the test configuration factory.
        ctx.reconfigure();
    }
    private final LoggerContext ctx = LoggerContext.getContext();
    private final Logger root = ctx.getRootLogger();
    // The content type must reflect an explicitly chosen charset.
    @Test
    public void testCustomCharset() {
        final AbstractCsvLayout layout = CsvLogEventLayout.createLayout(null, "Excel", null, null, null, null, null,
                null, StandardCharsets.UTF_16, null, null);
        assertEquals("text/csv; charset=UTF-16", layout.getContentType());
    }
    // Header/footer strings passed to the factory must be emitted around the
    // logged events.
    @Test
    public void testHeaderFooter() {
        final String header = "# Header";
        final String footer = "# Footer ";
        final AbstractCsvLayout layout = CsvLogEventLayout.createLayout(ctx.getConfiguration(), "Excel", null, null,
                null, null, null, null, null, header, footer);
        testLayout(CSVFormat.DEFAULT, layout, header, footer);
    }
    // UTF-8 is the documented default charset ...
    @Test
    public void testDefaultCharset() {
        final AbstractCsvLayout layout = CsvLogEventLayout.createDefaultLayout();
        assertEquals(StandardCharsets.UTF_8, layout.getCharset());
    }
    // ... and therefore the default content type.
    @Test
    public void testDefaultContentType() {
        final AbstractCsvLayout layout = CsvLogEventLayout.createDefaultLayout();
        assertEquals("text/csv; charset=UTF-8", layout.getContentType());
    }
    // Convenience overload: exercise a CSV format with no header/footer.
    private void testLayout(final CSVFormat format) {
        testLayout(format, CsvLogEventLayout.createLayout(format), null, null);
    }
    // Core assertion helper: logs two events through a ListAppender using the
    // given layout and verifies the rendered CSV lines (and optional
    // header/footer) against the format's delimiter and quoting rules.
    private void testLayout(final CSVFormat format, final AbstractCsvLayout layout, final String header, final String footer) {
        final Map<String, Appender> appenders = root.getAppenders();
        // Detach any appenders left over from other tests.
        for (final Appender appender : appenders.values()) {
            root.removeAppender(appender);
        }
        // set up appender
        final ListAppender appender = new ListAppender("List", null, layout, true, false);
        appender.start();
        // set appender on root and set level to debug
        root.addAppender(appender);
        root.setLevel(Level.DEBUG);
        root.debug("one={}, two={}, three={}", 1, 2, 3);
        root.info("Hello");
        appender.stop();
        final List<String> list = appender.getMessages();
        final boolean hasHeaderSerializer = layout.getHeaderSerializer() != null;
        final boolean hasFooterSerializer = layout.getFooterSerializer() != null;
        // When a header is present the first logged event shifts down by one.
        final int headerOffset = hasHeaderSerializer ? 1 : 0;
        final String event0 = list.get(0 + headerOffset);
        final String event1 = list.get(1 + headerOffset);
        final char del = format.getDelimiter();
        Assert.assertTrue(event0, event0.contains(del + "DEBUG" + del));
        // Comma-delimited formats quote the message because it contains commas.
        final String quote = del == ',' ? "\"" : "";
        Assert.assertTrue(event0, event0.contains(del + quote + "one=1, two=2, three=3" + quote + del));
        Assert.assertTrue(event1, event1.contains(del + "INFO" + del));
        // The presence of header/footer serializers must match the arguments.
        if (hasHeaderSerializer && header == null) {
            Assert.fail();
        }
        if (!hasHeaderSerializer && header != null) {
            Assert.fail();
        }
        if (hasFooterSerializer && footer == null) {
            Assert.fail();
        }
        if (!hasFooterSerializer && footer != null) {
            Assert.fail();
        }
        if (hasHeaderSerializer) {
            Assert.assertEquals(list.toString(), header, list.get(0));
        }
        if (hasFooterSerializer) {
            Assert.assertEquals(list.toString(), footer, list.get(list.size() - 1));
        }
    }
    @Test
    public void testLayoutDefault() throws Exception {
        testLayout(CSVFormat.DEFAULT);
    }
    @Test
    public void testLayoutExcel() throws Exception {
        testLayout(CSVFormat.EXCEL);
    }
    @Test
    public void testLayoutMySQL() throws Exception {
        testLayout(CSVFormat.MYSQL);
    }
    @Test
    public void testLayoutRFC4180() throws Exception {
        testLayout(CSVFormat.RFC4180);
    }
    @Test
    public void testLayoutTab() throws Exception {
        testLayout(CSVFormat.TDF);
    }
}
| codescale/logging-log4j2 | log4j-core/src/test/java/org/apache/logging/log4j/core/layout/CsvLogEventLayoutTest.java | Java | apache-2.0 | 6,292 |
# BEGIN_COPYRIGHT
#
# Copyright 2009-2015 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""
Test suite for top-level functions.
"""
import unittest
import os
import tempfile
import shutil
import pydoop
class TestPydoop(unittest.TestCase):
    """Tests for pydoop's top-level environment-dependent helpers.

    Each test mutates the process environment and/or reloads the pydoop
    module; setUp/tearDown snapshot and restore both.
    """

    def setUp(self):
        # Private scratch directory for symlinks and fake conf dirs.
        self.wd = tempfile.mkdtemp(prefix='pydoop_test_')
        self.old_env = os.environ.copy()

    def tearDown(self):
        shutil.rmtree(self.wd)
        # Restore the environment *in place*.  Rebinding the name as in
        # ``os.environ = self.old_env`` would replace the special environ
        # mapping with a plain dict, so later assignments would no longer
        # propagate to the real process environment (no putenv calls).
        os.environ.clear()
        os.environ.update(self.old_env)
        reload(pydoop)

    def test_home(self):
        # Point HADOOP_HOME at a symlink to the real installation and check
        # that pydoop picks up the new location after a reload.  Skipped
        # silently if no hadoop home directory exists on this machine.
        old_home = pydoop.hadoop_home()
        if os.path.isdir(old_home):
            new_home = os.path.join(self.wd, 'hadoop')
            os.symlink(old_home, new_home)
            os.environ['HADOOP_HOME'] = new_home
            reload(pydoop)
            self.assertEqual(pydoop.hadoop_home(), new_home)

    def test_conf(self):
        # HADOOP_CONF_DIR must be honored verbatim after a reload.
        os.environ['HADOOP_CONF_DIR'] = self.wd
        reload(pydoop)
        self.assertEqual(pydoop.hadoop_conf(), self.wd)

    def test_pydoop_jar_path(self):
        # If a jar is bundled, it must exist on disk, carry the expected file
        # name and live in a directory named 'pydoop'.
        jar_path = pydoop.jar_path()
        if jar_path is not None:
            self.assertTrue(os.path.exists(jar_path))
            directory, filename = os.path.split(jar_path)
            self.assertEqual(filename, pydoop.jar_name())
            self.assertEqual('pydoop', os.path.basename(directory))
def suite():
    """Build the test suite for this module, preserving test order."""
    result = unittest.TestSuite()
    for test_name in ('test_home', 'test_conf', 'test_pydoop_jar_path'):
        result.addTest(TestPydoop(test_name))
    return result
if __name__ == '__main__':
    # Run the suite directly with verbose per-test output.
    unittest.TextTestRunner(verbosity=2).run(suite())
| ilveroluca/pydoop | test/common/test_pydoop.py | Python | apache-2.0 | 2,152 |
/*
* Licensed to Apereo under one or more contributor license
* agreements. See the NOTICE file distributed with this work
* for additional information regarding copyright ownership.
* Apereo licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a
* copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#pragma warning disable 1591
using System;
using System.ComponentModel;
using System.Diagnostics;
using System.Xml;
using System.Xml.Serialization;
namespace DotNetCasClient.Validation.Schema.SoapEnvelope
{
[Serializable]
[DebuggerStepThrough]
[DesignerCategory("code")]
[XmlType(Namespace="http://schemas.xmlsoap.org/soap/envelope/")]
[XmlRoot("detail", Namespace = "http://schemas.xmlsoap.org/soap/envelope/", IsNullable = false)]
public class Detail {
[XmlAnyElement]
public XmlElement[] Any
{
get;
set;
}
[XmlAnyAttribute]
public XmlAttribute[] AnyAttr
{
get;
set;
}
}
}
#pragma warning restore 1591 | apereo/dotnet-cas-client | DotNetCasClient/Validation/Schema/SoapEnvelope/Detail.cs | C# | apache-2.0 | 1,525 |
package org.apache.taverna.scufl2.api.common;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * The top level interface for all objects in a workflow.
 *
 * @author Alan R Williams
 * @author Stian Soiland-Reyes
 */
public interface WorkflowBean extends Cloneable {
	/**
	 * Accepts a {@link Visitor} to this <code>WorkflowBean</code>.
	 *
	 * @param visitor
	 *            the <code>Visitor</code> to accept
	 * @return <code>true</code> if this <code>WorkflowBeans</code> children
	 *         should be visited.
	 */
	boolean accept(Visitor visitor);

	/**
	 * Create a deep clone of this bean.
	 * <p>
	 * The cloned bean will have equivalent properties as the original bean. Any
	 * {@link Child} beans which parent match this bean will be cloned as well
	 * (recursively), non-child beans will remain the same. If this bean is a
	 * {@link Child}, the returned clone will not have a parent set.
	 * <p>
	 * Note that children whose {@link Child#getParent()} is <code>null</code>
	 * might not be cloned, to avoid this, use
	 * {@link Scufl2Tools#setParents(org.apache.taverna.scufl2.api.container.WorkflowBundle)}
	 * before cloning.
	 *
	 * @return A cloned workflow bean
	 */
	// @Override deliberately commented out: an interface may redeclare
	// Object.clone() but cannot annotate it with @Override.
	WorkflowBean clone();
}
| binfalse/incubator-taverna-language | taverna-scufl2-api/src/main/java/org/apache/taverna/scufl2/api/common/WorkflowBean.java | Java | apache-2.0 | 1,995 |
<!--
* Copyright (c) 2016 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
-->
<!DOCTYPE html>
<html>
<head>
<title>Liberty Starter</title>
<!-- <link rel="stylesheet" href="css/bootstrap.min.css">-->
<meta charset="utf-8"/>
<link
href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css"
rel="stylesheet" type="text/css" />
<link
href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap-theme.min.css"
rel="stylesheet" type="text/css" />
<link rel="stylesheet" href="css/liberty.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
<script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/angularjs/1.4.3/angular.min.js"></script>
<script src="https://cdn.rawgit.com/eligrey/FileSaver.js/master/FileSaver.js"></script>
<script src="js/v1/plugin.js"></script>
</head>
<body>
<div ng-app="libertyTechnologyDeveloperApp" ng-controller="libertyTechnologyDeveloperController">
<div class="container-fluid">
<div class="row">
<div id="header" class="col-sm-12">
<div class="page-header">
<h1>Build your own Liberty starter plugin!</h1>
<h2>Define the attributes of your Liberty technology, download the app, configure it and then deploy it somewhere.</h2>
</div>
</div>
</div>
<div class="row">
<!-- begin main view -->
<div id="main" class="col-sm-6">
<div class="panel panel-default">
<div class="panel-heading">
<h3 class="panel-title">Technology Details</h3>
</div>
<div class="panel-body">
<div class="row">
<div class="col-sm-1"></div>
<div class="col-sm-10">
<label>Technology Name:</label><input name="techName" type="text" data-ng-model="form.techName" />
<label>Unique namespace:</label><input name="namespace" type="text" data-ng-model="form.namespace" />
</div>
<div class="col-sm-1"></div>
</div>
<div class="row">
<div class="submitClass">
<div class="col-sm-1"></div>
<div class="col-sm-10">
<button type="button" class="btn btn-success btn-md"
ng-click="submitTech()">
<span class="glyphicon glyphicon-save"></span>Submit New Tech Type
</button>
</div>
<div class="col-sm-1"></div>
</div>
</div>
<div class="row" ng-if="returnedApiKey">
<div class="col-sm-1"></div>
<div class="col-sm-10">
The API Key is : {{returnedApiKey}}
</div>
<div class="col-sm-1"></div>
</div>
<div class="row" ng-if="failureMessage">
<div class="col-sm-1"></div>
<div class="col-sm-10">
Something went wrong : {{failureMessage}}
</div>
<div class="col-sm-1"></div>
</div>
</div>
</div>
</div>
<!-- begin side view -->
<div class="col-sm-6">
<div class="row">
<div class="panel panel-default">
<div class="panel-heading">
<h3 class="panel-title">Getting started...</h3>
</div>
<div class="panel-body">
<ol>
<li>Fill out the details above</li>
<li>Click <code>Submit new Tech Type</code>. Make a note of your API key (you need to keep this secure)</li>
<li>Unzip the project you download and add your project-specific files in</li>
<li>Start up your application</li>
</ol>
</div>
</div>
</div>
<div class="row">
<div class="panel panel-default">
<div class="panel-heading">
<h3 class="panel-title">Help us to help you!</h3>
</div>
<div class="panel-body">
                        <p>To give us feedback please
                        raise an <a href="https://github.com/WASdev/tool.accelerate.core/issues/">issue</a> on our <a href="https://github.com/WASdev/tool.accelerate.core/">GitHub page</a>.</p>
</div>
</div>
</div>
</div>
</div>
<div><hr/></div>
<div class="row">
<div class="col-sm-9">
<p>Copyright (c) 2015 IBM Corp.</p>
<p>Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. You
may obtain a copy of the License at
<a href="http://www.apache.org/licenses/LICENSE-2.0">www.apache.org/licenses/LICENSE-2.0</a>.</p>
<p>Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.<br>
See the License for the specific language governing permissions and
limitations under the License.</p>
</div>
<div class="col-sm-3">
<div class="row">
<div class="col-sm-10">
<div class="poweredBy">Powered by Liberty <img src="images/liberty-was-logo.png"/></div>
</div>
</div>
</div>
</div>
</div>
</div>
</body>
</html>
| WASdev/tool.accelerate.core | liberty-starter-application/src/main/webapp/plugin.html | HTML | apache-2.0 | 7,701 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.sdk.extensions.sql.impl.rule;
import org.apache.beam.sdk.extensions.sql.impl.rel.BeamLogicalConvention;
import org.apache.beam.sdk.extensions.sql.impl.rel.BeamProjectRel;
import org.apache.calcite.plan.Convention;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.convert.ConverterRule;
import org.apache.calcite.rel.core.Project;
import org.apache.calcite.rel.logical.LogicalProject;
/**
 * A {@code ConverterRule} that converts a logical {@link Project} node into a
 * Beam-executable {@link BeamProjectRel}.
 */
public class BeamProjectRule extends ConverterRule {
  /** Singleton instance registered with the planner. */
  public static final BeamProjectRule INSTANCE = new BeamProjectRule();

  private BeamProjectRule() {
    super(LogicalProject.class, Convention.NONE, BeamLogicalConvention.INSTANCE, "BeamProjectRule");
  }

  @Override
  public RelNode convert(RelNode rel) {
    final Project logicalProject = (Project) rel;
    final RelNode child = logicalProject.getInput();
    // Convert the child into Beam's logical convention before wrapping it.
    final RelNode convertedChild =
        convert(child, child.getTraitSet().replace(BeamLogicalConvention.INSTANCE));
    return new BeamProjectRel(
        logicalProject.getCluster(),
        logicalProject.getTraitSet().replace(BeamLogicalConvention.INSTANCE),
        convertedChild,
        logicalProject.getProjects(),
        logicalProject.getRowType());
  }
}
| tgroh/incubator-beam | sdks/java/extensions/sql/src/main/java/org/apache/beam/sdk/extensions/sql/impl/rule/BeamProjectRule.java | Java | apache-2.0 | 2,003 |
package org.ovirt.engine.ui.genericapi.returnvalues;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlType;
import org.ovirt.engine.core.common.queries.ValueObject;
@XmlAccessorType(XmlAccessType.NONE)
@XmlType(name = "UIQueryReturnValue")
public class UIQueryReturnValue {
    // Raw query result; serialized to XML only through the ValueObject wrapper
    // produced by getSerializaedReturnValue().
    private Object returnValue;
    // Whether the query completed successfully.
    private boolean succeeded;

    /**
     * Wraps the raw return value in a {@link ValueObject} for XML binding.
     * NOTE(review): the method name misspells "Serialized"; it is kept as-is
     * because renaming a public method would break existing callers.
     */
    @XmlElement(name = "ReturnValueWrapper")
    public ValueObject getSerializaedReturnValue() {
        return ValueObject.createValueObject(returnValue);
    }

    /** @return the raw (unwrapped) query return value */
    public Object getReturnValue() {
        return returnValue;
    }

    public void setReturnValue(Object returnValue) {
        this.returnValue = returnValue;
    }

    /** @return whether the query succeeded; bound to the "Succeeded" element */
    @XmlElement(name = "Succeeded")
    public boolean getSucceeded() {
        return succeeded;
    }

    public void setSucceeded(boolean value) {
        succeeded = value;
    }
}
| raksha-rao/gluster-ovirt | frontend/api/genericapi/src/main/java/org/ovirt/engine/ui/genericapi/returnvalues/UIQueryReturnValue.java | Java | apache-2.0 | 994 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.snapshot;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Collection;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
* Test that we correctly reload the cache, filter directories, etc.
*/
@Category(MediumTests.class)
public class TestSnapshotFileCache {
private static final Log LOG = LogFactory.getLog(TestSnapshotFileCache.class);
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static long sequenceId = 0;
private static FileSystem fs;
private static Path rootDir;
@BeforeClass
public static void startCluster() throws Exception {
UTIL.startMiniDFSCluster(1);
fs = UTIL.getDFSCluster().getFileSystem();
rootDir = UTIL.getDefaultRootDirPath();
}
@AfterClass
public static void stopCluster() throws Exception {
UTIL.shutdownMiniDFSCluster();
}
@After
public void cleanupFiles() throws Exception {
// cleanup the snapshot directory
Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
fs.delete(snapshotDir, true);
}
@Test(timeout = 10000000)
public void testLoadAndDelete() throws IOException {
// don't refresh the cache unless we tell it to
long period = Long.MAX_VALUE;
SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
"test-snapshot-file-cache-refresh", new SnapshotFiles());
createAndTestSnapshotV1(cache, "snapshot1a", false, true);
createAndTestSnapshotV1(cache, "snapshot1b", true, true);
createAndTestSnapshotV2(cache, "snapshot2a", false, true);
createAndTestSnapshotV2(cache, "snapshot2b", true, true);
}
@Test
public void testJustFindLogsDirectory() throws Exception {
// don't refresh the cache unless we tell it to
long period = Long.MAX_VALUE;
Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
"test-snapshot-file-cache-refresh", new SnapshotFileCache.SnapshotFileInspector() {
public Collection<String> filesUnderSnapshot(final Path snapshotDir)
throws IOException {
return SnapshotReferenceUtil.getHLogNames(fs, snapshotDir);
}
});
// create a file in a 'completed' snapshot
SnapshotDescription desc = SnapshotDescription.newBuilder().setName("snapshot").build();
Path snapshot = SnapshotDescriptionUtils.getCompletedSnapshotDir(desc, rootDir);
SnapshotDescriptionUtils.writeSnapshotInfo(desc, snapshot, fs);
Path file1 = new Path(new Path(new Path(snapshot, "7e91021"), "fam"), "file1");
fs.createNewFile(file1);
// and another file in the logs directory
Path logs = getSnapshotHLogsDir(snapshot, "server");
Path log = new Path(logs, "me.hbase.com%2C58939%2C1350424310315.1350424315552");
fs.createNewFile(log);
FSUtils.logFileSystemState(fs, rootDir, LOG);
// then make sure the cache only finds the log files
assertFalse("Cache found '" + file1 + "', but it shouldn't have.",
cache.contains(file1.getName()));
assertTrue("Cache didn't find:" + log, cache.contains(log.getName()));
}
  /**
   * Get the log directory for a specific snapshot
   * @param snapshotDir directory where the specific snapshot will be stored
   * @param serverName name of the parent regionserver for the log files
   * @return path to the log home directory for the archive files.
   */
  public static Path getSnapshotHLogsDir(Path snapshotDir, String serverName) {
    // Reuses the standard HLog directory naming so snapshot logs mirror the
    // live layout for the given server.
    return new Path(snapshotDir, HLogUtil.getHLogDirectoryName(serverName));
  }
  /**
   * Re-create snapshots under the same names after removal and make sure the
   * cache picks up the re-added directory contents.
   */
  @Test
  public void testReloadModifiedDirectory() throws IOException {
    // don't refresh the cache unless we tell it to
    long period = Long.MAX_VALUE;
    SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
        "test-snapshot-file-cache-refresh", new SnapshotFiles());
    // create a v1 snapshot and delete it again (removeOnExit = true)
    createAndTestSnapshotV1(cache, "snapshot1", false, true);
    // now recreate a snapshot with the same name; its new files must be found
    createAndTestSnapshotV1(cache, "snapshot1", false, false);
    // same exercise for the v2 snapshot layout
    createAndTestSnapshotV2(cache, "snapshot2", false, true);
    createAndTestSnapshotV2(cache, "snapshot2", false, false);
  }
  /**
   * Make sure both committed and still-in-progress ("tmp") snapshots are
   * visible through the cache without an explicit refresh.
   */
  @Test
  public void testSnapshotTempDirReload() throws IOException {
    long period = Long.MAX_VALUE;
    // This doesn't refresh cache until we invoke it explicitly
    SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
        "test-snapshot-file-cache-refresh", new SnapshotFiles());
    // Add a new non-tmp snapshot
    createAndTestSnapshotV1(cache, "snapshot0v1", false, false);
    createAndTestSnapshotV1(cache, "snapshot0v2", false, false);
    // Add a new tmp snapshot
    createAndTestSnapshotV2(cache, "snapshot1", true, false);
    // Add another tmp snapshot
    createAndTestSnapshotV2(cache, "snapshot2", true, false);
  }
class SnapshotFiles implements SnapshotFileCache.SnapshotFileInspector {
public Collection<String> filesUnderSnapshot(final Path snapshotDir) throws IOException {
Collection<String> files = new HashSet<String>();
files.addAll(SnapshotReferenceUtil.getHLogNames(fs, snapshotDir));
files.addAll(SnapshotReferenceUtil.getHFileNames(UTIL.getConfiguration(), fs, snapshotDir));
return files;
}
};
  /**
   * Build a v1-layout snapshot via {@link SnapshotMock} and run the shared
   * cache assertions.
   * @param cache cache under test
   * @param name snapshot name
   * @param tmp whether the snapshot stays in-progress (never committed)
   * @param removeOnExit whether to delete the snapshot afterwards and verify eviction
   */
  private void createAndTestSnapshotV1(final SnapshotFileCache cache, final String name,
      final boolean tmp, final boolean removeOnExit) throws IOException {
    SnapshotMock snapshotMock = new SnapshotMock(UTIL.getConfiguration(), fs, rootDir);
    SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV1(name);
    createAndTestSnapshot(cache, builder, tmp, removeOnExit);
  }
  /**
   * Build a v2-layout snapshot via {@link SnapshotMock} and run the shared
   * cache assertions.
   * @param cache cache under test
   * @param name snapshot name
   * @param tmp whether the snapshot stays in-progress (never committed)
   * @param removeOnExit whether to delete the snapshot afterwards and verify eviction
   */
  private void createAndTestSnapshotV2(final SnapshotFileCache cache, final String name,
      final boolean tmp, final boolean removeOnExit) throws IOException {
    SnapshotMock snapshotMock = new SnapshotMock(UTIL.getConfiguration(), fs, rootDir);
    SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2(name);
    createAndTestSnapshot(cache, builder, tmp, removeOnExit);
  }
  /**
   * Shared body of the snapshot/cache tests: add three regions' worth of
   * files, optionally commit the snapshot, assert every file is visible in
   * the cache, then (optionally) delete the snapshot and assert the files
   * disappear only after the next cache refresh.
   */
  private void createAndTestSnapshot(final SnapshotFileCache cache,
      final SnapshotMock.SnapshotBuilder builder,
      final boolean tmp, final boolean removeOnExit) throws IOException {
    List<String> files = new ArrayList<String>();
    for (int i = 0; i < 3; ++i) {
      for (Path filePath: builder.addRegion()) {
        String fileName = filePath.getName();
        if (tmp) {
          // We should be able to find all the files while the snapshot creation is in-progress
          FSUtils.logFileSystemState(fs, rootDir, LOG);
          assertTrue("Cache didn't find " + fileName, cache.contains(fileName));
        }
        files.add(fileName);
      }
    }
    // Finalize the snapshot
    if (!tmp) {
      builder.commit();
    }
    // Make sure that all files are still present
    for (String fileName: files) {
      assertTrue("Cache didn't find " + fileName, cache.contains(fileName));
    }
    FSUtils.logFileSystemState(fs, rootDir, LOG);
    if (removeOnExit) {
      LOG.debug("Deleting snapshot.");
      fs.delete(builder.getSnapshotsDir(), true);
      FSUtils.logFileSystemState(fs, rootDir, LOG);
      // The files should be in cache until next refresh
      for (String fileName: files) {
        assertTrue("Cache didn't find " + fileName, cache.contains(fileName));
      }
      // then trigger a refresh
      cache.triggerCacheRefreshForTesting();
      // and now it shouldn't find those files
      for (String fileName: files) {
        assertFalse("Cache found '" + fileName + "', but it shouldn't have.",
            cache.contains(fileName));
      }
    }
  }
}
| intel-hadoop/hbase-rhino | hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java | Java | apache-2.0 | 9,592 |
module.exports = function(grunt) {
require('load-grunt-tasks')(grunt);
grunt.initConfig({
clean: ['dist'],
ts: {
default : {
options: {
compiler: './node_modules/typescript/bin/tsc',
module: "commonjs",
fast: 'never',
preserveConstEnums: true
},
src: 'src/**/*.ts',
outDir: 'dist'
},
watch : {
options: {
compiler: './node_modules/typescript/bin/tsc',
module: "commonjs",
fast: 'never',
preserveConstEnums: true
},
src: 'src/**/*.ts',
watch: 'src/',
outDir: 'dist'
}
},
copy: {
main: {
src: './lib/runtime.d.ts',
dest: './dist/runtime.d.ts'
}
},
bump : {
options : {
files : ['package.json'],
updateConfigs : [],
commit : true,
commitMessage : 'chore(ver): v%VERSION%',
commitFiles : ['package.json', 'CHANGELOG.md'],
createTag : true,
tagName : 'v%VERSION%',
tagMessage : 'chore(ver): v%VERSION%',
push : true,
pushTo : 'origin',
gitDescribeOptions : '--tags --always --abbrev=1 --dirty=-d',
globalReplace : false,
prereleaseName : "rc",
regExp : false
}
},
shell : {
addChangelog : {
command : 'git add CHANGELOG.md'
}
},
changelog : {
options : {
}
}
});
grunt.registerTask("release", "Release a new version", function(target) {
if(!target) {
target = "minor";
}
return grunt.task.run("bump-only:" + target, "changelog", "shell:addChangelog", "bump-commit");
});
grunt.registerTask('default', ['ts:default', 'copy']);
};
| aayushkapoor206/whatshot | node_modules/awesome-typescript-loader/Gruntfile.js | JavaScript | apache-2.0 | 2,195 |
package com.michelboudreau.test;
import com.amazonaws.services.dynamodb.datamodeling.DynamoDBMapper;
import com.amazonaws.services.dynamodb.datamodeling.DynamoDBMapperConfig;
import com.amazonaws.services.dynamodb.datamodeling.DynamoDBQueryExpression;
import com.amazonaws.services.dynamodb.model.AttributeValue;
import com.amazonaws.services.dynamodb.model.ComparisonOperator;
import com.amazonaws.services.dynamodb.model.Condition;
import com.amazonaws.services.dynamodb.model.KeySchema;
import com.amazonaws.services.dynamodb.model.KeySchemaElement;
import com.amazonaws.services.dynamodb.model.ResourceInUseException;
import com.amazonaws.services.dynamodb.model.ScalarAttributeType;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import java.util.ArrayList;
import java.util.List;
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(locations =
{
    "classpath:/applicationContext.xml"
})
/**
 * Integration tests for {@link DynamoDBMapper} running against the Alternator
 * in-memory DynamoDB emulation: save/load/query/delete round-trips for
 * hash-key-only and hash+range-key model classes, plus query options
 * (scan direction, limits) and UTF-8 handling.
 */
public class AlternatorMapperTest extends AlternatorTest
{
    // Table names double as the mapper's table mapping for the two test model classes.
    private final String hashTableName = "mapper.TestClassWithHashKey";
    private final String hashRangeTableName = "mapper.TestClassWithRangeHashKey";

    private DynamoDBMapper mapper;

    /** Builds a mapper bound to the test client with the shared configuration. */
    private DynamoDBMapper createMapper()
    {
        return new DynamoDBMapper(getClient(), createMapperConfiguration());
    }

    /** CLOBBER saves (full overwrite) and consistent reads, so tests are deterministic. */
    private DynamoDBMapperConfig createMapperConfiguration()
    {
        return new DynamoDBMapperConfig(
                DynamoDBMapperConfig.SaveBehavior.CLOBBER,
                DynamoDBMapperConfig.ConsistentReads.CONSISTENT,
                null);
    }

    @Before
    public void setUp() throws Exception
    {
        mapper = createMapper();
    }

    @After
    public void tearDown() throws Exception
    {
        // Drop all tables so each test starts from a clean emulator state.
        deleteAllTables();
    }

    //Test: put item with HashKey
    @Test
    public void putItemWithHashKey()
    {
        KeySchema schema =
                new KeySchema(
                        new KeySchemaElement().withAttributeName("code").withAttributeType(ScalarAttributeType.S)
                );
        createTable(hashTableName, schema);

        TestClassWithHashKey value = new TestClassWithHashKey();
        value.setCode("hash1");
        value.setStringData("string1");
        value.setIntData(1);
        mapper.save(value);
    }

    /** Saving twice under the same hash key must overwrite (CLOBBER behavior). */
    @Test
    public void putItemWithHashKeyOverwriteItem()
    {
        KeySchema schema =
                new KeySchema(
                        new KeySchemaElement().withAttributeName("code").withAttributeType(ScalarAttributeType.S)
                );
        try {
            createTable(hashTableName, schema);
        } catch (ResourceInUseException riue) {
            // The table is already created, do nothing
        }

        TestClassWithHashKey value2a = new TestClassWithHashKey();
        value2a.setCode("hash2");
        value2a.setStringData("string2a");
        value2a.setIntData(21);
        mapper.save(value2a);

        TestClassWithHashKey value2b = new TestClassWithHashKey();
        value2b.setCode("hash2");
        value2b.setStringData("string2b");
        value2b.setIntData(22);
        mapper.save(value2b);
    }

    @Test
    public void putItemWithHashKeyAndRangeKey()
    {
        KeySchema schema =
                new KeySchema(
                        new KeySchemaElement().withAttributeName("hashCode").withAttributeType(ScalarAttributeType.S)
                );
        schema.setRangeKeyElement(new KeySchemaElement().withAttributeName(
                "rangeCode").withAttributeType(ScalarAttributeType.S));
        createTable(hashRangeTableName, schema);

        TestClassWithHashRangeKey value = new TestClassWithHashRangeKey();
        value.setHashCode("hash1");
        value.setRangeCode("range1");
        value.setStringData("string1");
        value.setIntData(1);
        mapper.save(value);
    }

    /** Saving twice under the same (hash, range) pair must overwrite. */
    @Test
    public void putItemWithHashKeyAndRangeKeyOverwriteItem()
    {
        KeySchema schema =
                new KeySchema(
                        new KeySchemaElement().withAttributeName("hashCode").withAttributeType(ScalarAttributeType.S)
                );
        schema.setRangeKeyElement(new KeySchemaElement().withAttributeName(
                "rangeCode").withAttributeType(ScalarAttributeType.S));
        try {
            createTable(hashRangeTableName, schema);
        } catch (ResourceInUseException riue) {
            // The table is already created
        }

        TestClassWithHashRangeKey value2a = new TestClassWithHashRangeKey();
        value2a.setHashCode("hash2");
        value2a.setRangeCode("range2");
        value2a.setStringData("string2a");
        value2a.setIntData(21);
        mapper.save(value2a);

        TestClassWithHashRangeKey value2b = new TestClassWithHashRangeKey();
        value2b.setHashCode("hash2");
        value2b.setRangeCode("range2");
        value2b.setStringData("string2b");
        value2b.setIntData(22);
        mapper.save(value2b);
    }

    /** load() by hash key returns the stored attributes. */
    @Test
    public void getHashItemTest()
    {
        putItemWithHashKey();
        String code = "hash1";
        TestClassWithHashKey value = mapper.load(TestClassWithHashKey.class, code);
        Assert.assertNotNull("Value not found.", value);
        Assert.assertEquals("Wrong code.", code, value.getCode());
        Assert.assertEquals("Wrong stringData.", "string1", value.getStringData());
        Assert.assertEquals("Wrong intData.", 1, value.getIntData());
    }

    /** load() for an absent hash key returns null. */
    @Test
    public void getUnknownHashItemTest()
    {
        KeySchema schema = new KeySchema(new KeySchemaElement().withAttributeName("code").withAttributeType(ScalarAttributeType.S));
        createTable(hashTableName, schema);
        String code = "hash1x";
        TestClassWithHashKey value = mapper.load(TestClassWithHashKey.class, code);
        Assert.assertNull("Value should not be found.", value);
    }

    /** load() by (hash, range) returns the latest overwrite, not a sibling range item. */
    @Test
    public void getHashRangeItemTest()
    {
        putItemWithHashKeyAndRangeKey();
        putItemWithHashKeyAndRangeKeyOverwriteItem();
        TestClassWithHashRangeKey value2c = new TestClassWithHashRangeKey();
        value2c.setHashCode("hash2");
        value2c.setRangeCode("range2c");
        value2c.setStringData("string2c");
        value2c.setIntData(23);
        mapper.save(value2c);

        String hashCode = "hash2";
        String rangeCode = "range2";
        TestClassWithHashRangeKey value = mapper.load(TestClassWithHashRangeKey.class, hashCode, rangeCode);
        Assert.assertNotNull("Value not found.", value);
        Assert.assertEquals("Wrong hashCode.", hashCode, value.getHashCode());
        Assert.assertEquals("Wrong rangeCode.", rangeCode, value.getRangeCode());
        Assert.assertEquals("Wrong stringData.", "string2b", value.getStringData());
        Assert.assertEquals("Wrong intData.", 22, value.getIntData());
    }

    /** load() returns null when either the hash or the range part is unknown. */
    @Test
    public void getUnknownHashRangeItemTest()
    {
        KeySchema schema = new KeySchema(new KeySchemaElement().withAttributeName("hashCode").withAttributeType(ScalarAttributeType.S));
        schema.setRangeKeyElement(new KeySchemaElement().withAttributeName("rangeCode").withAttributeType(ScalarAttributeType.S));
        createTable(hashRangeTableName, schema);
        String hashCode = "hash2x";
        String rangeCode = "range2";
        TestClassWithHashRangeKey value = mapper.load(TestClassWithHashRangeKey.class, hashCode, rangeCode);
        Assert.assertNull("Value should not be found (" + hashCode + "/" + rangeCode, value);
        hashCode = "hash2";
        rangeCode = "range2x";
        value = mapper.load(TestClassWithHashRangeKey.class, hashCode, rangeCode);
        Assert.assertNull("Value should not be found (" + hashCode + "/" + rangeCode, value);
    }

    @Test
    public void queryWithHashKey() {
        putItemWithHashKey();
        putItemWithHashKeyOverwriteItem();
        String code = "hash1";
        DynamoDBQueryExpression query =
                new DynamoDBQueryExpression(new AttributeValue().withS(code));
        List<TestClassWithHashKey> valueList = mapper.query(TestClassWithHashKey.class, query);
        Assert.assertNotNull("Value list is null.", valueList);
        Assert.assertNotSame("Value list is empty.", 0, valueList.size());
        Assert.assertEquals("Value list has more than one item.", 1, valueList.size());
        TestClassWithHashKey value = valueList.get(0);
        Assert.assertEquals("Wrong code.", code, value.getCode());
        Assert.assertEquals("Wrong stringData.", "string1", value.getStringData());
        Assert.assertEquals("Wrong intData.", 1, value.getIntData());
    }

    @Test
    public void queryWithUnknownHashKey() {
        putItemWithHashKey();
        String code = "hash1x";
        DynamoDBQueryExpression query =
                new DynamoDBQueryExpression(new AttributeValue().withS(code));
        List<TestClassWithHashKey> valueList = mapper.query(TestClassWithHashKey.class, query);
        Assert.assertNotNull("Value list is null.", valueList);
        Assert.assertEquals("Value list should be empty.", 0, valueList.size());
    }

    /** Range-key BETWEEN condition returns only the matching items, in range order. */
    @Test
    public void queryWithHashRangeKey() {
        putItemWithHashKeyAndRangeKey();
        TestClassWithHashRangeKey value2c = new TestClassWithHashRangeKey();
        value2c.setHashCode("hash2");
        value2c.setRangeCode("range2c");
        value2c.setStringData("string2c");
        value2c.setIntData(23);
        mapper.save(value2c);
        TestClassWithHashRangeKey value2d = new TestClassWithHashRangeKey();
        value2d.setHashCode("hash2");
        value2d.setRangeCode("range2d");
        value2d.setStringData("string2d");
        value2d.setIntData(24);
        mapper.save(value2d);
        TestClassWithHashRangeKey value2e = new TestClassWithHashRangeKey();
        value2e.setHashCode("hash2");
        value2e.setRangeCode("range2e");
        value2e.setStringData("string2e");
        value2e.setIntData(25);
        mapper.save(value2e);

        String hashCode = "hash2";
        DynamoDBQueryExpression query =
                new DynamoDBQueryExpression(new AttributeValue().withS(hashCode));
        Condition rangeKeyCondition = new Condition();
        List<AttributeValue> attributeValueList = new ArrayList<AttributeValue>();
        attributeValueList.add(new AttributeValue().withS("range2c"));
        attributeValueList.add(new AttributeValue().withS("range2d"));
        rangeKeyCondition.setAttributeValueList(attributeValueList);
        rangeKeyCondition.setComparisonOperator(ComparisonOperator.BETWEEN);
        query.setRangeKeyCondition(rangeKeyCondition);
        List<TestClassWithHashRangeKey> valueList = mapper.query(TestClassWithHashRangeKey.class, query);
        Assert.assertNotNull("Value list is null.", valueList);
        Assert.assertNotSame("Value list is empty.", 0, valueList.size());
        Assert.assertEquals("Value list should have 2 items.", 2, valueList.size());
        TestClassWithHashRangeKey value = valueList.get(0);
        Assert.assertEquals("Wrong hashCode.", hashCode, value.getHashCode());
        Assert.assertEquals("Wrong rangeCode.", "range2c", value.getRangeCode());
        Assert.assertEquals("Wrong stringData.", "string2c", value.getStringData());
        Assert.assertEquals("Wrong intData.", 23, value.getIntData());
        value = valueList.get(1);
        Assert.assertEquals("Wrong hashCode.", hashCode, value.getHashCode());
        Assert.assertEquals("Wrong rangeCode.", "range2d", value.getRangeCode());
        Assert.assertEquals("Wrong stringData.", "string2d", value.getStringData());
        Assert.assertEquals("Wrong intData.", 24, value.getIntData());
    }

    @Test
    public void queryWithUnknownHashRangeKey1() {
        putItemWithHashKeyAndRangeKey();
        String hashCode = "hash1x";
        DynamoDBQueryExpression query =
                new DynamoDBQueryExpression(new AttributeValue().withS(hashCode));
        List<TestClassWithHashRangeKey> valueList = mapper.query(TestClassWithHashRangeKey.class, query);
        Assert.assertNotNull("Value list is null.", valueList);
        Assert.assertEquals("Value list should be empty.", 0, valueList.size());
    }

    @Test
    public void queryWithUnknownHashRangeKey2() {
        putItemWithHashKeyAndRangeKey();
        String hashCode = "hash2";
        DynamoDBQueryExpression query =
                new DynamoDBQueryExpression(new AttributeValue().withS(hashCode));
        Condition rangeKeyCondition = new Condition();
        List<AttributeValue> attributeValueList = new ArrayList<AttributeValue>();
        attributeValueList.add(new AttributeValue().withS("range2x"));
        attributeValueList.add(new AttributeValue().withS("range2y"));
        rangeKeyCondition.setAttributeValueList(attributeValueList);
        rangeKeyCondition.setComparisonOperator(ComparisonOperator.BETWEEN);
        query.setRangeKeyCondition(rangeKeyCondition);
        List<TestClassWithHashRangeKey> valueList = mapper.query(TestClassWithHashRangeKey.class, query);
        Assert.assertNotNull("Value list is null.", valueList);
        Assert.assertEquals("Value list should be empty.", 0, valueList.size());
    }

    /** delete() removes the item; a subsequent load() returns null. */
    @Test
    public void deleteHashItemTest()
    {
        putItemWithHashKey();
        putItemWithHashKeyOverwriteItem();
        String code = "hash1";
        TestClassWithHashKey value = mapper.load(TestClassWithHashKey.class, code);
        Assert.assertNotNull("Value not found.", value);
        Assert.assertEquals("Wrong code.", code, value.getCode());
        Assert.assertEquals("Wrong stringData.", "string1", value.getStringData());
        Assert.assertEquals("Wrong intData.", 1, value.getIntData());

        mapper.delete(value);

        TestClassWithHashKey value2 = mapper.load(TestClassWithHashKey.class, code);
        Assert.assertNull("Value2 should not be found.", value2);
    }

    @Test
    public void deleteHashRangeItemTest()
    {
        putItemWithHashKeyAndRangeKey();
        putItemWithHashKeyAndRangeKeyOverwriteItem();
        String hashCode = "hash2";
        String rangeCode = "range2";
        TestClassWithHashRangeKey value = mapper.load(TestClassWithHashRangeKey.class, hashCode, rangeCode);
        Assert.assertNotNull("Value not found.", value);
        Assert.assertEquals("Wrong hashCode.", hashCode, value.getHashCode());
        Assert.assertEquals("Wrong rangeCode.", rangeCode, value.getRangeCode());
        Assert.assertEquals("Wrong stringData.", "string2b", value.getStringData());
        Assert.assertEquals("Wrong intData.", 22, value.getIntData());

        mapper.delete(value);

        TestClassWithHashRangeKey value2 = mapper.load(TestClassWithHashRangeKey.class, hashCode, rangeCode);
        Assert.assertNull("Value2 should not be found.", value2);
    }

    /** scanIndexForward=false with limit 1 must return the last item in range order. */
    @Test
    public void scanIndexForwardFalseTest() {
        KeySchema schema = new KeySchema(new KeySchemaElement().withAttributeName("hashCode").withAttributeType(ScalarAttributeType.S));
        schema.setRangeKeyElement(new KeySchemaElement().withAttributeName("rangeCode").withAttributeType(ScalarAttributeType.S));
        createTable(hashRangeTableName, schema);

        {
            TestClassWithHashRangeKey c = new TestClassWithHashRangeKey();
            c.setHashCode("code");
            c.setRangeCode("1");
            c.setStringData("first");
            mapper.save(c);
        }
        {
            TestClassWithHashRangeKey c = new TestClassWithHashRangeKey();
            c.setHashCode("code");
            c.setRangeCode("2");
            c.setStringData("second");
            mapper.save(c);
        }

        TestClassWithHashRangeKey res = mapper.query(TestClassWithHashRangeKey.class, new DynamoDBQueryExpression(new AttributeValue("code")).withScanIndexForward(false).withLimit(1)).get(0);
        Assert.assertEquals("second", res.getStringData());
    }

    /** withLimit caps the result size; a limit above the item count returns everything. */
    @Test
    public void limitTest() {
        KeySchema schema = new KeySchema(new KeySchemaElement().withAttributeName("hashCode").withAttributeType(ScalarAttributeType.S));
        schema.setRangeKeyElement(new KeySchemaElement().withAttributeName("rangeCode").withAttributeType(ScalarAttributeType.S));
        createTable(hashRangeTableName, schema);

        for (int i = 0; i < 10; i++) {
            TestClassWithHashRangeKey c = new TestClassWithHashRangeKey();
            c.setHashCode("code");
            c.setRangeCode(i + "");
            mapper.save(c);
        }

        Assert.assertEquals(1, mapper.query(TestClassWithHashRangeKey.class, new DynamoDBQueryExpression(new AttributeValue("code")).withLimit(1)).size());
        Assert.assertEquals(3, mapper.query(TestClassWithHashRangeKey.class, new DynamoDBQueryExpression(new AttributeValue("code")).withLimit(3)).size());
        Assert.assertEquals(10, mapper.query(TestClassWithHashRangeKey.class, new DynamoDBQueryExpression(new AttributeValue("code")).withLimit(20)).size());
    }

    /** Non-ASCII (UTF-8) keys and attribute values must round-trip unchanged. */
    @Test
    public void utf8Test() {
        KeySchema schema = new KeySchema(new KeySchemaElement().withAttributeName("code").withAttributeType(ScalarAttributeType.S));
        createTable(hashTableName, schema);
        TestClassWithHashKey value = new TestClassWithHashKey();
        value.setCode("éáűőúöüóí");
        value.setStringData("űáéúőóüöí");
        mapper.save(value);
        TestClassWithHashKey readValue = mapper.load(TestClassWithHashKey.class, "éáűőúöüóí");
        Assert.assertEquals("éáűőúöüóí", readValue.getCode());
        Assert.assertEquals("űáéúőóüöí", readValue.getStringData());
    }
}
| jentfoo/Alternator | src/test/java/com/michelboudreau/test/AlternatorMapperTest.java | Java | apache-2.0 | 17,315 |
/*
Copyright 2011 Michael Edwards
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace Glass.Sitecore.Mapper.Configuration
{
    /// <summary>
    /// Bit flags that alter how a Sitecore field is read or written by the
    /// mapper. Values may be combined, since the enum is marked with
    /// <see cref="System.FlagsAttribute"/>.
    /// </summary>
    [Flags]
    public enum SitecoreFieldSettings
    {
        /// <summary>
        /// The field carries out its default behaviour
        /// </summary>
        [DisplayName("Default")]
        Default = 0x0,
        /// <summary>
        /// If used on a Rich Text field it stops the contents going through the render process
        /// and returns the raw HTML of the field
        /// </summary>
        [DisplayName("Rich text raw")]
        RichTextRaw = 0x1,
        /// <summary>
        /// If the property type is another classes loaded by the Mapper, indicates that the class should not be lazy loaded.
        /// </summary>
        [DisplayName("Don't load lazily")]
        DontLoadLazily = 0x2,
        /// <summary>
        /// Indicates the type should be inferred from the item template
        /// </summary>
        [DisplayName("Infer type")]
        InferType = 0x4
    }
}
| Glass-lu/Glass.Sitecore.Mapper | Source/Glass.Sitecore.Mapper/Configuration/SitecoreFieldSettings.cs | C# | apache-2.0 | 1,636 |
/*
* Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package cacheperf.comparisons.replicated.execute;
import com.gemstone.gemfire.cache.execute.Function;
import hydra.AbstractDescription;
import hydra.BasePrms;
import hydra.HydraConfigException;
/**
 * A class used to store keys for test configuration settings.
 */
public class ExecutePrms extends BasePrms {

  /**
   * (String)
   * Class name of function to register with the function execution service.
   * Assumes that the class has a default no-argument constructor. Required.
   */
  public static Long function;

  /**
   * Instantiates the class configured via {@link #function} and returns it.
   *
   * @return the configured {@link Function} instance
   * @throws HydraConfigException if the configured class does not implement
   *         {@link Function}
   */
  public static Function getFunction() {
    Long key = function;
    String classname = tasktab().stringAt(key, tab().stringAt(key));
    try {
      return (Function)AbstractDescription.getInstance(key, classname);
    } catch (ClassCastException e) {
      String s = BasePrms.nameForKey(key)
               + " does not implement Function: " + classname;
      throw new HydraConfigException(s);
    }
  }

  //----------------------------------------------------------------------------
  // Required stuff
  //----------------------------------------------------------------------------

  static {
    // Registers the parameter keys declared in this class with the framework.
    setValues(ExecutePrms.class);
  }
}
| SnappyDataInc/snappy-store | tests/core/src/main/java/cacheperf/comparisons/replicated/execute/ExecutePrms.java | Java | apache-2.0 | 1,840 |
package org.semanticweb.elk.reasoner.indexing.model;
/*
* #%L
* ELK Reasoner
* $Id:$
* $HeadURL:$
* %%
* Copyright (C) 2011 - 2015 Department of Computer Science, University of Oxford
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
/**
 * A {@link ModifiableIndexedObjectHasSelf} that can be used for memoization
 * (caching).
 *
 * @author "Yevgeny Kazakov"
 */
public interface CachedIndexedObjectHasSelf extends
    ModifiableIndexedObjectHasSelf,
    CachedIndexedComplexClassExpression<CachedIndexedObjectHasSelf> {

  /**
   * A factory for creating instances
   *
   * @author Yevgeny Kazakov
   *
   */
  interface Factory {

    CachedIndexedObjectHasSelf getIndexedObjectHasSelf(
        ModifiableIndexedObjectProperty property);

  }

  /**
   * A filter for mapping objects
   *
   * @author Yevgeny Kazakov
   *
   */
  interface Filter {

    CachedIndexedObjectHasSelf filter(CachedIndexedObjectHasSelf element);

  }

  static class Helper extends CachedIndexedObject.Helper {

    /**
     * Computes the structural hash of an ObjectHasSelf expression from its
     * property, so that expressions over equal properties hash equally.
     */
    public static int structuralHashCode(IndexedObjectProperty property) {
      return combinedHashCode(CachedIndexedObjectHasSelf.class, property);
    }

    /**
     * Returns {@code second} (typed) if it is structurally equal to
     * {@code first}, i.e. it is also a {@link CachedIndexedObjectHasSelf}
     * over the same property; returns {@code null} otherwise.
     */
    public static CachedIndexedObjectHasSelf structuralEquals(
        CachedIndexedObjectHasSelf first, Object second) {
      if (first == second) {
        return first;
      }
      if (second instanceof CachedIndexedObjectHasSelf) {
        CachedIndexedObjectHasSelf secondEntry = (CachedIndexedObjectHasSelf) second;
        if (first.getProperty().equals(secondEntry.getProperty()))
          return secondEntry;
      }
      // else
      return null;
    }

  }

}
| liveontologies/elk-reasoner | elk-reasoner/src/main/java/org/semanticweb/elk/reasoner/indexing/model/CachedIndexedObjectHasSelf.java | Java | apache-2.0 | 2,089 |
/*
Copyright 2011-2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.google.security.zynamics.binnavi.debug.debugger;
import com.google.common.base.Preconditions;
import com.google.security.zynamics.binnavi.debug.debugger.interfaces.IDebugger;
import com.google.security.zynamics.binnavi.debug.models.breakpoints.BreakpointAddress;
import com.google.security.zynamics.binnavi.disassembly.RelocatedAddress;
import com.google.security.zynamics.binnavi.disassembly.UnrelocatedAddress;
import com.google.security.zynamics.binnavi.disassembly.INaviModule;
/**
 * Static helper functions for working with debuggers.
 */
public final class DebuggerHelpers {
  /**
   * Utility class; never instantiated.
   */
  private DebuggerHelpers() {
  }

  /**
   * Converts a memory address into a breakpoint address. If the address does
   * not fall into any module known to the debugger, the raw memory address is
   * used as the file address.
   *
   * @param debugger The debugger which handles the breakpoint.
   * @param memoryAddress The memory address to convert.
   *
   * @return The breakpoint address.
   */
  public static BreakpointAddress getBreakpointAddress(
      final IDebugger debugger, final RelocatedAddress memoryAddress) {
    Preconditions.checkNotNull(debugger, "IE00161: Debugger argument can not be null");
    Preconditions.checkNotNull(memoryAddress, "IE00163: Memory address argument can not be null");

    final INaviModule module = debugger.getModule(memoryAddress);

    final UnrelocatedAddress fileAddress;
    if (module == null) {
      // No owning module: fall back to the unrelocated raw address.
      fileAddress = new UnrelocatedAddress(memoryAddress.getAddress());
    } else {
      fileAddress = debugger.memoryToFile(module, memoryAddress);
    }

    return new BreakpointAddress(module, fileAddress);
  }
}
| AmesianX/binnavi | src/main/java/com/google/security/zynamics/binnavi/debug/debugger/DebuggerHelpers.java | Java | apache-2.0 | 2,080 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.7.0_80) on Wed Oct 12 20:49:58 CEST 2016 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Uses of Class org.apache.flume.sink.elasticsearch.client.RoundRobinList (Apache Flume 1.7.0 API)</title>
<meta name="date" content="2016-10-12">
<link rel="stylesheet" type="text/css" href="../../../../../../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class org.apache.flume.sink.elasticsearch.client.RoundRobinList (Apache Flume 1.7.0 API)";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../../org/apache/flume/sink/elasticsearch/client/RoundRobinList.html" title="class in org.apache.flume.sink.elasticsearch.client">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../../index.html?org/apache/flume/sink/elasticsearch/client/class-use/RoundRobinList.html" target="_top">Frames</a></li>
<li><a href="RoundRobinList.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Class org.apache.flume.sink.elasticsearch.client.RoundRobinList" class="title">Uses of Class<br>org.apache.flume.sink.elasticsearch.client.RoundRobinList</h2>
</div>
<div class="classUseContainer">No usage of org.apache.flume.sink.elasticsearch.client.RoundRobinList</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../../org/apache/flume/sink/elasticsearch/client/RoundRobinList.html" title="class in org.apache.flume.sink.elasticsearch.client">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../../index.html?org/apache/flume/sink/elasticsearch/client/class-use/RoundRobinList.html" target="_top">Frames</a></li>
<li><a href="RoundRobinList.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2009-2016 <a href="http://www.apache.org">Apache Software Foundation</a>. All Rights Reserved.</small></p>
</body>
</html>
| wangchuande/apache-flume-1.7.0 | docs/apidocs/org/apache/flume/sink/elasticsearch/client/class-use/RoundRobinList.html | HTML | apache-2.0 | 4,713 |
//*****************************************************************************
//*
//* (c) Copyright 2002. Glub Tech, Incorporated. All Rights Reserved.
//*
//* $Id: HostInfo.java 37 2009-05-11 22:46:15Z gary $
//*
//*****************************************************************************
package com.glub.secureftp.bean;
import java.net.*;
/**
* The <code>HostInfo</code> class is responsible for holding host information.
*
* @author Gary Cohen
* @version $Revision: 47 $, $Date: 2009-05-16 10:10:12 -0700 (Sat, 16 May 2009) $
* @since 2.0
*/
public class HostInfo {
    /** Resolved network address of the host; {@code null} for an empty instance. */
    private InetAddress host;

    /** Port number on the host. */
    private int port;

    /**
     * Create an empty <code>HostInfo</code> object (no address, port 0).
     */
    public HostInfo() {
        this( (InetAddress)null, 0 );
    }

    /**
     * Create a new <code>HostInfo</code> object from an already-resolved address.
     *
     * @param hostname the resolved address of the host.
     * @param port the port.
     */
    public HostInfo( InetAddress hostname, int port ) {
        this.host = hostname;
        this.port = port;
    }

    /**
     * Create a new <code>HostInfo</code> object, resolving the hostname.
     *
     * @param hostname the hostname to resolve.
     * @param port the port.
     * @throws UnknownHostException if the hostname cannot be resolved.
     */
    public HostInfo( String hostname, int port ) throws UnknownHostException {
        this( InetAddress.getByName( hostname ), port );
    }

    /**
     * Get the <code>InetAddress</code> of the host.
     *
     * @return the <code>InetAddress</code>.
     */
    public InetAddress getInetAddress() {
        return host;
    }

    /**
     * Get the hostname of the host.
     *
     * @return the hostname.
     */
    public String getHostName() {
        return host.getHostName();
    }

    /**
     * Get the IP address of the host as a string.
     *
     * @return the IP address.
     */
    public String getHostAddress() {
        return host.getHostAddress();
    }

    /**
     * Set the host address directly.
     *
     * @param hostname the resolved address.
     */
    public void setHostName( InetAddress hostname ) {
        this.host = hostname;
    }

    /**
     * Set the host by name, resolving it immediately.
     *
     * @param hostname the hostname to resolve.
     * @throws UnknownHostException if the hostname cannot be resolved.
     */
    public void setHostName( String hostname ) throws UnknownHostException {
        this.host = InetAddress.getByName( hostname );
    }

    /**
     * Get the port of the host.
     *
     * @return the port.
     */
    public int getPort() {
        return port;
    }

    /**
     * Set the port.
     *
     * @param port the port.
     */
    public void setPort( int port ) {
        this.port = port;
    }
}
| glubtech/secureftp | src/com/glub/secureftp/bean/HostInfo.java | Java | apache-2.0 | 2,421 |
/**
 * StartupCommand
 *
 * PureMVC MacroCommand executed at application startup. It registers the
 * controller, model and view preparation sub-commands; MacroCommand runs
 * sub-commands in the order they were added.
 */
puremvc.define({
        name: 'game.controller.StartupCommand',
        parent: puremvc.MacroCommand
    },

    // INSTANCE MEMBERS
    {
        /**
         * Add the sub-commands for this MacroCommand.
         * Order matters: controller first, then model, then view.
         * @override
         */
        initializeMacroCommand: function () {
            this.addSubCommand( game.controller.StartupControllerCommand );
            this.addSubCommand( game.controller.StartupModelCommand );
            this.addSubCommand( game.controller.StartupViewCommand );
        }
    }
);
<html>
<body>
<h2>Log Viewers.</h2>
</body>
</html> | joobn72/qi4j-sdk | libraries/logging/src/main/java/org/qi4j/logging/view/package.html | HTML | apache-2.0 | 67 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.indices.recovery;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.recovery.RecoveryState.Stage;
import org.elasticsearch.indices.recovery.RecoveryState.Type;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.snapshots.SnapshotState;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.store.MockFSDirectoryService;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.arrayWithSize;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.Matchers.not;
/**
*
*/
@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
public class IndexRecoveryIT extends ESIntegTestCase {
private static final String INDEX_NAME = "test-idx-1";
private static final String INDEX_TYPE = "test-type-1";
private static final String REPO_NAME = "test-repo-1";
private static final String SNAP_NAME = "test-snap-1";
private static final int MIN_DOC_COUNT = 500;
private static final int MAX_DOC_COUNT = 1000;
private static final int SHARD_COUNT = 1;
private static final int REPLICA_COUNT = 0;
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return pluginList(MockTransportService.TestPlugin.class);
}
private void assertRecoveryStateWithoutStage(RecoveryState state, int shardId, Type type,
String sourceNode, String targetNode, boolean hasRestoreSource) {
assertThat(state.getShardId().getId(), equalTo(shardId));
assertThat(state.getType(), equalTo(type));
if (sourceNode == null) {
assertNull(state.getSourceNode());
} else {
assertNotNull(state.getSourceNode());
assertThat(state.getSourceNode().getName(), equalTo(sourceNode));
}
if (targetNode == null) {
assertNull(state.getTargetNode());
} else {
assertNotNull(state.getTargetNode());
assertThat(state.getTargetNode().getName(), equalTo(targetNode));
}
if (hasRestoreSource) {
assertNotNull(state.getRestoreSource());
} else {
assertNull(state.getRestoreSource());
}
}
private void assertRecoveryState(RecoveryState state, int shardId, Type type, Stage stage,
String sourceNode, String targetNode, boolean hasRestoreSource) {
assertRecoveryStateWithoutStage(state, shardId, type, sourceNode, targetNode, hasRestoreSource);
assertThat(state.getStage(), equalTo(stage));
}
private void assertOnGoingRecoveryState(RecoveryState state, int shardId, Type type,
String sourceNode, String targetNode, boolean hasRestoreSource) {
assertRecoveryStateWithoutStage(state, shardId, type, sourceNode, targetNode, hasRestoreSource);
assertThat(state.getStage(), not(equalTo(Stage.DONE)));
}
private void slowDownRecovery(ByteSizeValue shardSize) {
long chunkSize = Math.max(1, shardSize.bytes() / 10);
for(RecoverySettings settings : internalCluster().getInstances(RecoverySettings.class)) {
setChunkSize(settings, new ByteSizeValue(chunkSize, ByteSizeUnit.BYTES));
}
assertTrue(client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder()
// one chunk per sec..
.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), chunkSize, ByteSizeUnit.BYTES)
)
.get().isAcknowledged());
}
private void restoreRecoverySpeed() {
for(RecoverySettings settings : internalCluster().getInstances(RecoverySettings.class)) {
setChunkSize(settings, RecoverySettings.DEFAULT_CHUNK_SIZE);
}
assertTrue(client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder()
.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "20mb")
)
.get().isAcknowledged());
}
public void testGatewayRecovery() throws Exception {
logger.info("--> start nodes");
String node = internalCluster().startNode();
createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT);
logger.info("--> restarting cluster");
internalCluster().fullRestart();
ensureGreen();
logger.info("--> request recoveries");
RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
assertThat(response.shardRecoveryStates().size(), equalTo(SHARD_COUNT));
assertThat(response.shardRecoveryStates().get(INDEX_NAME).size(), equalTo(1));
List<RecoveryState> recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
assertThat(recoveryStates.size(), equalTo(1));
RecoveryState recoveryState = recoveryStates.get(0);
assertRecoveryState(recoveryState, 0, Type.STORE, Stage.DONE, node, node, false);
validateIndexRecoveryState(recoveryState.getIndex());
}
public void testGatewayRecoveryTestActiveOnly() throws Exception {
logger.info("--> start nodes");
internalCluster().startNode();
createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT);
logger.info("--> restarting cluster");
internalCluster().fullRestart();
ensureGreen();
logger.info("--> request recoveries");
RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).setActiveOnly(true).execute().actionGet();
List<RecoveryState> recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
assertThat(recoveryStates.size(), equalTo(0)); // Should not expect any responses back
}
public void testReplicaRecovery() throws Exception {
logger.info("--> start node A");
String nodeA = internalCluster().startNode();
logger.info("--> create index on node: {}", nodeA);
createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT);
logger.info("--> start node B");
String nodeB = internalCluster().startNode();
ensureGreen();
// force a shard recovery from nodeA to nodeB
logger.info("--> bump replica count");
client().admin().indices().prepareUpdateSettings(INDEX_NAME)
.setSettings(settingsBuilder().put("number_of_replicas", 1)).execute().actionGet();
ensureGreen();
logger.info("--> request recoveries");
RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
// we should now have two total shards, one primary and one replica
List<RecoveryState> recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
assertThat(recoveryStates.size(), equalTo(2));
List<RecoveryState> nodeAResponses = findRecoveriesForTargetNode(nodeA, recoveryStates);
assertThat(nodeAResponses.size(), equalTo(1));
List<RecoveryState> nodeBResponses = findRecoveriesForTargetNode(nodeB, recoveryStates);
assertThat(nodeBResponses.size(), equalTo(1));
// validate node A recovery
RecoveryState nodeARecoveryState = nodeAResponses.get(0);
assertRecoveryState(nodeARecoveryState, 0, Type.STORE, Stage.DONE, nodeA, nodeA, false);
validateIndexRecoveryState(nodeARecoveryState.getIndex());
// validate node B recovery
RecoveryState nodeBRecoveryState = nodeBResponses.get(0);
assertRecoveryState(nodeBRecoveryState, 0, Type.REPLICA, Stage.DONE, nodeA, nodeB, false);
validateIndexRecoveryState(nodeBRecoveryState.getIndex());
}
@TestLogging("indices.recovery:TRACE")
public void testRerouteRecovery() throws Exception {
logger.info("--> start node A");
final String nodeA = internalCluster().startNode();
logger.info("--> create index on node: {}", nodeA);
ByteSizeValue shardSize = createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT).getShards()[0].getStats().getStore().size();
logger.info("--> start node B");
final String nodeB = internalCluster().startNode();
ensureGreen();
logger.info("--> slowing down recoveries");
slowDownRecovery(shardSize);
logger.info("--> move shard from: {} to: {}", nodeA, nodeB);
client().admin().cluster().prepareReroute()
.add(new MoveAllocationCommand(new ShardId(INDEX_NAME, 0), nodeA, nodeB))
.execute().actionGet().getState();
logger.info("--> waiting for recovery to start both on source and target");
assertBusy(new Runnable() {
@Override
public void run() {
IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeA);
assertThat(indicesService.indexServiceSafe(INDEX_NAME).getShard(0).recoveryStats().currentAsSource(),
equalTo(1));
indicesService = internalCluster().getInstance(IndicesService.class, nodeB);
assertThat(indicesService.indexServiceSafe(INDEX_NAME).getShard(0).recoveryStats().currentAsTarget(),
equalTo(1));
}
});
logger.info("--> request recoveries");
RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
List<RecoveryState> recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
List<RecoveryState> nodeARecoveryStates = findRecoveriesForTargetNode(nodeA, recoveryStates);
assertThat(nodeARecoveryStates.size(), equalTo(1));
List<RecoveryState> nodeBRecoveryStates = findRecoveriesForTargetNode(nodeB, recoveryStates);
assertThat(nodeBRecoveryStates.size(), equalTo(1));
assertRecoveryState(nodeARecoveryStates.get(0), 0, Type.STORE, Stage.DONE, nodeA, nodeA, false);
validateIndexRecoveryState(nodeARecoveryStates.get(0).getIndex());
assertOnGoingRecoveryState(nodeBRecoveryStates.get(0), 0, Type.RELOCATION, nodeA, nodeB, false);
validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex());
logger.info("--> request node recovery stats");
NodesStatsResponse statsResponse = client().admin().cluster().prepareNodesStats().clear().setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get();
long nodeAThrottling = Long.MAX_VALUE;
long nodeBThrottling = Long.MAX_VALUE;
for (NodeStats nodeStats : statsResponse.getNodes()) {
final RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats();
if (nodeStats.getNode().name().equals(nodeA)) {
assertThat("node A should have ongoing recovery as source", recoveryStats.currentAsSource(), equalTo(1));
assertThat("node A should not have ongoing recovery as target", recoveryStats.currentAsTarget(), equalTo(0));
nodeAThrottling = recoveryStats.throttleTime().millis();
}
if (nodeStats.getNode().name().equals(nodeB)) {
assertThat("node B should not have ongoing recovery as source", recoveryStats.currentAsSource(), equalTo(0));
assertThat("node B should have ongoing recovery as target", recoveryStats.currentAsTarget(), equalTo(1));
nodeBThrottling = recoveryStats.throttleTime().millis();
}
}
logger.info("--> checking throttling increases");
final long finalNodeAThrottling = nodeAThrottling;
final long finalNodeBThrottling = nodeBThrottling;
assertBusy(new Runnable() {
@Override
public void run() {
NodesStatsResponse statsResponse = client().admin().cluster().prepareNodesStats().clear().setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get();
assertThat(statsResponse.getNodes(), arrayWithSize(2));
for (NodeStats nodeStats : statsResponse.getNodes()) {
final RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats();
if (nodeStats.getNode().name().equals(nodeA)) {
assertThat("node A throttling should increase", recoveryStats.throttleTime().millis(), greaterThan(finalNodeAThrottling));
}
if (nodeStats.getNode().name().equals(nodeB)) {
assertThat("node B throttling should increase", recoveryStats.throttleTime().millis(), greaterThan(finalNodeBThrottling));
}
}
}
});
logger.info("--> speeding up recoveries");
restoreRecoverySpeed();
// wait for it to be finished
ensureGreen();
response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
assertThat(recoveryStates.size(), equalTo(1));
assertRecoveryState(recoveryStates.get(0), 0, Type.RELOCATION, Stage.DONE, nodeA, nodeB, false);
validateIndexRecoveryState(recoveryStates.get(0).getIndex());
statsResponse = client().admin().cluster().prepareNodesStats().clear().setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get();
assertThat(statsResponse.getNodes(), arrayWithSize(2));
for (NodeStats nodeStats : statsResponse.getNodes()) {
final RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats();
assertThat(recoveryStats.currentAsSource(), equalTo(0));
assertThat(recoveryStats.currentAsTarget(), equalTo(0));
if (nodeStats.getNode().name().equals(nodeA)) {
assertThat("node A throttling should be >0", recoveryStats.throttleTime().millis(), greaterThan(0l));
}
if (nodeStats.getNode().name().equals(nodeB)) {
assertThat("node B throttling should be >0 ", recoveryStats.throttleTime().millis(), greaterThan(0l));
}
}
logger.info("--> bump replica count");
client().admin().indices().prepareUpdateSettings(INDEX_NAME)
.setSettings(settingsBuilder().put("number_of_replicas", 1)).execute().actionGet();
ensureGreen();
statsResponse = client().admin().cluster().prepareNodesStats().clear().setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get();
assertThat(statsResponse.getNodes(), arrayWithSize(2));
for (NodeStats nodeStats : statsResponse.getNodes()) {
final RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats();
assertThat(recoveryStats.currentAsSource(), equalTo(0));
assertThat(recoveryStats.currentAsTarget(), equalTo(0));
if (nodeStats.getNode().name().equals(nodeA)) {
assertThat("node A throttling should be >0", recoveryStats.throttleTime().millis(), greaterThan(0l));
}
if (nodeStats.getNode().name().equals(nodeB)) {
assertThat("node B throttling should be >0 ", recoveryStats.throttleTime().millis(), greaterThan(0l));
}
}
logger.info("--> start node C");
String nodeC = internalCluster().startNode();
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("3").get().isTimedOut());
logger.info("--> slowing down recoveries");
slowDownRecovery(shardSize);
logger.info("--> move replica shard from: {} to: {}", nodeA, nodeC);
client().admin().cluster().prepareReroute()
.add(new MoveAllocationCommand(new ShardId(INDEX_NAME, 0), nodeA, nodeC))
.execute().actionGet().getState();
response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
nodeARecoveryStates = findRecoveriesForTargetNode(nodeA, recoveryStates);
assertThat(nodeARecoveryStates.size(), equalTo(1));
nodeBRecoveryStates = findRecoveriesForTargetNode(nodeB, recoveryStates);
assertThat(nodeBRecoveryStates.size(), equalTo(1));
List<RecoveryState> nodeCRecoveryStates = findRecoveriesForTargetNode(nodeC, recoveryStates);
assertThat(nodeCRecoveryStates.size(), equalTo(1));
assertRecoveryState(nodeARecoveryStates.get(0), 0, Type.REPLICA, Stage.DONE, nodeB, nodeA, false);
validateIndexRecoveryState(nodeARecoveryStates.get(0).getIndex());
assertRecoveryState(nodeBRecoveryStates.get(0), 0, Type.RELOCATION, Stage.DONE, nodeA, nodeB, false);
validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex());
// relocations of replicas are marked as REPLICA and the source node is the node holding the primary (B)
assertOnGoingRecoveryState(nodeCRecoveryStates.get(0), 0, Type.REPLICA, nodeB, nodeC, false);
validateIndexRecoveryState(nodeCRecoveryStates.get(0).getIndex());
logger.info("--> speeding up recoveries");
restoreRecoverySpeed();
ensureGreen();
response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
nodeARecoveryStates = findRecoveriesForTargetNode(nodeA, recoveryStates);
assertThat(nodeARecoveryStates.size(), equalTo(0));
nodeBRecoveryStates = findRecoveriesForTargetNode(nodeB, recoveryStates);
assertThat(nodeBRecoveryStates.size(), equalTo(1));
nodeCRecoveryStates = findRecoveriesForTargetNode(nodeC, recoveryStates);
assertThat(nodeCRecoveryStates.size(), equalTo(1));
assertRecoveryState(nodeBRecoveryStates.get(0), 0, Type.RELOCATION, Stage.DONE, nodeA, nodeB, false);
validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex());
// relocations of replicas are marked as REPLICA and the source node is the node holding the primary (B)
assertRecoveryState(nodeCRecoveryStates.get(0), 0, Type.REPLICA, Stage.DONE, nodeB, nodeC, false);
validateIndexRecoveryState(nodeCRecoveryStates.get(0).getIndex());
}
public void testSnapshotRecovery() throws Exception {
logger.info("--> start node A");
String nodeA = internalCluster().startNode();
logger.info("--> create repository");
assertAcked(client().admin().cluster().preparePutRepository(REPO_NAME)
.setType("fs").setSettings(Settings.settingsBuilder()
.put("location", randomRepoPath())
.put("compress", false)
).get());
ensureGreen();
logger.info("--> create index on node: {}", nodeA);
createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT);
logger.info("--> snapshot");
CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot(REPO_NAME, SNAP_NAME)
.setWaitForCompletion(true).setIndices(INDEX_NAME).get();
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
assertThat(client().admin().cluster().prepareGetSnapshots(REPO_NAME).setSnapshots(SNAP_NAME).get()
.getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
client().admin().indices().prepareClose(INDEX_NAME).execute().actionGet();
logger.info("--> restore");
RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster()
.prepareRestoreSnapshot(REPO_NAME, SNAP_NAME).setWaitForCompletion(true).execute().actionGet();
int totalShards = restoreSnapshotResponse.getRestoreInfo().totalShards();
assertThat(totalShards, greaterThan(0));
ensureGreen();
logger.info("--> request recoveries");
RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
for (Map.Entry<String, List<RecoveryState>> indexRecoveryStates : response.shardRecoveryStates().entrySet()) {
assertThat(indexRecoveryStates.getKey(), equalTo(INDEX_NAME));
List<RecoveryState> recoveryStates = indexRecoveryStates.getValue();
assertThat(recoveryStates.size(), equalTo(totalShards));
for (RecoveryState recoveryState : recoveryStates) {
assertRecoveryState(recoveryState, 0, Type.SNAPSHOT, Stage.DONE, null, nodeA, true);
validateIndexRecoveryState(recoveryState.getIndex());
}
}
}
private List<RecoveryState> findRecoveriesForTargetNode(String nodeName, List<RecoveryState> recoveryStates) {
List<RecoveryState> nodeResponses = new ArrayList<>();
for (RecoveryState recoveryState : recoveryStates) {
if (recoveryState.getTargetNode().getName().equals(nodeName)) {
nodeResponses.add(recoveryState);
}
}
return nodeResponses;
}
private IndicesStatsResponse createAndPopulateIndex(String name, int nodeCount, int shardCount, int replicaCount)
throws ExecutionException, InterruptedException {
logger.info("--> creating test index: {}", name);
assertAcked(prepareCreate(name, nodeCount, settingsBuilder().put("number_of_shards", shardCount)
.put("number_of_replicas", replicaCount).put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL, 0)));
ensureGreen();
logger.info("--> indexing sample data");
final int numDocs = between(MIN_DOC_COUNT, MAX_DOC_COUNT);
final IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex(INDEX_NAME, INDEX_TYPE).
setSource("foo-int", randomInt(),
"foo-string", randomAsciiOfLength(32),
"foo-float", randomFloat());
}
indexRandom(true, docs);
flush();
assertThat(client().prepareSearch(INDEX_NAME).setSize(0).get().getHits().totalHits(), equalTo((long) numDocs));
return client().admin().indices().prepareStats(INDEX_NAME).execute().actionGet();
}
private void validateIndexRecoveryState(RecoveryState.Index indexState) {
assertThat(indexState.time(), greaterThanOrEqualTo(0L));
assertThat(indexState.recoveredFilesPercent(), greaterThanOrEqualTo(0.0f));
assertThat(indexState.recoveredFilesPercent(), lessThanOrEqualTo(100.0f));
assertThat(indexState.recoveredBytesPercent(), greaterThanOrEqualTo(0.0f));
assertThat(indexState.recoveredBytesPercent(), lessThanOrEqualTo(100.0f));
}
public void testDisconnectsWhileRecovering() throws Exception {
final String indexName = "test";
final Settings nodeSettings = Settings.builder()
.put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), "100ms")
.put(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(), "1s")
.put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE, false) // restarted recoveries will delete temp files and write them again
.build();
// start a master node
internalCluster().startNode(nodeSettings);
InternalTestCluster.Async<String> blueFuture = internalCluster().startNodeAsync(Settings.builder().put("node.color", "blue").put(nodeSettings).build());
InternalTestCluster.Async<String> redFuture = internalCluster().startNodeAsync(Settings.builder().put("node.color", "red").put(nodeSettings).build());
final String blueNodeName = blueFuture.get();
final String redNodeName = redFuture.get();
ClusterHealthResponse response = client().admin().cluster().prepareHealth().setWaitForNodes(">=3").get();
assertThat(response.isTimedOut(), is(false));
client().admin().indices().prepareCreate(indexName)
.setSettings(
Settings.builder()
.put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "blue")
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
).get();
List<IndexRequestBuilder> requests = new ArrayList<>();
int numDocs = scaledRandomIntBetween(25, 250);
for (int i = 0; i < numDocs; i++) {
requests.add(client().prepareIndex(indexName, "type").setCreate(true).setSource("{}"));
}
indexRandom(true, requests);
ensureSearchable(indexName);
ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
final String blueNodeId = internalCluster().getInstance(DiscoveryService.class, blueNodeName).localNode().id();
assertFalse(stateResponse.getState().getRoutingNodes().node(blueNodeId).isEmpty());
SearchResponse searchResponse = client().prepareSearch(indexName).get();
assertHitCount(searchResponse, numDocs);
String[] recoveryActions = new String[]{
RecoverySource.Actions.START_RECOVERY,
RecoveryTarget.Actions.FILES_INFO,
RecoveryTarget.Actions.FILE_CHUNK,
RecoveryTarget.Actions.CLEAN_FILES,
//RecoveryTarget.Actions.TRANSLOG_OPS, <-- may not be sent if already flushed
RecoveryTarget.Actions.PREPARE_TRANSLOG,
RecoveryTarget.Actions.FINALIZE
};
final String recoveryActionToBlock = randomFrom(recoveryActions);
final boolean dropRequests = randomBoolean();
logger.info("--> will {} between blue & red on [{}]", dropRequests ? "drop requests" : "break connection", recoveryActionToBlock);
MockTransportService blueMockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, blueNodeName);
MockTransportService redMockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, redNodeName);
TransportService redTransportService = internalCluster().getInstance(TransportService.class, redNodeName);
TransportService blueTransportService = internalCluster().getInstance(TransportService.class, blueNodeName);
final CountDownLatch requestBlocked = new CountDownLatch(1);
blueMockTransportService.addDelegate(redTransportService, new RecoveryActionBlocker(dropRequests, recoveryActionToBlock, blueMockTransportService.original(), requestBlocked));
redMockTransportService.addDelegate(blueTransportService, new RecoveryActionBlocker(dropRequests, recoveryActionToBlock, redMockTransportService.original(), requestBlocked));
logger.info("--> starting recovery from blue to red");
client().admin().indices().prepareUpdateSettings(indexName).setSettings(
Settings.builder()
.put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red,blue")
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
).get();
requestBlocked.await();
logger.info("--> stopping to block recovery");
blueMockTransportService.clearAllRules();
redMockTransportService.clearAllRules();
ensureGreen();
searchResponse = client(redNodeName).prepareSearch(indexName).setPreference("_local").get();
assertHitCount(searchResponse, numDocs);
}
private class RecoveryActionBlocker extends MockTransportService.DelegateTransport {
private final boolean dropRequests;
private final String recoveryActionToBlock;
private final CountDownLatch requestBlocked;
public RecoveryActionBlocker(boolean dropRequests, String recoveryActionToBlock, Transport delegate, CountDownLatch requestBlocked) {
super(delegate);
this.dropRequests = dropRequests;
this.recoveryActionToBlock = recoveryActionToBlock;
this.requestBlocked = requestBlocked;
}
@Override
public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
if (recoveryActionToBlock.equals(action) || requestBlocked.getCount() == 0) {
logger.info("--> preventing {} request", action);
requestBlocked.countDown();
if (dropRequests) {
return;
}
throw new ConnectTransportException(node, "DISCONNECT: prevented " + action + " request");
}
transport.sendRequest(node, requestId, action, request, options);
}
}
    /**
     * Test helper: overrides the recovery file-chunk size on the given settings
     * instance, letting tests force recoveries to transfer many small chunks.
     */
    public static void setChunkSize(RecoverySettings recoverySettings, ByteSizeValue chunksSize) {
        recoverySettings.setChunkSize(chunksSize);
    }
}
| martinstuga/elasticsearch | core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java | Java | apache-2.0 | 33,122 |
/*
* MPEG2 transport stream (aka DVB) muxer
* Copyright (c) 2003 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/bswap.h"
#include "libavutil/crc.h"
#include "libavutil/dict.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/avassert.h"
#include "libavcodec/internal.h"
#include "avformat.h"
#include "internal.h"
#include "mpegts.h"
#define PCR_TIME_BASE 27000000
/* write DVB SI sections */
/*********************************************/
/* mpegts section writer */
/* State for emitting one PSI/SI section (PAT/PMT/SDT) as TS packets. */
typedef struct MpegTSSection {
    int pid;  /* TS packet identifier the section is transmitted on */
    int cc;   /* 4-bit continuity counter, incremented per packet */
    void (*write_packet)(struct MpegTSSection *s, const uint8_t *packet); /* sink for each 188-byte packet */
    void *opaque; /* user data for write_packet (the AVFormatContext here) */
} MpegTSSection;
/* One program (DVB "service") carried in the transport stream. */
typedef struct MpegTSService {
    MpegTSSection pmt; /* MPEG2 pmt table context */
    int sid;           /* service ID */
    char *name;            /* service name announced in the SDT */
    char *provider_name;   /* service provider announced in the SDT */
    int pcr_pid;           /* pid carrying the PCR for this program (0x1fff = unset) */
    int pcr_packet_count;  /* packets emitted since the last PCR */
    int pcr_packet_period; /* emit a PCR every this many packets */
} MpegTSService;
/* Muxer private context: global tables, user options and timing state. */
typedef struct MpegTSWrite {
    const AVClass *av_class;
    MpegTSSection pat; /* MPEG2 pat table */
    MpegTSSection sdt; /* MPEG2 sdt table context */
    MpegTSService **services; /* dynarray of programs (currently always one) */
    int sdt_packet_count;  /* packets since the last SDT retransmission */
    int sdt_packet_period;
    int pat_packet_count;  /* packets since the last PAT/PMT retransmission */
    int pat_packet_period;
    int nb_services;
    int onid;          /* original network id written in the SDT */
    int tsid;          /* transport stream id written in the PAT/SDT */
    int64_t first_pcr; /* initial PCR offset (CBR + !copyts only) */
    int mux_rate; ///< set to 1 when VBR
    int pes_payload_size; /* min payload accumulated before a PES packet is cut */
    int transport_stream_id;
    int original_network_id;
    int service_id;
    int pmt_start_pid;
    int start_pid;
    int m2ts_mode; /* write 192-byte (4-byte-prefixed) BDAV/m2ts packets */
    int reemit_pat_pmt; // backward compatibility
#define MPEGTS_FLAG_REEMIT_PAT_PMT  0x01
#define MPEGTS_FLAG_AAC_LATM        0x02
    int flags;
    int copyts;         /* pass timestamps through without the delay offset */
    int tables_version; /* version_number used in PSI tables (0-31) */
} MpegTSWrite;
/* a PES packet header is generated every DEFAULT_PES_HEADER_FREQ packets */
#define DEFAULT_PES_HEADER_FREQ 16
#define DEFAULT_PES_PAYLOAD_SIZE ((DEFAULT_PES_HEADER_FREQ - 1) * 184 + 170)
/* Muxer private options, exposed through the AVOption system. */
static const AVOption options[] = {
    { "mpegts_transport_stream_id", "Set transport_stream_id field.",
      offsetof(MpegTSWrite, transport_stream_id), AV_OPT_TYPE_INT, {.i64 = 0x0001 }, 0x0001, 0xffff, AV_OPT_FLAG_ENCODING_PARAM},
    { "mpegts_original_network_id", "Set original_network_id field.",
      offsetof(MpegTSWrite, original_network_id), AV_OPT_TYPE_INT, {.i64 = 0x0001 }, 0x0001, 0xffff, AV_OPT_FLAG_ENCODING_PARAM},
    { "mpegts_service_id", "Set service_id field.",
      offsetof(MpegTSWrite, service_id), AV_OPT_TYPE_INT, {.i64 = 0x0001 }, 0x0001, 0xffff, AV_OPT_FLAG_ENCODING_PARAM},
    { "mpegts_pmt_start_pid", "Set the first pid of the PMT.",
      offsetof(MpegTSWrite, pmt_start_pid), AV_OPT_TYPE_INT, {.i64 = 0x1000 }, 0x0010, 0x1f00, AV_OPT_FLAG_ENCODING_PARAM},
    { "mpegts_start_pid", "Set the first pid.",
      offsetof(MpegTSWrite, start_pid), AV_OPT_TYPE_INT, {.i64 = 0x0100 }, 0x0100, 0x0f00, AV_OPT_FLAG_ENCODING_PARAM},
    {"mpegts_m2ts_mode", "Enable m2ts mode.",
        offsetof(MpegTSWrite, m2ts_mode), AV_OPT_TYPE_INT, {.i64 = -1 },
        -1,1, AV_OPT_FLAG_ENCODING_PARAM},
    { "muxrate", NULL, offsetof(MpegTSWrite, mux_rate), AV_OPT_TYPE_INT, {.i64 = 1}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM},
    { "pes_payload_size", "Minimum PES packet payload in bytes",
      offsetof(MpegTSWrite, pes_payload_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT_PES_PAYLOAD_SIZE}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM},
    { "mpegts_flags", "MPEG-TS muxing flags", offsetof(MpegTSWrite, flags), AV_OPT_TYPE_FLAGS, {.i64 = 0}, 0, INT_MAX,
      AV_OPT_FLAG_ENCODING_PARAM, "mpegts_flags" },
    { "resend_headers", "Reemit PAT/PMT before writing the next packet",
      0, AV_OPT_TYPE_CONST, {.i64 = MPEGTS_FLAG_REEMIT_PAT_PMT}, 0, INT_MAX,
      AV_OPT_FLAG_ENCODING_PARAM, "mpegts_flags"},
    { "latm", "Use LATM packetization for AAC",
      0, AV_OPT_TYPE_CONST, {.i64 = MPEGTS_FLAG_AAC_LATM}, 0, INT_MAX,
      AV_OPT_FLAG_ENCODING_PARAM, "mpegts_flags"},
    // backward compatibility
    { "resend_headers", "Reemit PAT/PMT before writing the next packet",
      offsetof(MpegTSWrite, reemit_pat_pmt), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM},
    { "mpegts_copyts", "don't offset dts/pts",
      offsetof(MpegTSWrite, copyts), AV_OPT_TYPE_INT, {.i64=-1}, -1, 1, AV_OPT_FLAG_ENCODING_PARAM},
    { "tables_version", "set PAT, PMT and SDT version",
      offsetof(MpegTSWrite, tables_version), AV_OPT_TYPE_INT, {.i64=0}, 0, 31, AV_OPT_FLAG_ENCODING_PARAM},
    { NULL },
};
/* AVClass tying the private options above to the muxer context. */
static const AVClass mpegts_muxer_class = {
    .class_name     = "MPEGTS muxer",
    .item_name      = av_default_item_name,
    .option         = options,
    .version        = LIBAVUTIL_VERSION_INT,
};
/* NOTE: 4 bytes must be left at the end for the crc32 */
/*
 * Append the MPEG CRC32 to a complete section and transmit it as one or more
 * 188-byte TS packets through s->write_packet. payload_unit_start_indicator
 * is set on the first packet; the last packet is stuffed with 0xff.
 */
static void mpegts_write_section(MpegTSSection *s, uint8_t *buf, int len)
{
    unsigned int crc;
    unsigned char packet[TS_PACKET_SIZE];
    const unsigned char *buf_ptr;
    unsigned char *q;
    int first, b, len1, left;
    /* CRC32 (big-endian) over everything except the trailing 4 CRC bytes */
    crc = av_bswap32(av_crc(av_crc_get_table(AV_CRC_32_IEEE), -1, buf, len - 4));
    buf[len - 4] = (crc >> 24) & 0xff;
    buf[len - 3] = (crc >> 16) & 0xff;
    buf[len - 2] = (crc >> 8) & 0xff;
    buf[len - 1] = (crc) & 0xff;
    /* send each packet */
    buf_ptr = buf;
    while (len > 0) {
        first = (buf == buf_ptr); /* payload_unit_start_indicator */
        q = packet;
        *q++ = 0x47; /* sync byte */
        b = (s->pid >> 8);
        if (first)
            b |= 0x40;
        *q++ = b;
        *q++ = s->pid;
        s->cc = (s->cc + 1) & 0xf;
        *q++ = 0x10 | s->cc; /* payload only + continuity counter */
        if (first)
            *q++ = 0; /* 0 offset */
        len1 = TS_PACKET_SIZE - (q - packet);
        if (len1 > len)
            len1 = len;
        memcpy(q, buf_ptr, len1);
        q += len1;
        /* add known padding data */
        left = TS_PACKET_SIZE - (q - packet);
        if (left > 0)
            memset(q, 0xff, left);
        s->write_packet(s, packet);
        buf_ptr += len1;
        len -= len1;
    }
}
/* Append a 16-bit value in big-endian order at *q_ptr and advance it. */
static inline void put16(uint8_t **q_ptr, int val)
{
    uint8_t *p = *q_ptr;

    *p++ = (val >> 8) & 0xff;
    *p++ = val & 0xff;
    *q_ptr = p;
}
/*
 * Wrap 'buf' in a standard section header (table_id, length, id, version,
 * section numbers) and emit it via mpegts_write_section().
 * Returns 0, or AVERROR_INVALIDDATA if the section would exceed 1024 bytes.
 */
static int mpegts_write_section1(MpegTSSection *s, int tid, int id,
                          int version, int sec_num, int last_sec_num,
                          uint8_t *buf, int len)
{
    uint8_t section[1024], *q;
    unsigned int tot_len;
    /* reserved_future_use field must be set to 1 for SDT */
    unsigned int flags = tid == SDT_TID ? 0xf000 : 0xb000;
    tot_len = 3 + 5 + len + 4;
    /* check if not too big */
    if (tot_len > 1024)
        return AVERROR_INVALIDDATA;
    q = section;
    *q++ = tid;
    put16(&q, flags | (len + 5 + 4)); /* 5 byte header + 4 byte CRC */
    put16(&q, id);
    *q++ = 0xc1 | (version << 1); /* current_next_indicator = 1 */
    *q++ = sec_num;
    *q++ = last_sec_num;
    memcpy(q, buf, len);
    mpegts_write_section(s, section, tot_len);
    return 0;
}
/*********************************************/
/* mpegts writer */
#define DEFAULT_PROVIDER_NAME "FFmpeg"
#define DEFAULT_SERVICE_NAME "Service01"
/* we retransmit the SI info at this rate */
#define SDT_RETRANS_TIME 500
#define PAT_RETRANS_TIME 100
#define PCR_RETRANS_TIME 20
/* Per-AVStream muxing state. */
typedef struct MpegTSWriteStream {
    struct MpegTSService *service; /* program this stream belongs to */
    int pid; /* stream associated pid */
    int cc;  /* 4-bit continuity counter */
    int payload_size; /* bytes currently buffered in 'payload' */
    int first_pts_check; ///< first pts check needed
    int prev_payload_key; /* previous flushed payload was a key frame */
    int64_t payload_pts;
    int64_t payload_dts;
    int payload_flags;
    uint8_t *payload; /* PES payload accumulation buffer */
    AVFormatContext *amux; /* helper muxer repacking raw AAC to ADTS/LATM */
} MpegTSWriteStream;
/* Write the Program Association Table: one (sid, PMT pid) entry per service. */
static void mpegts_write_pat(AVFormatContext *s)
{
    MpegTSWrite *ts = s->priv_data;
    MpegTSService *service;
    uint8_t data[1012], *q;
    int i;
    q = data;
    for(i = 0; i < ts->nb_services; i++) {
        service = ts->services[i];
        put16(&q, service->sid);
        put16(&q, 0xe000 | service->pmt.pid); /* 3 reserved bits + 13-bit pid */
    }
    mpegts_write_section1(&ts->pat, PAT_TID, ts->tsid, ts->tables_version, 0, 0,
                          data, q - data);
}
/*
 * Write the Program Map Table for one service: the PCR pid followed by one
 * entry (stream_type, elementary pid, descriptors) per AVStream.
 * Returns 0, or AVERROR(EINVAL) if the section buffer would overflow.
 */
static int mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
{
    MpegTSWrite *ts = s->priv_data;
    uint8_t data[1012], *q, *desc_length_ptr, *program_info_length_ptr;
    int val, stream_type, i;
    q = data;
    put16(&q, 0xe000 | service->pcr_pid);
    program_info_length_ptr = q;
    q += 2; /* patched after */
    /* put program info here */
    val = 0xf000 | (q - program_info_length_ptr - 2);
    program_info_length_ptr[0] = val >> 8;
    program_info_length_ptr[1] = val;
    for(i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        MpegTSWriteStream *ts_st = st->priv_data;
        AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL,0);
        /* map codec id to the MPEG-TS stream_type field */
        switch(st->codec->codec_id) {
        case AV_CODEC_ID_MPEG1VIDEO:
        case AV_CODEC_ID_MPEG2VIDEO:
            stream_type = STREAM_TYPE_VIDEO_MPEG2;
            break;
        case AV_CODEC_ID_MPEG4:
            stream_type = STREAM_TYPE_VIDEO_MPEG4;
            break;
        case AV_CODEC_ID_H264:
            stream_type = STREAM_TYPE_VIDEO_H264;
            break;
        case AV_CODEC_ID_HEVC:
            stream_type = STREAM_TYPE_VIDEO_HEVC;
            break;
        case AV_CODEC_ID_CAVS:
            stream_type = STREAM_TYPE_VIDEO_CAVS;
            break;
        case AV_CODEC_ID_DIRAC:
            stream_type = STREAM_TYPE_VIDEO_DIRAC;
            break;
        case AV_CODEC_ID_MP2:
        case AV_CODEC_ID_MP3:
            stream_type = STREAM_TYPE_AUDIO_MPEG1;
            break;
        case AV_CODEC_ID_AAC:
            stream_type = (ts->flags & MPEGTS_FLAG_AAC_LATM) ? STREAM_TYPE_AUDIO_AAC_LATM : STREAM_TYPE_AUDIO_AAC;
            break;
        case AV_CODEC_ID_AAC_LATM:
            stream_type = STREAM_TYPE_AUDIO_AAC_LATM;
            break;
        case AV_CODEC_ID_AC3:
            stream_type = STREAM_TYPE_AUDIO_AC3;
            break;
        default:
            stream_type = STREAM_TYPE_PRIVATE_DATA;
            break;
        }
        /* keep headroom for the per-stream header plus small descriptors */
        if (q - data > sizeof(data) - 32)
            return AVERROR(EINVAL);
        *q++ = stream_type;
        put16(&q, 0xe000 | ts_st->pid);
        desc_length_ptr = q;
        q += 2; /* patched after */
        /* write optional descriptors here */
        switch(st->codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if(st->codec->codec_id==AV_CODEC_ID_EAC3){
                *q++=0x7a; // EAC3 descriptor see A038 DVB SI
                *q++=1; // 1 byte, all flags sets to 0
                *q++=0; // omit all fields...
            }
            if(st->codec->codec_id==AV_CODEC_ID_S302M){
                *q++ = 0x05; /* MPEG-2 registration descriptor*/
                *q++ = 4;
                *q++ = 'B';
                *q++ = 'S';
                *q++ = 'S';
                *q++ = 'D';
            }
            if (lang) {
                char *p;
                char *next = lang->value;
                uint8_t *len_ptr;
                *q++ = 0x0a; /* ISO 639 language descriptor */
                len_ptr = q++;
                *len_ptr = 0;
                /* 'language' metadata may hold a comma-separated list of
                 * 3-letter ISO 639-2 codes; each entry takes 4 bytes */
                for (p = lang->value; next && *len_ptr < 255 / 4 * 4 && q - data < sizeof(data) - 4; p = next + 1) {
                    next = strchr(p, ',');
                    if (strlen(p) != 3 && (!next || next != p + 3))
                        continue; /* not a 3-letter code */
                    *q++ = *p++;
                    *q++ = *p++;
                    *q++ = *p++;
                    if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
                        *q++ = 0x01;
                    else if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
                        *q++ = 0x02;
                    else if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
                        *q++ = 0x03;
                    else
                        *q++ = 0; /* undefined type */
                    *len_ptr += 4;
                }
                if (*len_ptr == 0)
                    q -= 2; /* no language codes were written */
            }
            break;
        case AVMEDIA_TYPE_SUBTITLE:
        {
            const char default_language[] = "und";
            const char *language = lang && strlen(lang->value) >= 3 ? lang->value : default_language;
            if (st->codec->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
                uint8_t *len_ptr;
                int extradata_copied = 0;
                *q++ = 0x59; /* subtitling_descriptor */
                len_ptr = q++;
                while (strlen(language) >= 3 && (sizeof(data) - (q - data)) >= 8) { /* 8 bytes per DVB subtitle substream data */
                    *q++ = *language++;
                    *q++ = *language++;
                    *q++ = *language++;
                    /* Skip comma */
                    if (*language != '\0')
                        language++;
                    if (st->codec->extradata_size - extradata_copied >= 5) {
                        *q++ = st->codec->extradata[extradata_copied + 4]; /* subtitling_type */
                        memcpy(q, st->codec->extradata + extradata_copied, 4); /* composition_page_id and ancillary_page_id */
                        extradata_copied += 5;
                        q += 4;
                    } else {
                        /* subtitling_type:
                         * 0x10 - normal with no monitor aspect ratio criticality
                         * 0x20 - for the hard of hearing with no monitor aspect ratio criticality */
                        *q++ = (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED) ? 0x20 : 0x10;
                        if ((st->codec->extradata_size == 4) && (extradata_copied == 0)) {
                            /* support of old 4-byte extradata format */
                            memcpy(q, st->codec->extradata, 4); /* composition_page_id and ancillary_page_id */
                            extradata_copied += 4;
                            q += 4;
                        } else {
                            put16(&q, 1); /* composition_page_id */
                            put16(&q, 1); /* ancillary_page_id */
                        }
                    }
                }
                *len_ptr = q - len_ptr - 1;
            } else if (st->codec->codec_id == AV_CODEC_ID_DVB_TELETEXT) {
                uint8_t *len_ptr = NULL;
                int extradata_copied = 0;
                /* The descriptor tag. teletext_descriptor */
                *q++ = 0x56;
                len_ptr = q++;
                while (strlen(language) >= 3 && q - data < sizeof(data) - 6) {
                    *q++ = *language++;
                    *q++ = *language++;
                    *q++ = *language++;
                    /* Skip comma */
                    if (*language != '\0')
                        language++;
                    if (st->codec->extradata_size - 1 > extradata_copied) {
                        memcpy(q, st->codec->extradata + extradata_copied, 2);
                        extradata_copied += 2;
                        q += 2;
                    } else {
                        /* The Teletext descriptor:
                         * teletext_type: This 5-bit field indicates the type of Teletext page indicated. (0x01 Initial Teletext page)
                         * teletext_magazine_number: This is a 3-bit field which identifies the magazine number.
                         * teletext_page_number: This is an 8-bit field giving two 4-bit hex digits identifying the page number. */
                        *q++ = 0x08;
                        *q++ = 0x00;
                    }
                }
                *len_ptr = q - len_ptr - 1;
            }
        }
        break;
        case AVMEDIA_TYPE_VIDEO:
            if (stream_type == STREAM_TYPE_VIDEO_DIRAC) {
                *q++ = 0x05; /*MPEG-2 registration descriptor*/
                *q++ = 4;
                *q++ = 'd';
                *q++ = 'r';
                *q++ = 'a';
                *q++ = 'c';
            }
            break;
        case AVMEDIA_TYPE_DATA:
            if (st->codec->codec_id == AV_CODEC_ID_SMPTE_KLV) {
                *q++ = 0x05; /* MPEG-2 registration descriptor */
                *q++ = 4;
                *q++ = 'K';
                *q++ = 'L';
                *q++ = 'V';
                *q++ = 'A';
            }
            break;
        }
        /* backpatch the ES_info_length for this stream */
        val = 0xf000 | (q - desc_length_ptr - 2);
        desc_length_ptr[0] = val >> 8;
        desc_length_ptr[1] = val;
    }
    mpegts_write_section1(&service->pmt, PMT_TID, service->sid, ts->tables_version, 0, 0,
                          data, q - data);
    return 0;
}
/*
 * Append a length-prefixed string (8-bit length, then the bytes, no NUL).
 * NOTE: str == NULL is accepted for an empty string.
 */
static void putstr8(uint8_t **q_ptr, const char *str)
{
    uint8_t *p = *q_ptr;
    int n = str ? strlen(str) : 0;

    *p++ = n;
    memcpy(p, str, n);
    p += n;
    *q_ptr = p;
}
/*
 * Write the Service Description Table: one entry per service carrying a
 * service_descriptor (0x48) with provider and service names.
 */
static void mpegts_write_sdt(AVFormatContext *s)
{
    MpegTSWrite *ts = s->priv_data;
    MpegTSService *service;
    uint8_t data[1012], *q, *desc_list_len_ptr, *desc_len_ptr;
    int i, running_status, free_ca_mode, val;
    q = data;
    put16(&q, ts->onid);
    *q++ = 0xff; /* reserved_future_use */
    for(i = 0; i < ts->nb_services; i++) {
        service = ts->services[i];
        put16(&q, service->sid);
        *q++ = 0xfc | 0x00; /* currently no EIT info */
        desc_list_len_ptr = q;
        q += 2; /* backpatched below together with status bits */
        running_status = 4; /* running */
        free_ca_mode = 0;
        /* write only one descriptor for the service name and provider */
        *q++ = 0x48; /* service_descriptor tag */
        desc_len_ptr = q;
        q++;
        *q++ = 0x01; /* digital television service */
        putstr8(&q, service->provider_name);
        putstr8(&q, service->name);
        desc_len_ptr[0] = q - desc_len_ptr - 1;
        /* fill descriptor length */
        val = (running_status << 13) | (free_ca_mode << 12) |
            (q - desc_list_len_ptr - 2);
        desc_list_len_ptr[0] = val >> 8;
        desc_list_len_ptr[1] = val;
    }
    mpegts_write_section1(&ts->sdt, SDT_TID, ts->tsid, ts->tables_version, 0, 0,
                          data, q - data);
}
/*
 * Allocate and register a new DVB service (program) on the muxer.
 * The PMT pid is assigned sequentially from pmt_start_pid; the PCR pid
 * starts out unset (0x1fff) and is chosen later during write_header.
 * Returns the new service, or NULL on allocation failure.
 */
static MpegTSService *mpegts_add_service(MpegTSWrite *ts,
                                         int sid,
                                         const char *provider_name,
                                         const char *name)
{
    MpegTSService *service;

    service = av_mallocz(sizeof(MpegTSService));
    if (!service)
        return NULL;
    service->pmt.pid = ts->pmt_start_pid + ts->nb_services;
    service->sid = sid;
    service->pcr_pid = 0x1fff;
    service->provider_name = av_strdup(provider_name);
    service->name = av_strdup(name);
    /* av_strdup() can fail on OOM: do not register a half-built service
     * (NULL input legitimately yields NULL and is still accepted) */
    if ((provider_name && !service->provider_name) ||
        (name && !service->name)) {
        av_freep(&service->provider_name);
        av_freep(&service->name);
        av_freep(&service);
        return NULL;
    }
    dynarray_add(&ts->services, &ts->nb_services, service);
    return service;
}
/*
 * Compute the 27 MHz PCR for the current output position, assuming constant
 * bitrate ts->mux_rate; the +11 bytes account for the position of the PCR
 * field inside the TS packet being written.
 */
static int64_t get_pcr(const MpegTSWrite *ts, AVIOContext *pb)
{
    return av_rescale(avio_tell(pb) + 11, 8 * PCR_TIME_BASE, ts->mux_rate) +
           ts->first_pcr;
}
/*
 * In m2ts mode, prefix the upcoming 188-byte packet with the 4-byte
 * extra header (copy permission + arrival timestamp), producing the
 * 192-byte packets used by BDAV/Blu-ray streams.
 */
static void mpegts_prefix_m2ts_header(AVFormatContext *s)
{
    MpegTSWrite *ts = s->priv_data;
    if (ts->m2ts_mode) {
        int64_t pcr = get_pcr(s->priv_data, s->pb);
        /* NOTE(review): a 30-bit arrival timestamp would suggest a modulus of
         * 0x40000000 rather than 0x3fffffff -- confirm against the BDAV spec */
        uint32_t tp_extra_header = pcr % 0x3fffffff;
        tp_extra_header = AV_RB32(&tp_extra_header);
        avio_write(s->pb, (unsigned char *) &tp_extra_header,
                sizeof(tp_extra_header));
    }
}
/* MpegTSSection write_packet callback: emit one TS packet to the muxer's pb,
 * prefixing the m2ts extra header when enabled. */
static void section_write_packet(MpegTSSection *s, const uint8_t *packet)
{
    AVFormatContext *ctx = s->opaque;
    mpegts_prefix_m2ts_header(ctx);
    avio_write(ctx->pb, packet, TS_PACKET_SIZE);
}
/*
 * Muxer init: create the single DVB service, set up PAT/SDT sections,
 * assign a pid and per-stream state to every AVStream, pick the PCR stream
 * (first video stream, else stream 0) and derive the PCR/PAT/SDT
 * retransmission periods from the mux rate (or fixed defaults for VBR).
 * Returns 0 on success or a negative AVERROR code.
 */
static int mpegts_write_header(AVFormatContext *s)
{
    MpegTSWrite *ts = s->priv_data;
    MpegTSWriteStream *ts_st;
    MpegTSService *service;
    AVStream *st, *pcr_st = NULL;
    AVDictionaryEntry *title, *provider;
    int i, j;
    const char *service_name;
    const char *provider_name;
    int *pids;
    int ret;
    if (s->max_delay < 0) /* Not set by the caller */
        s->max_delay = 0;
    // round up to a whole number of TS packets
    ts->pes_payload_size = (ts->pes_payload_size + 14 + 183) / 184 * 184 - 14;
    ts->tsid = ts->transport_stream_id;
    ts->onid = ts->original_network_id;
    /* allocate a single DVB service */
    title = av_dict_get(s->metadata, "service_name", NULL, 0);
    if (!title)
        title = av_dict_get(s->metadata, "title", NULL, 0);
    service_name = title ? title->value : DEFAULT_SERVICE_NAME;
    provider = av_dict_get(s->metadata, "service_provider", NULL, 0);
    provider_name = provider ? provider->value : DEFAULT_PROVIDER_NAME;
    service = mpegts_add_service(ts, ts->service_id, provider_name, service_name);
    /* NOTE(review): mpegts_add_service() may return NULL on OOM; the result
     * is dereferenced unchecked below -- confirm and add a check */
    service->pmt.write_packet = section_write_packet;
    service->pmt.opaque = s;
    service->pmt.cc = 15;
    ts->pat.pid = PAT_PID;
    ts->pat.cc = 15; // Initialize at 15 so that it wraps and be equal to 0 for the first packet we write
    ts->pat.write_packet = section_write_packet;
    ts->pat.opaque = s;
    ts->sdt.pid = SDT_PID;
    ts->sdt.cc = 15;
    ts->sdt.write_packet = section_write_packet;
    ts->sdt.opaque = s;
    pids = av_malloc(s->nb_streams * sizeof(*pids));
    if (!pids)
        return AVERROR(ENOMEM);
    /* assign pids to each stream */
    for(i = 0;i < s->nb_streams; i++) {
        st = s->streams[i];
        avpriv_set_pts_info(st, 33, 1, 90000);
        ts_st = av_mallocz(sizeof(MpegTSWriteStream));
        if (!ts_st) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        st->priv_data = ts_st;
        ts_st->payload = av_mallocz(ts->pes_payload_size);
        if (!ts_st->payload) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        ts_st->service = service;
        /* MPEG pid values < 16 are reserved. Applications which set st->id in
         * this range are assigned a calculated pid. */
        if (st->id < 16) {
            ts_st->pid = ts->start_pid + i;
        } else if (st->id < 0x1FFF) {
            ts_st->pid = st->id;
        } else {
            av_log(s, AV_LOG_ERROR, "Invalid stream id %d, must be less than 8191\n", st->id);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        if (ts_st->pid == service->pmt.pid) {
            av_log(s, AV_LOG_ERROR, "Duplicate stream id %d\n", ts_st->pid);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        /* reject pids already taken by an earlier stream */
        for (j = 0; j < i; j++)
            if (pids[j] == ts_st->pid) {
                av_log(s, AV_LOG_ERROR, "Duplicate stream id %d\n", ts_st->pid);
                ret = AVERROR(EINVAL);
                goto fail;
            }
        pids[i] = ts_st->pid;
        ts_st->payload_pts = AV_NOPTS_VALUE;
        ts_st->payload_dts = AV_NOPTS_VALUE;
        ts_st->first_pts_check = 1;
        ts_st->cc = 15;
        /* update PCR pid by using the first video stream */
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
            service->pcr_pid == 0x1fff) {
            service->pcr_pid = ts_st->pid;
            pcr_st = st;
        }
        if (st->codec->codec_id == AV_CODEC_ID_AAC &&
            st->codec->extradata_size > 0)
        {
            AVStream *ast;
            ts_st->amux = avformat_alloc_context();
            if (!ts_st->amux) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            ts_st->amux->oformat = av_guess_format((ts->flags & MPEGTS_FLAG_AAC_LATM) ? "latm" : "adts", NULL, NULL);
            if (!ts_st->amux->oformat) {
                ret = AVERROR(EINVAL);
                goto fail;
            }
            ast = avformat_new_stream(ts_st->amux, NULL);
            /* NOTE(review): 'ast' is not NULL-checked before use;
             * avformat_new_stream() can fail on OOM */
            ret = avcodec_copy_context(ast->codec, st->codec);
            if (ret != 0)
                goto fail;
            ret = avformat_write_header(ts_st->amux, NULL);
            if (ret < 0)
                goto fail;
        }
    }
    av_free(pids);
    /* if no video stream, use the first stream as PCR */
    if (service->pcr_pid == 0x1fff && s->nb_streams > 0) {
        pcr_st = s->streams[0];
        ts_st = pcr_st->priv_data;
        service->pcr_pid = ts_st->pid;
    }
    if (ts->mux_rate > 1) {
        /* CBR: retransmission periods derived from the wall-clock targets */
        service->pcr_packet_period = (ts->mux_rate * PCR_RETRANS_TIME) /
            (TS_PACKET_SIZE * 8 * 1000);
        ts->sdt_packet_period      = (ts->mux_rate * SDT_RETRANS_TIME) /
            (TS_PACKET_SIZE * 8 * 1000);
        ts->pat_packet_period      = (ts->mux_rate * PAT_RETRANS_TIME) /
            (TS_PACKET_SIZE * 8 * 1000);
        if(ts->copyts < 1)
            ts->first_pcr = av_rescale(s->max_delay, PCR_TIME_BASE, AV_TIME_BASE);
    } else {
        /* Arbitrary values, PAT/PMT will also be written on video key frames */
        ts->sdt_packet_period = 200;
        ts->pat_packet_period = 40;
        /* NOTE(review): pcr_st may still be NULL here if nb_streams == 0 --
         * confirm the caller guarantees at least one stream */
        if (pcr_st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (!pcr_st->codec->frame_size) {
                av_log(s, AV_LOG_WARNING, "frame size not set\n");
                service->pcr_packet_period =
                    pcr_st->codec->sample_rate/(10*512);
            } else {
                service->pcr_packet_period =
                    pcr_st->codec->sample_rate/(10*pcr_st->codec->frame_size);
            }
        } else {
            // max delta PCR 0.1s
            service->pcr_packet_period =
                pcr_st->codec->time_base.den/(10*pcr_st->codec->time_base.num);
        }
        if(!service->pcr_packet_period)
            service->pcr_packet_period = 1;
    }
    // output a PCR as soon as possible
    service->pcr_packet_count = service->pcr_packet_period;
    ts->pat_packet_count = ts->pat_packet_period-1;
    ts->sdt_packet_count = ts->sdt_packet_period-1;
    if (ts->mux_rate == 1)
        av_log(s, AV_LOG_VERBOSE, "muxrate VBR, ");
    else
        av_log(s, AV_LOG_VERBOSE, "muxrate %d, ", ts->mux_rate);
    av_log(s, AV_LOG_VERBOSE, "pcr every %d pkts, "
           "sdt every %d, pat/pmt every %d pkts\n",
           service->pcr_packet_period,
           ts->sdt_packet_period, ts->pat_packet_period);
    if (ts->m2ts_mode == -1) {
        if (av_match_ext(s->filename, "m2ts")) {
            ts->m2ts_mode = 1;
        } else {
            ts->m2ts_mode = 0;
        }
    }
    avio_flush(s->pb);
    return 0;
 fail:
    av_free(pids);
    for(i = 0;i < s->nb_streams; i++) {
        MpegTSWriteStream *ts_st;
        st = s->streams[i];
        ts_st = st->priv_data;
        if (ts_st) {
            av_freep(&ts_st->payload);
            if (ts_st->amux) {
                avformat_free_context(ts_st->amux);
                ts_st->amux = NULL;
            }
        }
        av_freep(&st->priv_data);
    }
    return ret;
}
/* Re-emit the SI tables (SDT, PAT and all PMTs) at their regular intervals. */
static void retransmit_si_info(AVFormatContext *s, int force_pat)
{
    MpegTSWrite *ts = s->priv_data;
    int i;

    /* SDT runs on its own period */
    ts->sdt_packet_count++;
    if (ts->sdt_packet_count == ts->sdt_packet_period) {
        ts->sdt_packet_count = 0;
        mpegts_write_sdt(s);
    }

    /* PAT plus every PMT on their period, or immediately when forced */
    ts->pat_packet_count++;
    if (force_pat || ts->pat_packet_count == ts->pat_packet_period) {
        ts->pat_packet_count = 0;
        mpegts_write_pat(s);
        for (i = 0; i < ts->nb_services; i++)
            mpegts_write_pmt(s, ts->services[i]);
    }
}
/*
 * Encode a 27 MHz PCR into the 6-byte on-wire form: 33-bit base (90 kHz),
 * 6 reserved bits set to 1, 9-bit extension. Returns the number of bytes
 * written (always 6).
 */
static int write_pcr_bits(uint8_t *buf, int64_t pcr)
{
    int64_t ext  = pcr % 300; /* 9-bit 27 MHz extension */
    int64_t base = pcr / 300; /* 33-bit 90 kHz base */

    buf[0] = base >> 25;
    buf[1] = base >> 17;
    buf[2] = base >> 9;
    buf[3] = base >> 1;
    buf[4] = (base << 7) | 0x7e | (ext >> 8); /* reserved bits all ones */
    buf[5] = ext;
    return 6;
}
/* Write a single null transport stream packet */
/* Null packets (pid 0x1fff) pad the stream to maintain constant bitrate. */
static void mpegts_insert_null_packet(AVFormatContext *s)
{
    uint8_t *q;
    uint8_t buf[TS_PACKET_SIZE];
    q = buf;
    *q++ = 0x47;        /* sync byte */
    *q++ = 0x00 | 0x1f; /* high bits of the null pid 0x1fff */
    *q++ = 0xff;
    *q++ = 0x10;        /* payload only, cc 0 (ignored for null packets) */
    memset(q, 0x0FF, TS_PACKET_SIZE - (q - buf));
    mpegts_prefix_m2ts_header(s);
    avio_write(s->pb, buf, TS_PACKET_SIZE);
}
/* Write a single transport stream packet with a PCR and no payload */
static void mpegts_insert_pcr_only(AVFormatContext *s, AVStream *st)
{
    MpegTSWrite *ts = s->priv_data;
    MpegTSWriteStream *ts_st = st->priv_data;
    uint8_t *q;
    uint8_t buf[TS_PACKET_SIZE];
    q = buf;
    *q++ = 0x47; /* sync byte */
    *q++ = ts_st->pid >> 8;
    *q++ = ts_st->pid;
    *q++ = 0x20 | ts_st->cc; /* Adaptation only */
    /* Continuity Count field does not increment (see 13818-1 section 2.4.3.3) */
    *q++ = TS_PACKET_SIZE - 5; /* Adaptation Field Length */
    *q++ = 0x10; /* Adaptation flags: PCR present */
    /* PCR coded into 6 bytes */
    q += write_pcr_bits(q, get_pcr(ts, s->pb));
    /* stuffing bytes */
    memset(q, 0xFF, TS_PACKET_SIZE - (q - buf));
    mpegts_prefix_m2ts_header(s);
    avio_write(s->pb, buf, TS_PACKET_SIZE);
}
/*
 * Write a 33-bit PTS/DTS in the 5-byte PES format: 4 prefix bits, then the
 * timestamp split 3/15/15 with a marker bit after each group.
 */
static void write_pts(uint8_t *q, int fourbits, int64_t pts)
{
    int tmp;

    tmp  = (fourbits << 4) | (((pts >> 30) & 0x07) << 1) | 1;
    q[0] = tmp;
    tmp  = (((pts >> 15) & 0x7fff) << 1) | 1;
    q[1] = tmp >> 8;
    q[2] = tmp;
    tmp  = ((pts & 0x7fff) << 1) | 1;
    q[3] = tmp >> 8;
    q[4] = tmp;
}
/* Set an adaptation-field flag bit, creating a minimal (1-byte) AF first
 * if the packet does not carry one yet. */
static void set_af_flag(uint8_t *pkt, int flag)
{
    // expect at least one flag to set
    av_assert0(flag);

    if (!(pkt[3] & 0x20)) {
        /* no AF yet: mark adaptation field present, length 1, no flags */
        pkt[3] |= 0x20;
        pkt[4] = 1;
        pkt[5] = 0;
    }
    pkt[5] |= flag;
}
/* Extend the adaptation field by size bytes */
/* Caller must already have created the AF (e.g. via set_af_flag). */
static void extend_af(uint8_t *pkt, int size)
{
    // expect already existing adaptation field
    av_assert0(pkt[3] & 0x20);
    pkt[4] += size; /* bump adaptation_field_length */
}
/* Return a pointer to the TS payload, skipping the 4-byte header and,
 * when present, the adaptation field. */
static uint8_t *get_ts_payload_start(uint8_t *pkt)
{
    int has_af = pkt[3] & 0x20;

    return has_af ? pkt + 5 + pkt[4] : pkt + 4;
}
/* Add a pes header to the front of payload, and segment into an integer number of
 * ts packets. The final ts packet is padded using an over-sized adaptation header
 * to exactly fill the last ts packet.
 * NOTE: 'payload' contains a complete PES payload.
 */
static void mpegts_write_pes(AVFormatContext *s, AVStream *st,
                             const uint8_t *payload, int payload_size,
                             int64_t pts, int64_t dts, int key)
{
    MpegTSWriteStream *ts_st = st->priv_data;
    MpegTSWrite *ts = s->priv_data;
    uint8_t buf[TS_PACKET_SIZE];
    uint8_t *q;
    int val, is_start, len, header_len, write_pcr, is_dvb_subtitle, is_dvb_teletext, flags;
    int afc_len, stuffing_len;
    int64_t pcr = -1; /* avoid warning */
    int64_t delay = av_rescale(s->max_delay, 90000, AV_TIME_BASE);
    /* force a PAT/PMT retransmission at the start of each video key frame */
    int force_pat = st->codec->codec_type == AVMEDIA_TYPE_VIDEO && key && !ts_st->prev_payload_key;
    is_start = 1;
    while (payload_size > 0) {
        retransmit_si_info(s, force_pat);
        force_pat = 0;
        write_pcr = 0;
        if (ts_st->pid == ts_st->service->pcr_pid) {
            if (ts->mux_rate > 1 || is_start) // VBR pcr period is based on frames
                ts_st->service->pcr_packet_count++;
            if (ts_st->service->pcr_packet_count >=
                ts_st->service->pcr_packet_period) {
                ts_st->service->pcr_packet_count = 0;
                write_pcr = 1;
            }
        }
        /* CBR only: if the muxed data runs ahead of the PCR clock by more
         * than the allowed delay, pad with PCR-only or null packets */
        if (ts->mux_rate > 1 && dts != AV_NOPTS_VALUE &&
            (dts - get_pcr(ts, s->pb)/300) > delay) {
            /* pcr insert gets priority over null packet insert */
            if (write_pcr)
                mpegts_insert_pcr_only(s, st);
            else
                mpegts_insert_null_packet(s);
            continue; /* recalculate write_pcr and possibly retransmit si_info */
        }
        /* prepare packet header */
        q = buf;
        *q++ = 0x47;
        val = (ts_st->pid >> 8);
        if (is_start)
            val |= 0x40;
        *q++ = val;
        *q++ = ts_st->pid;
        ts_st->cc = (ts_st->cc + 1) & 0xf;
        *q++ = 0x10 | ts_st->cc; // payload indicator + CC
        if (key && is_start && pts != AV_NOPTS_VALUE) {
            // set Random Access for key frames
            if (ts_st->pid == ts_st->service->pcr_pid)
                write_pcr = 1;
            set_af_flag(buf, 0x40);
            q = get_ts_payload_start(buf);
        }
        if (write_pcr) {
            set_af_flag(buf, 0x10);
            q = get_ts_payload_start(buf);
            // add 11, pcr references the last byte of program clock reference base
            if (ts->mux_rate > 1)
                pcr = get_pcr(ts, s->pb);
            else
                pcr = (dts - delay)*300;
            if (dts != AV_NOPTS_VALUE && dts < pcr / 300)
                av_log(s, AV_LOG_WARNING, "dts < pcr, TS is invalid\n");
            extend_af(buf, write_pcr_bits(q, pcr));
            q = get_ts_payload_start(buf);
        }
        if (is_start) {
            int pes_extension = 0;
            int pes_header_stuffing_bytes = 0;
            /* write PES header */
            *q++ = 0x00;
            *q++ = 0x00;
            *q++ = 0x01;
            is_dvb_subtitle = 0;
            is_dvb_teletext = 0;
            /* choose the PES stream_id byte by codec family */
            if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
                if (st->codec->codec_id == AV_CODEC_ID_DIRAC) {
                    *q++ = 0xfd;
                } else
                    *q++ = 0xe0;
            } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
                       (st->codec->codec_id == AV_CODEC_ID_MP2 ||
                        st->codec->codec_id == AV_CODEC_ID_MP3 ||
                        st->codec->codec_id == AV_CODEC_ID_AAC)) {
                *q++ = 0xc0;
            } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
                        st->codec->codec_id == AV_CODEC_ID_AC3 &&
                        ts->m2ts_mode) {
                *q++ = 0xfd;
            } else {
                *q++ = 0xbd;
                if(st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
                    if (st->codec->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
                        is_dvb_subtitle = 1;
                    } else if (st->codec->codec_id == AV_CODEC_ID_DVB_TELETEXT) {
                        is_dvb_teletext = 1;
                    }
                }
            }
            header_len = 0;
            flags = 0;
            if (pts != AV_NOPTS_VALUE) {
                header_len += 5;
                flags |= 0x80;
            }
            if (dts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE && dts != pts) {
                header_len += 5;
                flags |= 0x40;
            }
            if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
                st->codec->codec_id == AV_CODEC_ID_DIRAC) {
                /* set PES_extension_flag */
                pes_extension = 1;
                flags |= 0x01;
                /*
                * One byte for PES2 extension flag +
                * one byte for extension length +
                * one byte for extension id
                */
                header_len += 3;
            }
            /* for Blu-ray AC3 Audio the PES Extension flag should be as follow
             * otherwise it will not play sound on blu-ray
             */
            if (ts->m2ts_mode &&
                st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
                st->codec->codec_id == AV_CODEC_ID_AC3) {
                        /* set PES_extension_flag */
                        pes_extension = 1;
                        flags |= 0x01;
                        header_len += 3;
            }
            if (is_dvb_teletext) {
                pes_header_stuffing_bytes = 0x24 - header_len;
                header_len = 0x24;
            }
            len = payload_size + header_len + 3;
            /* 3 extra bytes should be added to DVB subtitle payload: 0x20 0x00 at the beginning and trailing 0xff */
            if (is_dvb_subtitle) {
                len += 3;
                payload_size++;
            }
            if (len > 0xffff)
                len = 0; /* unbounded PES packet (allowed for video) */
            *q++ = len >> 8;
            *q++ = len;
            val = 0x80;
            /* data alignment indicator is required for subtitle and data streams */
            if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA)
                val |= 0x04;
            *q++ = val;
            *q++ = flags;
            *q++ = header_len;
            if (pts != AV_NOPTS_VALUE) {
                write_pts(q, flags >> 6, pts);
                q += 5;
            }
            if (dts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE && dts != pts) {
                write_pts(q, 1, dts);
                q += 5;
            }
            if (pes_extension && st->codec->codec_id == AV_CODEC_ID_DIRAC) {
                flags = 0x01;  /* set PES_extension_flag_2 */
                *q++ = flags;
                *q++ = 0x80 | 0x01;  /* marker bit + extension length */
                /*
                * Set the stream id extension flag bit to 0 and
                * write the extended stream id
                */
                *q++ = 0x00 | 0x60;
            }
            /* For Blu-ray AC3 Audio Setting extended flags */
            if (ts->m2ts_mode &&
                pes_extension &&
                st->codec->codec_id == AV_CODEC_ID_AC3) {
                        flags = 0x01; /* set PES_extension_flag_2 */
                        *q++ = flags;
                        *q++ = 0x80 | 0x01; /* marker bit + extension length */
                        *q++ = 0x00 | 0x71; /* for AC3 Audio (specifically on blue-rays) */
            }
            if (is_dvb_subtitle) {
                /* First two fields of DVB subtitles PES data:
                 * data_identifier: for DVB subtitle streams shall be coded with the value 0x20
                 * subtitle_stream_id: for DVB subtitle stream shall be identified by the value 0x00 */
                *q++ = 0x20;
                *q++ = 0x00;
            }
            if (is_dvb_teletext) {
                memset(q, 0xff, pes_header_stuffing_bytes);
                q += pes_header_stuffing_bytes;
            }
            is_start = 0;
        }
        /* header size */
        header_len = q - buf;
        /* data len */
        len = TS_PACKET_SIZE - header_len;
        if (len > payload_size)
            len = payload_size;
        stuffing_len = TS_PACKET_SIZE - header_len - len;
        if (stuffing_len > 0) {
            /* add stuffing with AFC */
            if (buf[3] & 0x20) {
                /* stuffing already present: increase its size */
                afc_len = buf[4] + 1;
                memmove(buf + 4 + afc_len + stuffing_len,
                        buf + 4 + afc_len,
                        header_len - (4 + afc_len));
                buf[4] += stuffing_len;
                memset(buf + 4 + afc_len, 0xff, stuffing_len);
            } else {
                /* add stuffing */
                memmove(buf + 4 + stuffing_len, buf + 4, header_len - 4);
                buf[3] |= 0x20;
                buf[4] = stuffing_len - 1;
                if (stuffing_len >= 2) {
                    buf[5] = 0x00;
                    memset(buf + 6, 0xff, stuffing_len - 2);
                }
            }
        }
        if (is_dvb_subtitle && payload_size == len) {
            memcpy(buf + TS_PACKET_SIZE - len, payload, len - 1);
            buf[TS_PACKET_SIZE - 1] = 0xff; /* end_of_PES_data_field_marker: an 8-bit field with fixed contents 0xff for DVB subtitle */
        } else {
            memcpy(buf + TS_PACKET_SIZE - len, payload, len);
        }
        payload += len;
        payload_size -= len;
        mpegts_prefix_m2ts_header(s);
        avio_write(s->pb, buf, TS_PACKET_SIZE);
    }
    avio_flush(s->pb);
    ts_st->prev_payload_key = key;
}
/* Queue one packet for muxing.
 *
 * Video and subtitle packets (and any payload larger than pes_payload_size)
 * are emitted immediately as a single PES packet; audio packets are
 * accumulated in the per-stream payload buffer so several frames can share
 * one PES header.  H.264 input gets an Access Unit Delimiter NAL prepended
 * when missing, and raw AAC is wrapped into ADTS via the helper muxer.
 *
 * Returns 0 on success or a negative AVERROR code. */
static int mpegts_write_packet_internal(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st = s->streams[pkt->stream_index];
    int size = pkt->size;
    uint8_t *buf= pkt->data;
    uint8_t *data= NULL;  /* temporary buffer owned by this call; freed before return */
    MpegTSWrite *ts = s->priv_data;
    MpegTSWriteStream *ts_st = st->priv_data;
    /* mux delay converted to 90 kHz MPEG-TS ticks, doubled */
    const int64_t delay = av_rescale(s->max_delay, 90000, AV_TIME_BASE)*2;
    int64_t dts = pkt->dts, pts = pkt->pts;

    if (ts->reemit_pat_pmt) {
        av_log(s, AV_LOG_WARNING, "resend_headers option is deprecated, use -mpegts_flags resend_headers\n");
        ts->reemit_pat_pmt = 0;
        ts->flags |= MPEGTS_FLAG_REEMIT_PAT_PMT;
    }

    if (ts->flags & MPEGTS_FLAG_REEMIT_PAT_PMT) {
        /* Rewind the periodic counters so PAT/SDT are re-emitted with the
         * very next output packets. */
        ts->pat_packet_count = ts->pat_packet_period - 1;
        ts->sdt_packet_count = ts->sdt_packet_period - 1;
        ts->flags &= ~MPEGTS_FLAG_REEMIT_PAT_PMT;
    }

    if(ts->copyts < 1){
        /* Shift timestamps by the mux delay unless timestamp copying was
         * requested. */
        if (pts != AV_NOPTS_VALUE)
            pts += delay;
        if (dts != AV_NOPTS_VALUE)
            dts += delay;
    }

    if (ts_st->first_pts_check && pts == AV_NOPTS_VALUE) {
        av_log(s, AV_LOG_ERROR, "first pts value must be set\n");
        return AVERROR_INVALIDDATA;
    }
    ts_st->first_pts_check = 0;

    if (st->codec->codec_id == AV_CODEC_ID_H264) {
        const uint8_t *p = buf, *buf_end = p+size;
        uint32_t state = -1;

        /* H.264 in MPEG-TS must be in Annex B form (startcode-prefixed). */
        if (pkt->size < 5 || AV_RB32(pkt->data) != 0x0000001) {
            if (!st->nb_frames) {
                av_log(s, AV_LOG_ERROR, "H.264 bitstream malformed, "
                       "no startcode found, use the h264_mp4toannexb bitstream filter (-bsf h264_mp4toannexb)\n");
                return AVERROR(EINVAL);
            }
            av_log(s, AV_LOG_WARNING, "H.264 bitstream error, startcode missing\n");
        }

        /* Scan NAL units until an AUD (9), IDR slice (5) or non-IDR slice (1)
         * is found, to decide whether an AUD must be prepended. */
        do {
            p = avpriv_find_start_code(p, buf_end, &state);
            av_dlog(s, "nal %d\n", state & 0x1f);
        } while (p < buf_end && (state & 0x1f) != 9 &&
                 (state & 0x1f) != 5 && (state & 0x1f) != 1);

        if ((state & 0x1f) != 9) { // AUD NAL
            /* Prepend a 6-byte Access Unit Delimiter: startcode + NAL 0x09. */
            data = av_malloc(pkt->size+6);
            if (!data)
                return AVERROR(ENOMEM);
            memcpy(data+6, pkt->data, pkt->size);
            AV_WB32(data, 0x00000001);
            data[4] = 0x09;
            data[5] = 0xf0; // any slice type (0xe) + rbsp stop one bit
            buf = data;
            size = pkt->size+6;
        }
    } else if (st->codec->codec_id == AV_CODEC_ID_AAC) {
        if (pkt->size < 2) {
            av_log(s, AV_LOG_ERROR, "AAC packet too short\n");
            return AVERROR_INVALIDDATA;
        }
        /* 0xfff sync word missing => raw AAC; wrap into ADTS using the
         * helper muxer set up at header-writing time. */
        if ((AV_RB16(pkt->data) & 0xfff0) != 0xfff0) {
            int ret;
            AVPacket pkt2;

            if (!ts_st->amux) {
                av_log(s, AV_LOG_ERROR, "AAC bitstream not in ADTS format "
                       "and extradata missing\n");
                return AVERROR_INVALIDDATA;
            }

            av_init_packet(&pkt2);
            pkt2.data = pkt->data;
            pkt2.size = pkt->size;
            ret = avio_open_dyn_buf(&ts_st->amux->pb);
            if (ret < 0)
                return AVERROR(ENOMEM);

            ret = av_write_frame(ts_st->amux, &pkt2);
            if (ret < 0) {
                avio_close_dyn_buf(ts_st->amux->pb, &data);
                ts_st->amux->pb = NULL;
                av_free(data);
                return ret;
            }
            /* Take ownership of the ADTS-wrapped buffer. */
            size = avio_close_dyn_buf(ts_st->amux->pb, &data);
            ts_st->amux->pb = NULL;
            buf = data;
        }
    }

    if (pkt->dts != AV_NOPTS_VALUE) {
        int i;
        /* Flush other streams whose buffered payload is getting stale
         * relative to this packet's DTS (older than half the mux delay). */
        for(i=0; i<s->nb_streams; i++){
            AVStream *st2 = s->streams[i];
            MpegTSWriteStream *ts_st2 = st2->priv_data;
            if( ts_st2->payload_size
               && (ts_st2->payload_dts == AV_NOPTS_VALUE || dts - ts_st2->payload_dts > delay/2)){
                mpegts_write_pes(s, st2, ts_st2->payload, ts_st2->payload_size,
                                 ts_st2->payload_pts, ts_st2->payload_dts,
                                 ts_st2->payload_flags & AV_PKT_FLAG_KEY);
                ts_st2->payload_size = 0;
            }
        }
    }

    /* Flush this stream's buffer first if appending would overflow it. */
    if (ts_st->payload_size && ts_st->payload_size + size > ts->pes_payload_size) {
        mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_size,
                         ts_st->payload_pts, ts_st->payload_dts,
                         ts_st->payload_flags & AV_PKT_FLAG_KEY);
        ts_st->payload_size = 0;
    }

    if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO || size > ts->pes_payload_size) {
        av_assert0(!ts_st->payload_size);
        // for video and subtitle, write a single pes packet
        mpegts_write_pes(s, st, buf, size, pts, dts, pkt->flags & AV_PKT_FLAG_KEY);
        av_free(data);
        return 0;
    }

    /* First fragment in the buffer fixes the PES timestamps/flags. */
    if (!ts_st->payload_size) {
        ts_st->payload_pts = pts;
        ts_st->payload_dts = dts;
        ts_st->payload_flags = pkt->flags;
    }

    memcpy(ts_st->payload + ts_st->payload_size, buf, size);
    ts_st->payload_size += size;

    av_free(data);

    return 0;
}
/* Drain every stream's buffered PES payload, then flush the output. */
static void mpegts_write_flush(AVFormatContext *s)
{
    int idx;

    /* Emit any partially accumulated payload for each stream. */
    for (idx = 0; idx < s->nb_streams; idx++) {
        AVStream *stream       = s->streams[idx];
        MpegTSWriteStream *tss = stream->priv_data;

        if (tss->payload_size <= 0)
            continue;

        mpegts_write_pes(s, stream, tss->payload, tss->payload_size,
                         tss->payload_pts, tss->payload_dts,
                         tss->payload_flags & AV_PKT_FLAG_KEY);
        tss->payload_size = 0;
    }

    avio_flush(s->pb);
}
/* Muxer write_packet entry point.  A NULL packet is the libavformat
 * convention for "flush buffered data" (AVFMT_ALLOW_FLUSH); in that case 1
 * is returned to signal that all internal buffers are now empty. */
static int mpegts_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    if (!pkt) {
        mpegts_write_flush(s);
        return 1;
    }

    return mpegts_write_packet_internal(s, pkt);
}
/* Muxer write_trailer entry point: flush remaining payloads and release all
 * per-stream and per-service allocations. */
static int mpegts_write_end(AVFormatContext *s)
{
    MpegTSWrite *ts = s->priv_data;
    int idx;

    mpegts_write_flush(s);

    /* Per-stream teardown: payload buffer and optional ADTS helper muxer. */
    for (idx = 0; idx < s->nb_streams; idx++) {
        AVStream *stream       = s->streams[idx];
        MpegTSWriteStream *tss = stream->priv_data;

        av_freep(&tss->payload);
        if (tss->amux) {
            avformat_free_context(tss->amux);
            tss->amux = NULL;
        }
    }

    /* Per-service teardown: strings, then the service structs themselves. */
    for (idx = 0; idx < ts->nb_services; idx++) {
        MpegTSService *service = ts->services[idx];

        av_freep(&service->provider_name);
        av_freep(&service->name);
        av_free(service);
    }
    av_free(ts->services);

    return 0;
}
/* Registration of the "mpegts" output format.
 * AVFMT_ALLOW_FLUSH lets libavformat hand a NULL packet to
 * mpegts_write_packet() so buffered audio payloads can be flushed. */
AVOutputFormat ff_mpegts_muxer = {
    .name           = "mpegts",
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-TS (MPEG-2 Transport Stream)"),
    .mime_type      = "video/x-mpegts",
    .extensions     = "ts,m2t,m2ts,mts",
    .priv_data_size = sizeof(MpegTSWrite),
    .audio_codec    = AV_CODEC_ID_MP2,        /* default audio codec */
    .video_codec    = AV_CODEC_ID_MPEG2VIDEO, /* default video codec */
    .write_header   = mpegts_write_header,
    .write_packet   = mpegts_write_packet,
    .write_trailer  = mpegts_write_end,
    .flags          = AVFMT_ALLOW_FLUSH,
    .priv_class     = &mpegts_muxer_class,
};
| jasonchuang/SoftwareAudioPlayer | jni/ffmpeg-2.2/libavformat/mpegtsenc.c | C | apache-2.0 | 48,096 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/codestar-notifications/model/DescribeNotificationRuleRequest.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::CodeStarNotifications::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
// Default constructor.  m_arnHasBeenSet starts false so SerializePayload()
// omits the "Arn" member until the field is explicitly assigned.
DescribeNotificationRuleRequest::DescribeNotificationRuleRequest() :
    m_arnHasBeenSet(false)
{
}
// Serialize this request as the JSON body sent to the service.  Only fields
// that have been explicitly set are included.
Aws::String DescribeNotificationRuleRequest::SerializePayload() const
{
  JsonValue body;

  if (m_arnHasBeenSet) {
    body.WithString("Arn", m_arn);
  }

  return body.View().WriteReadable();
}
| awslabs/aws-sdk-cpp | aws-cpp-sdk-codestar-notifications/source/model/DescribeNotificationRuleRequest.cpp | C++ | apache-2.0 | 695 |
package settings_test
import (
"encoding/json"
"errors"
. "github.com/cloudfoundry/bosh-agent/internal/github.com/onsi/ginkgo"
. "github.com/cloudfoundry/bosh-agent/internal/github.com/onsi/gomega"
"github.com/cloudfoundry/bosh-agent/infrastructure/fakes"
boshlog "github.com/cloudfoundry/bosh-agent/internal/github.com/cloudfoundry/bosh-utils/logger"
fakesys "github.com/cloudfoundry/bosh-agent/internal/github.com/cloudfoundry/bosh-utils/system/fakes"
fakenet "github.com/cloudfoundry/bosh-agent/platform/net/fakes"
. "github.com/cloudfoundry/bosh-agent/settings"
)
// init registers the Ginkgo specs for the settings Service.  The suite
// exercises LoadSettings (fetch + persist + fallback to the cached settings
// file), InvalidateSettings (cache removal) and GetSettings (dynamic-network
// resolution) against fake filesystem/source/resolver collaborators.
func init() {
	Describe("settingsService", func() {
		var (
			fs                         *fakesys.FakeFileSystem
			fakeDefaultNetworkResolver *fakenet.FakeDefaultNetworkResolver
			fakeSettingsSource         *fakes.FakeSettingsSource
		)

		BeforeEach(func() {
			fs = fakesys.NewFakeFileSystem()
			fakeDefaultNetworkResolver = &fakenet.FakeDefaultNetworkResolver{}
			fakeSettingsSource = &fakes.FakeSettingsSource{}
		})

		// buildService wires the current fakes into a fresh Service that
		// persists settings at /setting/path.json.
		buildService := func() (Service, *fakesys.FakeFileSystem) {
			logger := boshlog.NewLogger(boshlog.LevelNone)
			service := NewService(fs, "/setting/path.json", fakeSettingsSource, fakeDefaultNetworkResolver, logger)
			return service, fs
		}

		Describe("LoadSettings", func() {
			var (
				fetchedSettings Settings
				fetcherFuncErr  error
				service         Service
			)

			BeforeEach(func() {
				fetchedSettings = Settings{}
				fetcherFuncErr = nil
			})

			// JustBeforeEach runs after the per-Context BeforeEach hooks, so
			// the fakes are primed with whatever each Context configured.
			JustBeforeEach(func() {
				fakeSettingsSource.SettingsValue = fetchedSettings
				fakeSettingsSource.SettingsErr = fetcherFuncErr
				service, fs = buildService()
			})

			Context("when settings fetcher succeeds fetching settings", func() {
				BeforeEach(func() {
					fetchedSettings = Settings{AgentID: "some-new-agent-id"}
				})

				Context("when settings contain at most one dynamic network", func() {
					BeforeEach(func() {
						fetchedSettings.Networks = Networks{
							"fake-net-1": Network{Type: NetworkTypeDynamic},
						}
					})

					It("updates the service with settings from the fetcher", func() {
						err := service.LoadSettings()
						Expect(err).NotTo(HaveOccurred())
						Expect(service.GetSettings().AgentID).To(Equal("some-new-agent-id"))
					})

					It("persists settings to the settings file", func() {
						err := service.LoadSettings()
						Expect(err).NotTo(HaveOccurred())

						json, err := json.Marshal(fetchedSettings)
						Expect(err).NotTo(HaveOccurred())

						fileContent, err := fs.ReadFile("/setting/path.json")
						Expect(err).NotTo(HaveOccurred())
						Expect(fileContent).To(Equal(json))
					})

					It("returns any error from writing to the setting file", func() {
						fs.WriteFileError = errors.New("fs-write-file-error")

						err := service.LoadSettings()
						Expect(err).To(HaveOccurred())
						Expect(err.Error()).To(ContainSubstring("fs-write-file-error"))
					})
				})
			})

			Context("when settings fetcher fails fetching settings", func() {
				BeforeEach(func() {
					fetcherFuncErr = errors.New("fake-fetch-error")
				})

				Context("when a settings file exists", func() {
					Context("when settings contain at most one dynamic network", func() {
						BeforeEach(func() {
							fs.WriteFile("/setting/path.json", []byte(`{
								"agent_id":"some-agent-id",
								"networks": {"fake-net-1": {"type": "dynamic"}}
							}`))

							fakeDefaultNetworkResolver.GetDefaultNetworkNetwork = Network{
								IP:      "fake-resolved-ip",
								Netmask: "fake-resolved-netmask",
								Gateway: "fake-resolved-gateway",
							}
						})

						It("returns settings from the settings file with resolved network", func() {
							err := service.LoadSettings()
							Expect(err).ToNot(HaveOccurred())
							Expect(service.GetSettings()).To(Equal(Settings{
								AgentID: "some-agent-id",
								Networks: Networks{
									"fake-net-1": Network{
										Type:     NetworkTypeDynamic,
										IP:       "fake-resolved-ip",
										Netmask:  "fake-resolved-netmask",
										Gateway:  "fake-resolved-gateway",
										Resolved: true,
									},
								},
							}))
						})
					})
				})

				Context("when non-unmarshallable settings file exists", func() {
					It("returns any error from the fetcher", func() {
						fs.WriteFile("/setting/path.json", []byte(`$%^&*(`))

						err := service.LoadSettings()
						Expect(err).To(HaveOccurred())
						Expect(err.Error()).To(ContainSubstring("fake-fetch-error"))

						Expect(service.GetSettings()).To(Equal(Settings{}))
					})
				})

				Context("when no settings file exists", func() {
					It("returns any error from the fetcher", func() {
						err := service.LoadSettings()
						Expect(err).To(HaveOccurred())
						Expect(err.Error()).To(ContainSubstring("fake-fetch-error"))

						Expect(service.GetSettings()).To(Equal(Settings{}))
					})
				})
			})
		})

		Describe("InvalidateSettings", func() {
			It("removes the settings file", func() {
				fakeSettingsSource.SettingsValue = Settings{}
				fakeSettingsSource.SettingsErr = nil
				service, fs := buildService()

				fs.WriteFile("/setting/path.json", []byte(`{}`))

				err := service.InvalidateSettings()
				Expect(err).ToNot(HaveOccurred())

				Expect(fs.FileExists("/setting/path.json")).To(BeFalse())
			})

			It("returns err if removing settings file errored", func() {
				fakeSettingsSource.SettingsValue = Settings{}
				fakeSettingsSource.SettingsErr = nil
				service, fs := buildService()

				fs.RemoveAllError = errors.New("fs-remove-all-error")

				err := service.InvalidateSettings()
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("fs-remove-all-error"))
			})
		})

		Describe("GetSettings", func() {
			var (
				loadedSettings Settings
				service        Service
			)

			BeforeEach(func() {
				loadedSettings = Settings{AgentID: "some-agent-id"}
			})

			JustBeforeEach(func() {
				fakeSettingsSource.SettingsValue = loadedSettings
				fakeSettingsSource.SettingsErr = nil
				service, _ = buildService()
				err := service.LoadSettings()
				Expect(err).NotTo(HaveOccurred())
			})

			// NOTE(review): "is are" typo below is part of the spec
			// description string (runtime data), so it is left untouched.
			Context("when there is are no dynamic networks", func() {
				It("returns settings without modifying any networks", func() {
					Expect(service.GetSettings()).To(Equal(loadedSettings))
				})

				It("does not try to determine default network", func() {
					_ = service.GetSettings()
					Expect(fakeDefaultNetworkResolver.GetDefaultNetworkCalled).To(BeFalse())
				})
			})

			Context("when there is network that needs to be resolved (ip, netmask, or mac are not set)", func() {
				BeforeEach(func() {
					loadedSettings = Settings{
						Networks: map[string]Network{
							"fake-net1": Network{
								IP:      "fake-net1-ip",
								Netmask: "fake-net1-netmask",
								Mac:     "fake-net1-mac",
								Gateway: "fake-net1-gateway",
							},
							"fake-net2": Network{
								Gateway: "fake-net2-gateway",
								DNS:     []string{"fake-net2-dns"},
							},
						},
					}
				})

				Context("when default network can be retrieved", func() {
					BeforeEach(func() {
						fakeDefaultNetworkResolver.GetDefaultNetworkNetwork = Network{
							IP:      "fake-resolved-ip",
							Netmask: "fake-resolved-netmask",
							Gateway: "fake-resolved-gateway",
						}
					})

					It("returns settings with resolved dynamic network ip, netmask, gateway and keeping everything else the same", func() {
						settings := service.GetSettings()
						Expect(settings).To(Equal(Settings{
							Networks: map[string]Network{
								"fake-net1": Network{
									IP:      "fake-net1-ip",
									Netmask: "fake-net1-netmask",
									Mac:     "fake-net1-mac",
									Gateway: "fake-net1-gateway",
								},
								"fake-net2": Network{
									IP:       "fake-resolved-ip",
									Netmask:  "fake-resolved-netmask",
									Gateway:  "fake-resolved-gateway",
									DNS:      []string{"fake-net2-dns"},
									Resolved: true,
								},
							},
						}))
					})
				})

				Context("when default network fails to be retrieved", func() {
					BeforeEach(func() {
						fakeDefaultNetworkResolver.GetDefaultNetworkErr = errors.New("fake-get-default-network-err")
					})

					It("returns error", func() {
						settings := service.GetSettings()
						Expect(settings).To(Equal(loadedSettings))
					})
				})
			})
		})
	})
}
| alex8023/bosh-agent | settings/service_test.go | GO | apache-2.0 | 8,409 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @author Alexey A. Ivanov
*/
package javax.swing.text;
import javax.swing.event.DocumentListener;
import javax.swing.event.UndoableEditListener;
/**
 * A generic model of editable, observable text content.  Implementations
 * expose the text through a tree of {@link Element}s, notify registered
 * {@link DocumentListener}s of mutations, and report undoable edits to
 * {@link UndoableEditListener}s.
 */
public interface Document {
    /** Property key under which the stream description may be stored. */
    String StreamDescriptionProperty = "stream";

    /** Property key under which the document title may be stored. */
    String TitleProperty = "title";

    /** Registers a listener notified of content and attribute changes. */
    void addDocumentListener(final DocumentListener listener);

    /** Registers a listener notified of undoable edits. */
    void addUndoableEditListener(UndoableEditListener listener);

    /**
     * Returns a {@link Position} for the given offset.
     *
     * @throws BadLocationException if {@code offset} is not a valid
     *         location in the document
     */
    Position createPosition(int offset) throws BadLocationException;

    /** Returns the root element the implementation considers primary. */
    Element getDefaultRootElement();

    /** Returns the position just past the end of the content. */
    Position getEndPosition();

    /** Returns the length of the content, in characters. */
    int getLength();

    /** Returns the document property stored under {@code key}, or null. */
    Object getProperty(Object key);

    /** Returns all root elements defined by this document. */
    Element[] getRootElements();

    /** Returns the position at the start of the content. */
    Position getStartPosition();

    /**
     * Returns {@code length} characters of content starting at
     * {@code offset}.
     *
     * @throws BadLocationException if the range is not valid
     */
    String getText(int offset, int length) throws BadLocationException;

    /**
     * Copies {@code length} characters starting at {@code offset} into
     * {@code text}.
     *
     * @throws BadLocationException if the range is not valid
     */
    void getText(int offset, int length, Segment text)
        throws BadLocationException;

    /**
     * Inserts {@code text} at {@code offset} with the given attributes.
     *
     * @throws BadLocationException if {@code offset} is not valid
     */
    void insertString(int offset, String text, AttributeSet attrs)
        throws BadLocationException;

    /** Stores a document property under {@code key}. */
    void putProperty(Object key, Object value);

    /**
     * Removes {@code length} characters starting at {@code offset}.
     *
     * @throws BadLocationException if the range is not valid
     */
    void remove(int offset, int length) throws BadLocationException;

    /** Unregisters a previously added document listener. */
    void removeDocumentListener(DocumentListener listener);

    /** Unregisters a previously added undoable-edit listener. */
    void removeUndoableEditListener(UndoableEditListener listener);

    /**
     * Executes {@code r}; implementations may use this to provide a safe
     * view of the content while {@code r} runs.
     */
    void render(Runnable r);
}
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "system_properties/prop_area.h"
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <sys/cdefs.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/xattr.h>
#include <unistd.h>
#include <new>
#include <async_safe/log.h>
constexpr size_t PA_SIZE = 128 * 1024;
constexpr uint32_t PROP_AREA_MAGIC = 0x504f5250;
constexpr uint32_t PROP_AREA_VERSION = 0xfc6ed0ab;
size_t prop_area::pa_size_ = 0;
size_t prop_area::pa_data_size_ = 0;
// Create |filename| exclusively, size it to PA_SIZE, map it read/write and
// placement-construct a fresh prop_area header in it.  |context|, when given,
// is applied as the file's SELinux label; a labeling failure is reported via
// |*fsetxattr_failed| rather than aborting (see comment below).
// Returns the mapped area, or nullptr on failure.  Aborts on EACCES for
// consistency with a write fault on an already-mapped read-only page.
prop_area* prop_area::map_prop_area_rw(const char* filename, const char* context,
                                       bool* fsetxattr_failed) {
  /* dev is a tmpfs that we can use to carve a shared workspace
   * out of, so let's do that...
   */
  const int fd = open(filename, O_RDWR | O_CREAT | O_NOFOLLOW | O_CLOEXEC | O_EXCL, 0444);

  if (fd < 0) {
    if (errno == EACCES) {
      /* for consistency with the case where the process has already
       * mapped the page in and segfaults when trying to write to it
       */
      abort();
    }
    return nullptr;
  }

  if (context) {
    if (fsetxattr(fd, XATTR_NAME_SELINUX, context, strlen(context) + 1, 0) != 0) {
      async_safe_format_log(ANDROID_LOG_ERROR, "libc",
                            "fsetxattr failed to set context (%s) for \"%s\"", context, filename);
      /*
       * fsetxattr() will fail during system properties tests due to selinux policy.
       * We do not want to create a custom policy for the tester, so we will continue in
       * this function but set a flag that an error has occurred.
       * Init, which is the only daemon that should ever call this function will abort
       * when this error occurs.
       * Otherwise, the tester will ignore it and continue, albeit without any selinux
       * property separation.
       */
      if (fsetxattr_failed) {
        *fsetxattr_failed = true;
      }
    }
  }

  if (ftruncate(fd, PA_SIZE) < 0) {
    close(fd);
    return nullptr;
  }

  pa_size_ = PA_SIZE;
  pa_data_size_ = pa_size_ - sizeof(prop_area);

  void* const memory_area = mmap(nullptr, pa_size_, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (memory_area == MAP_FAILED) {
    close(fd);
    return nullptr;
  }

  // Placement-new writes the magic/version header into the shared mapping.
  prop_area* pa = new (memory_area) prop_area(PROP_AREA_MAGIC, PROP_AREA_VERSION);

  close(fd);
  return pa;
}
// Map an existing property area read-only from |fd|.  Returns nullptr unless
// the file is root-owned, not writable by group/other, at least big enough to
// hold a prop_area header, and carries the expected magic and version.
prop_area* prop_area::map_fd_ro(const int fd) {
  struct stat st;
  if (fstat(fd, &st) < 0) {
    return nullptr;
  }

  const bool root_owned = (st.st_uid == 0) && (st.st_gid == 0);
  const bool unwritable_by_others = (st.st_mode & (S_IWGRP | S_IWOTH)) == 0;
  const bool large_enough = st.st_size >= static_cast<off_t>(sizeof(prop_area));
  if (!root_owned || !unwritable_by_others || !large_enough) {
    return nullptr;
  }

  pa_size_ = st.st_size;
  pa_data_size_ = pa_size_ - sizeof(prop_area);

  void* const mapping = mmap(nullptr, pa_size_, PROT_READ, MAP_SHARED, fd, 0);
  if (mapping == MAP_FAILED) {
    return nullptr;
  }

  prop_area* pa = reinterpret_cast<prop_area*>(mapping);
  const bool valid_header =
      (pa->magic() == PROP_AREA_MAGIC) && (pa->version() == PROP_AREA_VERSION);
  if (!valid_header) {
    munmap(pa, pa_size_);
    return nullptr;
  }

  return pa;
}
// Open |filename| read-only (refusing symlinks) and map it as a prop_area.
prop_area* prop_area::map_prop_area(const char* filename) {
  const int fd = open(filename, O_CLOEXEC | O_NOFOLLOW | O_RDONLY);
  if (fd == -1) {
    return nullptr;
  }

  prop_area* pa = map_fd_ro(fd);
  close(fd);
  return pa;
}
// Carve |size| bytes (rounded up to uint_least32_t alignment) out of the
// data area.  On success, stores the allocation's offset in |*off| and
// returns a pointer to it; returns nullptr when the area is exhausted.
void* prop_area::allocate_obj(const size_t size, uint_least32_t* const off) {
  const size_t needed = __BIONIC_ALIGN(size, sizeof(uint_least32_t));
  if (bytes_used_ + needed > pa_data_size_) {
    return nullptr;
  }

  const uint_least32_t alloc_offset = bytes_used_;
  bytes_used_ += needed;
  *off = alloc_offset;
  return data_ + alloc_offset;
}
// Allocate and construct a trie node holding |name|; on success, stores its
// offset in |*off|.  Returns nullptr when the area is full.
prop_bt* prop_area::new_prop_bt(const char* name, uint32_t namelen, uint_least32_t* const off) {
  uint_least32_t node_offset;
  void* const storage = allocate_obj(sizeof(prop_bt) + namelen + 1, &node_offset);
  if (storage == nullptr) {
    return nullptr;
  }

  *off = node_offset;
  return new (storage) prop_bt(name, namelen);
}
// Allocate and construct a prop_info for |name|/|value|; on success, stores
// its offset in |*off|.  Values of PROP_VALUE_MAX or longer are stored in a
// separately allocated "long value" region referenced by a relative offset.
// Returns nullptr when the area is full.
prop_info* prop_area::new_prop_info(const char* name, uint32_t namelen, const char* value,
                                    uint32_t valuelen, uint_least32_t* const off) {
  uint_least32_t new_offset;
  void* const p = allocate_obj(sizeof(prop_info) + namelen + 1, &new_offset);
  if (p == nullptr) return nullptr;

  prop_info* info;
  if (valuelen >= PROP_VALUE_MAX) {
    // Value does not fit inline: copy it into its own allocation and store a
    // NUL-terminated copy there.
    uint32_t long_value_offset = 0;
    char* long_location = reinterpret_cast<char*>(allocate_obj(valuelen + 1, &long_value_offset));
    if (!long_location) return nullptr;

    memcpy(long_location, value, valuelen);
    long_location[valuelen] = '\0';

    // Both new_offset and long_value_offset are offsets based off of data_, however prop_info
    // does not know what data_ is, so we change this offset to be an offset from the prop_info
    // pointer that contains it.
    long_value_offset -= new_offset;

    info = new (p) prop_info(name, namelen, long_value_offset);
  } else {
    info = new (p) prop_info(name, namelen, value, valuelen);
  }
  *off = new_offset;
  return info;
}
// Translate a byte offset within the data area into a pointer; offsets past
// the end of the area yield nullptr.
void* prop_area::to_prop_obj(uint_least32_t off) {
  return (off > pa_data_size_) ? nullptr : data_ + off;
}
// Atomically read a node offset and convert it to a prop_bt pointer.
// memory_order_consume pairs with the release store in find_prop_bt.
inline prop_bt* prop_area::to_prop_bt(atomic_uint_least32_t* off_p) {
  const uint_least32_t node_off = atomic_load_explicit(off_p, memory_order_consume);
  return static_cast<prop_bt*>(to_prop_obj(node_off));
}
// Atomically read a property offset and convert it to a prop_info pointer.
// memory_order_consume pairs with the release store in find_property.
inline prop_info* prop_area::to_prop_info(atomic_uint_least32_t* off_p) {
  const uint_least32_t info_off = atomic_load_explicit(off_p, memory_order_consume);
  return static_cast<prop_info*>(to_prop_obj(info_off));
}
// The trie root lives at offset 0 of the data area.
inline prop_bt* prop_area::root_node() {
  return reinterpret_cast<prop_bt*>(to_prop_obj(0));
}
// Order name components first by length, then lexicographically.
// Returns a negative/zero/positive value like strcmp.
static int cmp_prop_name(const char* one, uint32_t one_len, const char* two, uint32_t two_len) {
  if (one_len != two_len) {
    return (one_len < two_len) ? -1 : 1;
  }
  return strncmp(one, two, one_len);
}
// Search the binary tree rooted at |bt| for the component |name| (length
// |namelen|).  When |alloc_if_needed| is set, a missing node is allocated
// and published with a release store so concurrent lock-free readers (which
// load with consume ordering in to_prop_bt) observe a fully built node.
// Returns the found/created node, or nullptr.
prop_bt* prop_area::find_prop_bt(prop_bt* const bt, const char* name, uint32_t namelen,
                                 bool alloc_if_needed) {
  prop_bt* current = bt;
  while (true) {
    if (!current) {
      return nullptr;
    }

    const int ret = cmp_prop_name(name, namelen, current->name, current->namelen);
    if (ret == 0) {
      // Exact match for this component.
      return current;
    }

    if (ret < 0) {
      // Descend into (or create) the left subtree.
      uint_least32_t left_offset = atomic_load_explicit(&current->left, memory_order_relaxed);
      if (left_offset != 0) {
        current = to_prop_bt(&current->left);
      } else {
        if (!alloc_if_needed) {
          return nullptr;
        }

        uint_least32_t new_offset;
        prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset);
        if (new_bt) {
          // Release store publishes the fully constructed node.
          atomic_store_explicit(&current->left, new_offset, memory_order_release);
        }
        return new_bt;
      }
    } else {
      // Descend into (or create) the right subtree.
      uint_least32_t right_offset = atomic_load_explicit(&current->right, memory_order_relaxed);
      if (right_offset != 0) {
        current = to_prop_bt(&current->right);
      } else {
        if (!alloc_if_needed) {
          return nullptr;
        }

        uint_least32_t new_offset;
        prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset);
        if (new_bt) {
          // Release store publishes the fully constructed node.
          atomic_store_explicit(&current->right, new_offset, memory_order_release);
        }
        return new_bt;
      }
    }
  }
}
// Walk the hybrid trie from |trie|, splitting |name| on '.' and resolving
// each component via find_prop_bt.  When |alloc_if_needed| is set, missing
// trie nodes and the final prop_info (initialized from |value|/|valuelen|)
// are created, with release stores publishing them to lock-free readers.
// Returns the property's prop_info, or nullptr.
const prop_info* prop_area::find_property(prop_bt* const trie, const char* name, uint32_t namelen,
                                          const char* value, uint32_t valuelen,
                                          bool alloc_if_needed) {
  if (!trie) return nullptr;

  const char* remaining_name = name;
  prop_bt* current = trie;
  while (true) {
    // Isolate the next dot-separated component of the name.
    const char* sep = strchr(remaining_name, '.');
    const bool want_subtree = (sep != nullptr);
    const uint32_t substr_size = (want_subtree) ? sep - remaining_name : strlen(remaining_name);

    if (!substr_size) {
      // Empty component (e.g. "a..b" or trailing '.') is invalid.
      return nullptr;
    }

    prop_bt* root = nullptr;
    uint_least32_t children_offset = atomic_load_explicit(&current->children, memory_order_relaxed);
    if (children_offset != 0) {
      root = to_prop_bt(&current->children);
    } else if (alloc_if_needed) {
      uint_least32_t new_offset;
      root = new_prop_bt(remaining_name, substr_size, &new_offset);
      if (root) {
        // Release store publishes the fully constructed child root.
        atomic_store_explicit(&current->children, new_offset, memory_order_release);
      }
    }

    if (!root) {
      return nullptr;
    }

    current = find_prop_bt(root, remaining_name, substr_size, alloc_if_needed);
    if (!current) {
      return nullptr;
    }

    if (!want_subtree) break;

    remaining_name = sep + 1;
  }

  // All components resolved; fetch or create the property record itself.
  uint_least32_t prop_offset = atomic_load_explicit(&current->prop, memory_order_relaxed);
  if (prop_offset != 0) {
    return to_prop_info(&current->prop);
  } else if (alloc_if_needed) {
    uint_least32_t new_offset;
    prop_info* new_info = new_prop_info(name, namelen, value, valuelen, &new_offset);
    if (new_info) {
      // Release store publishes the fully constructed prop_info.
      atomic_store_explicit(&current->prop, new_offset, memory_order_release);
    }

    return new_info;
  } else {
    return nullptr;
  }
}
// Depth-first traversal of the trie rooted at |trie| (left subtree, this
// node's property, children subtree, right subtree), invoking |propfn| for
// every property found.  Returns false when |trie| is null or a node reached
// during traversal is corrupt (offset past the area end), true otherwise.
bool prop_area::foreach_property(prop_bt* const trie,
                                 void (*propfn)(const prop_info* pi, void* cookie), void* cookie) {
  if (!trie) return false;

  // BUG FIX: the recursive result is a bool (0 or 1), so the previous
  // "const int err = ...; if (err < 0)" checks could never fire and a
  // failure in a subtree was silently ignored.  Test the bool directly so
  // corruption detected deeper in the trie propagates to the caller.
  uint_least32_t left_offset = atomic_load_explicit(&trie->left, memory_order_relaxed);
  if (left_offset != 0) {
    if (!foreach_property(to_prop_bt(&trie->left), propfn, cookie)) return false;
  }
  uint_least32_t prop_offset = atomic_load_explicit(&trie->prop, memory_order_relaxed);
  if (prop_offset != 0) {
    prop_info* info = to_prop_info(&trie->prop);
    if (!info) return false;
    propfn(info, cookie);
  }
  uint_least32_t children_offset = atomic_load_explicit(&trie->children, memory_order_relaxed);
  if (children_offset != 0) {
    if (!foreach_property(to_prop_bt(&trie->children), propfn, cookie)) return false;
  }
  uint_least32_t right_offset = atomic_load_explicit(&trie->right, memory_order_relaxed);
  if (right_offset != 0) {
    if (!foreach_property(to_prop_bt(&trie->right), propfn, cookie)) return false;
  }

  return true;
}
// Look up the property called |name|; returns nullptr when absent.
const prop_info* prop_area::find(const char* name) {
  const uint32_t namelen = strlen(name);
  return find_property(root_node(), name, namelen, nullptr, 0, false);
}
// Insert |name| with |value|; returns false when allocation fails (e.g. the
// area is full or the name is malformed).
bool prop_area::add(const char* name, unsigned int namelen, const char* value,
                    unsigned int valuelen) {
  return find_property(root_node(), name, namelen, value, valuelen, true) != nullptr;
}
// Visit every property in the area; see foreach_property for the traversal
// order and failure semantics.
bool prop_area::foreach (void (*propfn)(const prop_info* pi, void* cookie), void* cookie) {
  return foreach_property(root_node(), propfn, cookie);
}
| webos21/xbionic | platform_bionic-android-vts-12.0_r2/libc/system_properties/prop_area.cpp | C++ | apache-2.0 | 12,084 |
/// Copyright (c) 2009 Microsoft Corporation
///
/// Redistribution and use in source and binary forms, with or without modification, are permitted provided
/// that the following conditions are met:
/// * Redistributions of source code must retain the above copyright notice, this list of conditions and
/// the following disclaimer.
/// * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
/// the following disclaimer in the documentation and/or other materials provided with the distribution.
/// * Neither the name of Microsoft nor the names of its contributors may be used to
/// endorse or promote products derived from this software without specific prior written permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
/// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
/// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
/// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
/// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
/// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
/// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Verifies that a numeric property-name argument (123) is coerced to the
// string key "123" by Object.getOwnPropertyDescriptor (ES5 15.2.3.3).
ES5Harness.registerTest({
  id: "15.2.3.3-2-28",
  path: "TestCases/chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-2-28.js",
  description: "Object.getOwnPropertyDescriptor - argument 'P' is an integer that converts to a string (value is 123)",

  test: function testcase() {
    var subject = { "123": 1 };
    return Object.getOwnPropertyDescriptor(subject, 123).value === 1;
  },

  precondition: function prereq() {
    return fnExists(Object.getOwnPropertyDescriptor);
  }
});
| hnafar/IronJS | Src/Tests/ietestcenter/chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-2-28.js | JavaScript | apache-2.0 | 2,085 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import _root_.io.netty.util.internal.logging.{InternalLoggerFactory, Slf4JLoggerFactory}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.BeforeAndAfterEach
import org.scalatest.Suite
/** Manages a local `sc` `SparkContext` variable, correctly stopping it after each test. */
trait LocalSparkContext extends BeforeAndAfterEach with BeforeAndAfterAll { self: Suite =>

  // The context under test; created by individual tests, torn down here.
  @transient var sc: SparkContext = _

  override def beforeAll(): Unit = {
    super.beforeAll()
    // Route Netty's internal logging through SLF4J for the whole suite.
    InternalLoggerFactory.setDefaultFactory(Slf4JLoggerFactory.INSTANCE)
  }

  override def afterEach(): Unit = {
    try {
      resetSparkContext()
    } finally {
      // Always run the parent hook, even if stopping the context throws.
      super.afterEach()
    }
  }

  /** Stops the current context (if any) and clears the reference. */
  def resetSparkContext(): Unit = {
    LocalSparkContext.stop(sc)
    sc = null
  }

}
object LocalSparkContext {

  /** Stops `sc` when non-null and clears the driver-port system property. */
  def stop(sc: SparkContext): Unit = {
    Option(sc).foreach(_.stop())
    // To avoid RPC rebinding to the same port, since it doesn't unbind immediately on shutdown
    System.clearProperty("spark.driver.port")
  }

  /** Runs `f` by passing in `sc` and ensures that `sc` is stopped. */
  def withSpark[T](sc: SparkContext)(f: SparkContext => T): T = {
    try f(sc)
    finally stop(sc)
  }

}
| rezasafi/spark | core/src/test/scala/org/apache/spark/LocalSparkContext.scala | Scala | apache-2.0 | 2,057 |
<!DOCTYPE html>
<html itemscope lang="en-us">
<head><meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta charset="utf-8">
<meta name="HandheldFriendly" content="True">
<meta name="MobileOptimized" content="320">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no"><meta name="generator" content="Hugo 0.57.2" />
<meta property="og:title" content="Welcome" />
<meta name="twitter:title" content="welcome"/>
<meta itemprop="name" content="welcome"><meta property="og:description" content="Monday, Nov 20, 2017 - Tuesday, Nov 21, 2017 Warsaw !** -- We are coming back with DevOpsDays in Poland. We are extremely excited and would like to invite everyone from Europe and outside to attend, participate, and perhaps become a speaker. Join us to share the adventure!
On the separate website you can find all the information about DevOpsDays Warsaw 2017.
Dates Monday, Nov 20, 2017 - Tuesday, Nov 21, 2017 Location Warsaw Register Register to attend the conference!" />
<meta name="twitter:description" content="Monday, Nov 20, 2017 - Tuesday, Nov 21, 2017 Warsaw !** -- We are coming back with DevOpsDays in Poland. We are extremely excited and would like to invite everyone from Europe and outside to attend, participate, and perhaps become a speaker. Join us to share the adventure!
On the separate website you can find all the information about DevOpsDays Warsaw 2017.
Dates Monday, Nov 20, 2017 - Tuesday, Nov 21, 2017 Location Warsaw Register Register to attend the conference!" />
<meta itemprop="description" content="Monday, Nov 20, 2017 - Tuesday, Nov 21, 2017 Warsaw !** -- We are coming back with DevOpsDays in Poland. We are extremely excited and would like to invite everyone from Europe and outside to attend, participate, and perhaps become a speaker. Join us to share the adventure!
On the separate website you can find all the information about DevOpsDays Warsaw 2017.
Dates Monday, Nov 20, 2017 - Tuesday, Nov 21, 2017 Location Warsaw Register Register to attend the conference!"><meta name="twitter:site" content="@devopsdays">
<meta property="og:type" content="event" />
<meta property="og:url" content="/events/2017-warsaw/welcome/" /><meta name="twitter:label1" value="Event" />
<meta name="twitter:data1" value="devopsdays Warsaw 2017" /><meta name="twitter:label2" value="Dates" />
<meta name="twitter:data2" value="November 20 - 21, 2017" /><meta property="og:image" content="https://www.devopsdays.org/img/sharing.jpg" />
<meta name="twitter:card" content="summary_large_image" />
<meta name="twitter:image" content="https://www.devopsdays.org/img/sharing.jpg" />
<meta itemprop="image" content="https://www.devopsdays.org/img/sharing.jpg" />
<meta property="fb:app_id" content="1904065206497317" /><meta itemprop="wordCount" content="116">
<title>devopsdays Warsaw 2017
</title>
<script>
window.ga=window.ga||function(){(ga.q=ga.q||[]).push(arguments)};ga.l=+new Date;
ga('create', 'UA-9713393-1', 'auto');
ga('send', 'pageview');
</script>
<script async src='https://www.google-analytics.com/analytics.js'></script>
<link href="/css/site.css" rel="stylesheet">
<link href="https://fonts.googleapis.com/css?family=Roboto+Condensed:300,400,700" rel="stylesheet"><link rel="apple-touch-icon" sizes="57x57" href="/apple-icon-57x57.png">
<link rel="apple-touch-icon" sizes="60x60" href="/apple-icon-60x60.png">
<link rel="apple-touch-icon" sizes="72x72" href="/apple-icon-72x72.png">
<link rel="apple-touch-icon" sizes="76x76" href="/apple-icon-76x76.png">
<link rel="apple-touch-icon" sizes="114x114" href="/apple-icon-114x114.png">
<link rel="apple-touch-icon" sizes="120x120" href="/apple-icon-120x120.png">
<link rel="apple-touch-icon" sizes="144x144" href="/apple-icon-144x144.png">
<link rel="apple-touch-icon" sizes="152x152" href="/apple-icon-152x152.png">
<link rel="apple-touch-icon" sizes="180x180" href="/apple-icon-180x180.png">
<link rel="icon" type="image/png" sizes="192x192" href="/android-icon-192x192.png">
<link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="96x96" href="/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="16x16" href="/favicon-16x16.png">
<link rel="manifest" href="/manifest.json">
<meta name="msapplication-TileColor" content="#ffffff">
<meta name="msapplication-TileImage" content="/ms-icon-144x144.png">
<meta name="theme-color" content="#ffffff">
<link href="/events/index.xml" rel="alternate" type="application/rss+xml" title="DevOpsDays" />
<link href="/events/index.xml" rel="feed" type="application/rss+xml" title="DevOpsDays" />
<script src=/js/devopsdays-min.js></script></head>
<body lang="">
<nav class="navbar navbar-expand-md navbar-light">
<a class="navbar-brand" href="/">
<img src="/img/devopsdays-brain.png" height="30" class="d-inline-block align-top" alt="devopsdays Logo">
DevOpsDays
</a>
<button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarSupportedContent" aria-controls="navbarSupportedContent" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarSupportedContent">
<ul class="navbar-nav mr-auto"><li class="nav-item global-navigation"><a class = "nav-link" href="/events">events</a></li><li class="nav-item global-navigation"><a class = "nav-link" href="/blog">blog</a></li><li class="nav-item global-navigation"><a class = "nav-link" href="/sponsor">sponsor</a></li><li class="nav-item global-navigation"><a class = "nav-link" href="/speaking">speaking</a></li><li class="nav-item global-navigation"><a class = "nav-link" href="/organizing">organizing</a></li><li class="nav-item global-navigation"><a class = "nav-link" href="/about">about</a></li></ul>
</div>
</nav>
<nav class="navbar event-navigation navbar-expand-md navbar-light">
<a href="/events/2017-warsaw" class="nav-link">Warsaw</a>
<button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbar2">
<span class="navbar-toggler-icon"></span>
</button>
<div class="navbar-collapse collapse" id="navbar2">
<ul class="navbar-nav"><li class="nav-item active">
<a class="nav-link" href="/events/2017-warsaw/propose">propose</a>
</li><li class="nav-item active">
<a class="nav-link" href="/events/2017-warsaw/location">location</a>
</li><li class="nav-item active">
<a class="nav-link" href="/events/2017-warsaw/sponsor">sponsor</a>
</li><li class="nav-item active">
<a class="nav-link" href="/events/2017-warsaw/contact">contact</a>
</li><li class="nav-item active">
<a class="nav-link" href="/events/2017-warsaw/conduct">conduct</a>
</li></ul>
</div>
</nav>
<div class="container-fluid">
<div class="row">
<div class="col-md-12"><div class="row">
<div class="col-md-12 content-text"><div class="row">
<div class = "col-md-12 welcome-page-masthead">
<div class = "row">
<div class="col-xs-12 col-md-8">
<h1 class = "welcome-page">devopsdays Warsaw</h1><span class="welcome-page-date">
November 20 - 21, 2017<br />
</span><br /><span class = "welcome-page-masthead-venue">Warsaw</span>
<br /><i>Other Warsaw Events</i><br /><a href = "/events/2014-warsaw" class="welcome-page-masthead-link">2014</a> <a href = "/events/2015-warsaw" class="welcome-page-masthead-link">2015</a> <a href = "/events/2016-warsaw" class="welcome-page-masthead-link">2016</a> <a href = "/events/2018-warsaw" class="welcome-page-masthead-link">2018</a> <a href = "/events/2019-warsaw" class="welcome-page-masthead-link">2019</a> <br /><br /></div>
<div class="col-xs-12 col-md-4"><div class = "row">
</div><style>
a.jssocials-share-link {
color: white;
border-color: white;
}
a:hover.jssocials-share-link {
color: #bfbfc1;
border-color: #bfbfc1;
}
</style><div class = "row">
<div class = "col-md-12 offset-1 offset-md-0 welcome-page-cta">
<div id="share"></div>
</div>
</div>
</div>
</div>
</div>
</div>
<div class="row">
<div class="col-md-12 content-text">
<div style="text-align:center;">
<h2>Monday, Nov 20, 2017 - Tuesday, Nov 21, 2017</h2>
</div>
<div style="text-align:center;margin: 40px">
<img alt="devopsdays Warsaw 2017" src="/events/2017-warsaw/logo.png" class="welcome-page-event-logo"/>
</div>
<!--**devopsdays is coming to <a href = "/events/2017-warsaw/location">Warsaw</a>
!** -->
<p>We are coming back with DevOpsDays in <a href="https://www.youtube.com/watch?v=Qbu_FRg8vuU">Poland</a>.
We are extremely excited and would like to invite everyone from Europe and outside to attend, participate, and perhaps become a speaker.
Join us to share the adventure!</p>
<p><a href="http://2017.devopsdays.pl">On the separate website</a> you can find all the information about DevOpsDays Warsaw 2017.</p>
<div class = "row">
<div class = "col-md-2">
<strong>Dates</strong>
</div>
<div class = "col-md-8">
Monday, Nov 20, 2017 - Tuesday, Nov 21, 2017
</div>
</div>
<div class = "row">
<div class = "col-md-2">
<strong>Location</strong>
</div>
<div class = "col-md-8">
<a href = "/events/2017-warsaw/location">Warsaw</a>
</div>
</div>
<div class = "row">
<div class = "col-md-2">
<strong>Register</strong>
</div>
<div class = "col-md-8">
<!-- <a href = "/events/2017-warsaw/registration">Register to attend the conference!</a> -->
<a href="https://docs.google.com/forms/d/e/1FAIpQLSd2Y2glHixJA2QvyKgUuaIszV9AfUtElkHoRvP4yj8rWf_XOw/viewform">Register to attend the conference!</a>
</div>
</div>
<div class = "row">
<div class = "col-md-2">
<strong>Propose</strong>
</div>
<div class = "col-md-8">
<a href = "/events/2017-warsaw/propose">Propose a talk!</a>
</div>
</div>
<!-- <div class = "row">
<div class = "col-md-2">
<strong>Program</strong>
</div>
<div class = "col-md-8">
View the <a href = "/events/2017-warsaw/program">program.</a>
</div>
</div> -->
<!-- <div class = "row">
<div class = "col-md-2">
<strong>Speakers</strong>
</div>
<div class = "col-md-8">
Check out the <a href = "/events/2017-warsaw/speakers">speakers!</a>
</div>
</div> -->
<div class = "row">
<div class = "col-md-2">
<strong>Sponsors</strong>
</div>
<div class = "col-md-8">
<a href = "/events/2017-warsaw/sponsor">Sponsor the conference!</a>
</div>
</div>
<div class = "row">
<div class = "col-md-2">
<strong>Contact</strong>
</div>
<div class = "col-md-8">
<a href = "/events/2017-warsaw/contact">Get in touch with the organizers</a>
</div>
</div>
<!-- add your city twitter name here without the @ sign -->
<!--
<a href="https://twitter.com/devopsdaysyourcity" class="twitter-follow-button" data-show-count="false">Follow @devopsdaysyourcity</a><script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0];if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src="//platform.twitter.com/widgets.js";fjs.parentNode.insertBefore(js,fjs);}}(document,"script","twitter-wjs");</script>
-->
</div>
</div>
<br /><div class="row cta-row">
<div class="col-md-12"><h4 class="sponsor-cta">Platinum Sponsors</h4><a href = "/events/2017-warsaw/sponsor" class="sponsor-cta"><i>Join as Platinum Sponsor!</i>
</a></div>
</div><div class="row sponsor-row"></div><div class="row cta-row">
<div class="col-md-12"><h4 class="sponsor-cta">Gold Sponsors</h4><a href = "/events/2017-warsaw/sponsor" class="sponsor-cta"><i>Join as Gold Sponsor!</i>
</a></div>
</div><div class="row sponsor-row"></div><div class="row cta-row">
<div class="col-md-12"><h4 class="sponsor-cta">Exhibitor Sponsors</h4><a href = "/events/2017-warsaw/sponsor" class="sponsor-cta"><i>Join as Exhibitor Sponsor!</i>
</a></div>
</div><div class="row sponsor-row"></div><div class="row cta-row">
<div class="col-md-12"><h4 class="sponsor-cta">Sponsors</h4><a href = "/events/2017-warsaw/sponsor" class="sponsor-cta"><i>Join as a Sponsor!</i>
</a></div>
</div><div class="row sponsor-row"></div><div class="row cta-row">
<div class="col-md-12"><h4 class="sponsor-cta">Community Sponsors</h4><a href = "/events/2017-warsaw/sponsor" class="sponsor-cta"><i>Join as Community Sponsor!</i>
</a></div>
</div><div class="row sponsor-row"></div><br />
</div>
</div>
</div></div>
</div>
<nav class="navbar bottom navbar-light footer-nav-row" style="background-color: #bfbfc1;">
<div class = "row">
<div class = "col-md-12 footer-nav-background">
<div class = "row">
<div class = "col-md-6 col-lg-3 footer-nav-col">
<h3 class="footer-nav">@DEVOPSDAYS</h3>
<div>
<a class="twitter-timeline" data-dnt="true" href="https://twitter.com/devopsdays/lists/devopsdays" data-chrome="noheader" height="440"></a>
<script>
! function(d, s, id) {
var js, fjs = d.getElementsByTagName(s)[0],
p = /^http:/.test(d.location) ? 'http' : 'https';
if (!d.getElementById(id)) {
js = d.createElement(s);
js.id = id;
js.src = p + "://platform.twitter.com/widgets.js";
fjs.parentNode.insertBefore(js, fjs);
}
}(document, "script", "twitter-wjs");
</script>
</div>
</div>
<div class="col-md-6 col-lg-3 footer-nav-col footer-content">
<h3 class="footer-nav">BLOG</h3><a href = "https://www.devopsdays.org/blog/2019/05/10/10-years-of-devopsdays/"><h1 class = "footer-heading">10 years of devopsdays</h1></a><h2 class="footer-heading">by Kris Buytaert - 10 May, 2019</h2><p class="footer-content">It’s hard to believe but it is almost 10 years ago since #devopsdays happened for the first time in Gent. Back then there were almost 70 of us talking about topics that were of interest to both Operations and Development, we were exchanging our ideas and experiences `on how we were improving the quality of software delivery.
Our ideas got started on the crossroads of Open Source, Agile and early Cloud Adoption.</p><a href = "https://www.devopsdays.org/blog/"><h1 class = "footer-heading">Blogs</h1></a><h2 class="footer-heading">10 May, 2019</h2><p class="footer-content"></p><a href="https://www.devopsdays.org/blog/index.xml">Feed</a>
</div>
<div class="col-md-6 col-lg-3 footer-nav-col">
<h3 class="footer-nav">CFP OPEN</h3><a href = "/events/2019-campinas" class = "footer-content">Campinas</a><br /><a href = "/events/2019-macapa" class = "footer-content">Macapá</a><br /><a href = "/events/2019-shanghai" class = "footer-content">Shanghai</a><br /><a href = "/events/2019-recife" class = "footer-content">Recife</a><br /><a href = "/events/2020-charlotte" class = "footer-content">Charlotte</a><br /><a href = "/events/2020-prague" class = "footer-content">Prague</a><br /><a href = "/events/2020-tokyo" class = "footer-content">Tokyo</a><br /><a href = "/events/2020-salt-lake-city" class = "footer-content">Salt Lake City</a><br />
<br />Propose a talk at an event near you!<br />
</div>
<div class="col-md-6 col-lg-3 footer-nav-col">
<h3 class="footer-nav">About</h3>
devopsdays is a worldwide community conference series for anyone interested in IT improvement.<br /><br />
<a href="/about/" class = "footer-content">About devopsdays</a><br />
<a href="/privacy/" class = "footer-content">Privacy Policy</a><br />
<a href="/conduct/" class = "footer-content">Code of Conduct</a>
<br />
<br />
<a href="https://www.netlify.com">
<img src="/img/netlify-light.png" alt="Deploys by Netlify">
</a>
</div>
</div>
</div>
</div>
</nav>
<script>
$(document).ready(function () {
$("#share").jsSocials({
shares: ["email", {share: "twitter", via: ''}, "facebook", "linkedin"],
text: 'devopsdays Warsaw - 2017',
showLabel: false,
showCount: false
});
});
</script>
</body>
</html>
| gomex/devopsdays-web | static/events/2017-warsaw/welcome/index.html | HTML | apache-2.0 | 16,576 |
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Software adjust volume of samples, allows each audio stream its own
// volume without impacting master volume for chrome and other applications.
// Implemented as templates to allow 8, 16 and 32 bit implementations.
// 8 bit is unsigned and biased by 128.
// TODO(vrk): This file has been running pretty wild and free, and it's likely
// that a lot of the functions can be simplified and made more elegant. Revisit
// after other audio cleanup is done. (crbug.com/120319)
#include "media/audio/audio_util.h"
#include "base/command_line.h"
#include "base/string_number_conversions.h"
#include "base/time.h"
#include "media/base/media_switches.h"
#if defined(OS_WIN)
#include "base/win/windows_version.h"
#endif
namespace media {
// Returns user buffer size as specified on the command line or 0 if no buffer
// size has been specified.
int GetUserBufferSize() {
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
int buffer_size = 0;
std::string buffer_size_str(cmd_line->GetSwitchValueASCII(
switches::kAudioBufferSize));
if (base::StringToInt(buffer_size_str, &buffer_size) && buffer_size > 0)
return buffer_size;
return 0;
}
// Computes a hardware buffer size (in samples) based on the given
// |sample_rate|. Must be used in conjunction with AUDIO_PCM_LINEAR.
// A user-provided --audio-buffer-size always wins.
size_t GetHighLatencyOutputBufferSize(int sample_rate) {
  const int user_buffer_size = GetUserBufferSize();
  if (user_buffer_size)
    return user_buffer_size;

  // TODO(vrk/crogers): These sizes are probably overly conservative, but
  // reducing the buffer to 2048-8192 bytes previously caused
  // crbug.com/108396; any revisit must make sure that bug stays fixed.

  // Smallest hardware packet handed out; copes with sample rates down to 5khz.
  static const size_t kMinSamplesPerHardwarePacket = 1024;
  // Largest hardware packet handed out; copes with sample rates up to 192khz.
  static const size_t kMaxSamplesPerHardwarePacket = 64 * 1024;
  // Target duration of a single hardware packet. Chosen carefully so that a
  // 48khz stream ends up with 8192 samples.
  static const size_t kMillisecondsPerHardwarePacket = 170;

  // Double the packet size until it covers at least
  // |kMillisecondsPerHardwarePacket| worth of audio (or hits the cap).
  size_t samples = kMinSamplesPerHardwarePacket;
  for (; samples <= kMaxSamplesPerHardwarePacket &&
         samples * base::Time::kMillisecondsPerSecond <
             sample_rate * kMillisecondsPerHardwarePacket;
       samples *= 2) {
  }
  return samples;
}
#if defined(OS_WIN)
// Returns the number of waveOut buffers to use on Windows: the value of
// --waveout-buffers when given and positive, otherwise 4 on Vista and 3
// everywhere else.
int NumberOfWaveOutBuffers() {
  const std::string switch_value =
      CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
          switches::kWaveOutBuffers);
  int user_buffers = 0;
  if (base::StringToInt(switch_value, &user_buffers) && user_buffers > 0)
    return user_buffers;

  // The audio stack was rewritten for Vista and wave out performance regressed
  // compared to XP; Windows 7 fixed the regression and most configurations
  // work with 2 buffers, but some devices (e.g. some Sound Blasters) and some
  // XP machines (even multi-processor ones) still need 3 — so only Vista
  // gets the extra fourth buffer.
  if (base::win::GetVersion() == base::win::VERSION_VISTA)
    return 4;
  return 3;
}
#endif
} // namespace media
| plxaye/chromium | src/media/audio/audio_util.cc | C++ | apache-2.0 | 3,748 |
// Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.jetbrains.jsonSchema.extension;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.openapi.vfs.VfsUtil;
import com.intellij.openapi.vfs.VfsUtilCore;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.openapi.vfs.impl.http.HttpVirtualFile;
import com.intellij.util.containers.ContainerUtil;
import com.jetbrains.jsonSchema.impl.JsonSchemaType;
import com.jetbrains.jsonSchema.impl.JsonSchemaVersion;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.io.File;
import java.util.Set;
/**
 * Describes a single JSON schema source: either a bundled {@link JsonSchemaFileProvider}
 * or a bare schema URL. Exactly one of the two backing fields is non-null, depending on
 * which constructor was used.
 */
public class JsonSchemaInfo {
  @Nullable private final JsonSchemaFileProvider myProvider;
  @Nullable private final String myUrl;
  // URL/path segments that carry no useful information when deriving a readable
  // schema name (repository layout folders, branch names, etc.).
  @NotNull private final static Set<String> myDumbNames = ContainerUtil.set(
    "schema",
    "lib",
    "cli",
    "packages",
    "master",
    "format",
    "angular", // the only angular-related schema is the 'angular-cli', so we skip the repo name
    "config");

  public JsonSchemaInfo(@NotNull JsonSchemaFileProvider provider) {
    myProvider = provider;
    myUrl = null;
  }

  public JsonSchemaInfo(@NotNull String url) {
    myUrl = url;
    myProvider = null;
  }

  /** @return the backing provider, or null when this info wraps a plain URL */
  @Nullable
  public JsonSchemaFileProvider getProvider() {
    return myProvider;
  }

  /**
   * Returns the schema location: the provider's remote source, its schema file's URL
   * (for remote files) or project-relative path, or the raw URL this info was built from.
   * An empty string is returned when the provider has no schema file.
   */
  @NotNull
  public String getUrl(Project project) {
    if (myProvider == null) {
      assert myUrl != null;
      return myUrl;
    }
    String remoteSource = myProvider.getRemoteSource();
    if (remoteSource != null) {
      return remoteSource;
    }
    VirtualFile schemaFile = myProvider.getSchemaFile();
    if (schemaFile == null) return "";
    if (schemaFile instanceof HttpVirtualFile) {
      // Remote files are identified by their URL rather than a local path.
      return schemaFile.getUrl();
    }
    return getRelativePath(project, schemaFile.getPath());
  }

  /**
   * Derives a human-readable name for the schema: the provider's presentable name,
   * or the most meaningful trailing segment of the URL (skipping generic folder names).
   */
  @NotNull
  public String getDescription() {
    if (myProvider != null) {
      return sanitizeName(myProvider.getPresentableName());
    }
    assert myUrl != null;
    // the only weird case
    if ("http://json.schemastore.org/config".equals(myUrl)
        || "https://schemastore.azurewebsites.net/schemas/json/config.json".equals(myUrl)) {
      return "asp.net config";
    }
    String normalized = myUrl.replace('\\', '/');
    return ContainerUtil.reverse(StringUtil.split(normalized, "/"))
      .stream()
      .map(JsonSchemaInfo::sanitizeName)
      .filter(segment -> !isVeryDumbName(segment))
      .findFirst().orElse(sanitizeName(myUrl));
  }

  /**
   * Returns true when the given segment is useless as a schema name: blank, a known
   * generic folder name, or a version-like run of dot-separated integers.
   */
  public static boolean isVeryDumbName(@Nullable String possibleName) {
    if (StringUtil.isEmptyOrSpaces(possibleName) || myDumbNames.contains(possibleName)) return true;
    return StringUtil.split(possibleName, ".").stream().allMatch(JsonSchemaType::isInteger);
  }

  // Strips conventional schema-file suffixes (".json", "-schema", ".schema") from a name.
  @NotNull
  private static String sanitizeName(@NotNull String providerName) {
    String result = StringUtil.trimEnd(providerName, ".json");
    result = StringUtil.trimEnd(result, "-schema");
    return StringUtil.trimEnd(result, ".schema");
  }

  /** @return the provider's schema version, or SCHEMA_4 for URL-based infos */
  @NotNull
  public JsonSchemaVersion getSchemaVersion() {
    return myProvider == null ? JsonSchemaVersion.SCHEMA_4 : myProvider.getSchemaVersion();
  }

  /**
   * Converts an absolute path into a path relative to the project base directory when
   * possible (directly under it, or via a meaningful common ancestor inside the user's
   * home directory); otherwise returns the trimmed input unchanged.
   */
  @NotNull
  public static String getRelativePath(@NotNull Project project, @NotNull String text) {
    String path = text.trim();
    if (project.isDefault() || project.getBasePath() == null) return path;
    if (StringUtil.isEmptyOrSpaces(path)) return path;
    final File ioFile = new File(path);
    if (!ioFile.isAbsolute()) return path;
    VirtualFile file = VfsUtil.findFileByIoFile(ioFile, false);
    if (file == null) return path;
    final String relativePath = VfsUtilCore.getRelativePath(file, project.getBaseDir());
    if (relativePath != null) return relativePath;
    // Not directly under the project: still relativize when both the file and the
    // project share an ancestor somewhere inside the user's home directory.
    if (isMeaningfulAncestor(VfsUtilCore.getCommonAncestor(file, project.getBaseDir()))) {
      String relative = VfsUtilCore.findRelativePath(project.getBaseDir(), file, File.separatorChar);
      if (relative != null) return relative;
    }
    return path;
  }

  // An ancestor is "meaningful" only when it sits strictly inside the user's home directory.
  private static boolean isMeaningfulAncestor(@Nullable VirtualFile ancestor) {
    if (ancestor == null) return false;
    VirtualFile home = VfsUtil.getUserHomeDir();
    return home != null && VfsUtilCore.isAncestor(home, ancestor, true);
  }
}
| goodwinnk/intellij-community | json/src/com/jetbrains/jsonSchema/extension/JsonSchemaInfo.java | Java | apache-2.0 | 4,405 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/machinelearning/MachineLearning_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/machinelearning/model/BatchPrediction.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace MachineLearning
{
namespace Model
{
/**
* <p>Represents the output of a <code>DescribeBatchPredictions</code> operation.
* The content is essentially a list of
* <code>BatchPrediction</code>s.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DescribeBatchPredictionsOutput">AWS
* API Reference</a></p>
*/
  class AWS_MACHINELEARNING_API DescribeBatchPredictionsResult
  {
  public:
    // Default-constructs an empty result; the overloads below build/assign the
    // object from the raw JSON service response.
    DescribeBatchPredictionsResult();
    DescribeBatchPredictionsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
    DescribeBatchPredictionsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
    /**
     * <p>A list of <code>BatchPrediction</code> objects that meet the search criteria.
     * </p>
     */
    inline const Aws::Vector<BatchPrediction>& GetResults() const{ return m_results; }
    /**
     * <p>A list of <code>BatchPrediction</code> objects that meet the search criteria.
     * </p>
     */
    inline void SetResults(const Aws::Vector<BatchPrediction>& value) { m_results = value; }
    /**
     * <p>A list of <code>BatchPrediction</code> objects that meet the search criteria.
     * </p>
     */
    inline void SetResults(Aws::Vector<BatchPrediction>&& value) { m_results = std::move(value); }
    /**
     * <p>A list of <code>BatchPrediction</code> objects that meet the search criteria.
     * </p>
     */
    inline DescribeBatchPredictionsResult& WithResults(const Aws::Vector<BatchPrediction>& value) { SetResults(value); return *this;}
    /**
     * <p>A list of <code>BatchPrediction</code> objects that meet the search criteria.
     * </p>
     */
    inline DescribeBatchPredictionsResult& WithResults(Aws::Vector<BatchPrediction>&& value) { SetResults(std::move(value)); return *this;}
    /**
     * <p>A list of <code>BatchPrediction</code> objects that meet the search criteria.
     * </p>
     */
    inline DescribeBatchPredictionsResult& AddResults(const BatchPrediction& value) { m_results.push_back(value); return *this; }
    /**
     * <p>A list of <code>BatchPrediction</code> objects that meet the search criteria.
     * </p>
     */
    inline DescribeBatchPredictionsResult& AddResults(BatchPrediction&& value) { m_results.push_back(std::move(value)); return *this; }
    /**
     * <p>The ID of the next page in the paginated results that indicates at least one
     * more page follows.</p>
     */
    inline const Aws::String& GetNextToken() const{ return m_nextToken; }
    /**
     * <p>The ID of the next page in the paginated results that indicates at least one
     * more page follows.</p>
     */
    inline void SetNextToken(const Aws::String& value) { m_nextToken = value; }
    /**
     * <p>The ID of the next page in the paginated results that indicates at least one
     * more page follows.</p>
     */
    inline void SetNextToken(Aws::String&& value) { m_nextToken = std::move(value); }
    /**
     * <p>The ID of the next page in the paginated results that indicates at least one
     * more page follows.</p>
     */
    inline void SetNextToken(const char* value) { m_nextToken.assign(value); }
    /**
     * <p>The ID of the next page in the paginated results that indicates at least one
     * more page follows.</p>
     */
    inline DescribeBatchPredictionsResult& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
    /**
     * <p>The ID of the next page in the paginated results that indicates at least one
     * more page follows.</p>
     */
    inline DescribeBatchPredictionsResult& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
    /**
     * <p>The ID of the next page in the paginated results that indicates at least one
     * more page follows.</p>
     */
    inline DescribeBatchPredictionsResult& WithNextToken(const char* value) { SetNextToken(value); return *this;}
  private:
    // Backing field for Results: the BatchPrediction entries of this page.
    Aws::Vector<BatchPrediction> m_results;
    // Backing field for NextToken: identifies the next page of paginated results.
    Aws::String m_nextToken;
  };
} // namespace Model
} // namespace MachineLearning
} // namespace Aws
| awslabs/aws-sdk-cpp | aws-cpp-sdk-machinelearning/include/aws/machinelearning/model/DescribeBatchPredictionsResult.h | C | apache-2.0 | 4,693 |
/*
* Copyright 2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.performance.results;
public class CrossBuildResultsStore extends BaseCrossBuildResultsStore<CrossBuildPerformanceResults> {
public CrossBuildResultsStore() {
super("cross_build_results");
}
}
| gradle/gradle | subprojects/internal-performance-testing/src/main/groovy/org/gradle/performance/results/CrossBuildResultsStore.java | Java | apache-2.0 | 846 |
#!/usr/bin/env bash
# Runs `go test` with coverage for every non-vendored package and aggregates
# the per-package profiles into coverage.txt (e.g. for upload to codecov).
set -e

# Start with a fresh aggregate file (single blank first line, as before).
echo "" > coverage.txt

for d in $(go list ./... | grep -v vendor); do
    #TODO - Include -race while creating the coverage profile.
    # Quote "$d" so an unexpected space in a package path cannot word-split
    # the go test invocation (ShellCheck SC2086).
    go test -coverprofile=profile.out -covermode=atomic "$d"
    if [ -f profile.out ]; then
        cat profile.out >> coverage.txt
        rm profile.out
    fi
done
| openebs/mayaserver | buildscripts/test-cov.sh | Shell | apache-2.0 | 329 |
/* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.xmlbeans.samples.substitutiongroup;
import org.apache.xmlbeans.XmlCursor;
import org.apache.xmlbeans.XmlError;
import org.apache.xmlbeans.XmlObject;
import org.apache.xmlbeans.XmlOptions;
import org.apache.xmlbeans.XmlException;
import org.apache.xmlbeans.samples.substitutiongroup.easypo.PurchaseOrderDocument;
import org.apache.xmlbeans.samples.substitutiongroup.easypo.InvoiceHeaderDocument;
import org.apache.xmlbeans.samples.substitutiongroup.easypo.NameAddress;
import org.apache.xmlbeans.samples.substitutiongroup.easypo.BookType;
import org.apache.xmlbeans.samples.substitutiongroup.easypo.ClothingType;
import javax.xml.namespace.QName;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
/**
* This sample illustrates how you can access substitution group element names and
* values defined in the XML document. This sample also demonstrates how to write
* substitution group elements.
*
* The schema used by this sample are defined in EasyPo.xsd
*/
public class SubstitutionGroup
{
    /** Namespace URI of the easypo schema; used when renaming substitution group members. */
    private static final String EASYPO_NS = "http://xmlbeans.apache.org/samples/substitutiongroup/easypo";

    /**
     * Receives an XML Instance and prints the substitution group element names and
     * values defined in the XML document. Also creates a new XML Instance.
     *
     * @param args An array containing
     * (a)Path to the XML Instance conforming to the XML schema in EasyPo.xsd.
     * (b)Path for creating a new XML Instance.
     */
    public static void main(String args[])
    {
        // Create an instance of this class to work with.
        SubstitutionGroup subGrp = new SubstitutionGroup();

        // Create an instance of a type based on the received XML's schema.
        PurchaseOrderDocument poDoc = subGrp.parseXml(args[0]);
        if (poDoc == null)
        {
            // parseXml already printed the failure; there is nothing to work on.
            System.out.println("Could not parse " + args[0] + " - exiting.");
            return;
        }

        // Validate it.
        validateXml(poDoc);

        // Prints the comments from the XML, also the name of the substitute tokens.
        subGrp.printComments(poDoc);

        // Creates a new XML and saves the file.
        subGrp.createDocument(poDoc, args[1]);
    }

    /**
     * Parses the given file into a PurchaseOrderDocument.
     *
     * @param file path of the XML Instance to load
     * @return the parsed document, or <code>null</code> when the file could not be
     *         read or is not well-formed XML (the stack trace is printed in that case)
     */
    public PurchaseOrderDocument parseXml(String file)
    {
        File xmlfile = new File(file);
        PurchaseOrderDocument poDoc = null;
        try
        {
            poDoc = PurchaseOrderDocument.Factory.parse(xmlfile);
        }
        catch (XmlException e)
        {
            e.printStackTrace();
        }
        catch (IOException e)
        {
            e.printStackTrace();
        }
        return poDoc;
    }

    /**
     * Prints the substitution group element names (local part) and values for each
     * invoice-header element in the XML Instance. (The rest of the elements are
     * ignored for the sake of simplicity.)
     *
     * @param poDoc document whose invoice-header comments are printed
     */
    public void printComments(PurchaseOrderDocument poDoc)
    {
        // Get object reference of root element.
        PurchaseOrderDocument.PurchaseOrder purchaseOrderElement = poDoc.getPurchaseOrder();
        // Get all the invoice-header elements for purchase-order.
        InvoiceHeaderDocument.InvoiceHeader[] invHeaders = purchaseOrderElement.getInvoiceHeaderArray();
        System.out.println("\n\n=========Contents==========\n");
        // Iterate through each invoice-header element, printing only the element name
        // and value of the substitution group member used for "comment" (see EasyPo.xsd).
        for (int i = 0; i < invHeaders.length; i++)
        {
            System.out.println("\nInvoiceHeader[" + i + "]");
            XmlCursor cursor = invHeaders[i].xgetComment().newCursor();
            try
            {
                // The cursor sits on the comment element itself, so its QName reveals
                // which substitution group member (e.g. bill-comment) was actually used.
                System.out.println("Element Name (Local Part): " + cursor.getName().getLocalPart());
                System.out.println("Element Value: " + invHeaders[i].getComment().trim());
            }
            finally
            {
                // Always release the cursor, even when an element is malformed.
                cursor.dispose();
            }
        }
    }

    /**
     * Creates two new invoice-header elements, attaches them to the existing XML
     * Instance, and saves the new Instance to a file.
     *
     * @param poDoc document to extend
     * @param file path the extended document is saved to
     * @return the modified document
     */
    public PurchaseOrderDocument createDocument(PurchaseOrderDocument poDoc, String file)
    {
        // Get object reference of root element.
        PurchaseOrderDocument.PurchaseOrder purchaseOrderElement = poDoc.getPurchaseOrder();
        InvoiceHeaderDocument.InvoiceHeader invHeaders = purchaseOrderElement.addNewInvoiceHeader();

        // Assign values to the newly created invoice-header element.
        NameAddress shipto = invHeaders.addNewShipTo();
        shipto.setName("New Company");
        shipto.setAddress("NewTown, NewCity");
        NameAddress billto = invHeaders.addNewBillTo();
        billto.setName("New Company");
        billto.setAddress("NewTown, NewCity");

        // Create a new Book and add it to the invoice; the generic "product" element
        // is then renamed to the "book" substitution group member.
        BookType book = BookType.Factory.newInstance();
        book.setId(1000);
        book.setTitle("Where the Red Fern Grows");
        invHeaders.setProduct(book);
        renameElement(invHeaders.getProduct().newCursor(), "book");

        // Creating a new comment - with substitution group member bill-comment element.
        invHeaders.setComment("This is a new bill-comment");
        renameElement(invHeaders.xgetComment().newCursor(), "bill-comment");

        // Add another invoice-header.
        invHeaders = purchaseOrderElement.addNewInvoiceHeader();

        // Assign values to the newly created invoice-header element.
        shipto = invHeaders.addNewShipTo();
        shipto.setName("Other Company");
        shipto.setAddress("OtherTown, OtherCity");
        billto = invHeaders.addNewBillTo();
        billto.setName("Other Company");
        billto.setAddress("OtherTown, OtherCity");

        // Create a new Clothing and add it to the invoice.
        ClothingType clothing = ClothingType.Factory.newInstance();
        clothing.setId(2000);
        clothing.setColor(ClothingType.Color.BLUE);
        invHeaders.setProduct(clothing);
        renameElement(invHeaders.getProduct().newCursor(), "clothing");

        // Creating a new comment - with substitution group member ship-comment element.
        invHeaders.setComment("This is a new bill-comment");
        renameElement(invHeaders.xgetComment().newCursor(), "ship-comment");

        // Validate it.
        validateXml(poDoc);

        XmlOptions xmlOptions = new XmlOptions();
        xmlOptions.setSavePrettyPrint();
        File f = new File(file);
        try
        {
            // Writing the XML Instance to a file.
            poDoc.save(f, xmlOptions);
        }
        catch (IOException e)
        {
            e.printStackTrace();
        }
        System.out.println("\n\n\nXML Instance Document saved at : " + f.getPath());
        return poDoc;
    }

    /**
     * Renames the element the cursor is positioned on to the given easypo element
     * name, always disposing of the cursor afterwards. Renaming the element is how
     * a specific substitution group member is selected when writing.
     *
     * @param cursor cursor positioned on the element to rename; disposed on return
     * @param localName local name of the substitution group member to use
     */
    private static void renameElement(XmlCursor cursor, String localName)
    {
        try
        {
            cursor.setName(new QName(EASYPO_NS, localName));
        }
        finally
        {
            cursor.dispose();
        }
    }

    /**
     * <p>Validates the XML, printing error messages when the XML is invalid. Note
     * that this method will properly validate any instance of a compiled schema
     * type because all of these types extend XmlObject.</p>
     *
     * <p>Note that in actual practice, you'll probably want to use an assertion
     * when validating if you want to ensure that your code doesn't pass along
     * invalid XML. This sample prints the generated XML whether or not it's
     * valid so that you can see the result in both cases.</p>
     *
     * @param xml The XML to validate.
     * @return <code>true</code> if the XML is valid; otherwise, <code>false</code>
     */
    public static boolean validateXml(XmlObject xml)
    {
        // A collection instance to hold validation error messages.
        ArrayList validationMessages = new ArrayList();
        // Validate the XML, collecting messages.
        boolean isXmlValid = xml.validate(
            new XmlOptions().setErrorListener(validationMessages));
        // If the XML isn't valid, print the messages.
        if (!isXmlValid)
        {
            System.out.println("\nInvalid XML: ");
            for (int i = 0; i < validationMessages.size(); i++)
            {
                XmlError error = (XmlError) validationMessages.get(i);
                System.out.println(error.getMessage());
                System.out.println(error.getObjectLocation());
            }
        }
        return isXmlValid;
    }
}
| apache/xmlbeans | samples/SubstitutionGroup/src/org/apache/xmlbeans/samples/substitutiongroup/SubstitutionGroup.java | Java | apache-2.0 | 9,175 |
/* Copyright JS Foundation and other contributors, http://js.foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* FIPS-180-1 compliant SHA-1 implementation
*
* Copyright (C) 2006-2015, ARM Limited, All Rights Reserved
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of mbed TLS (https://tls.mbed.org)
*/
/*
* The SHA-1 standard was published by NIST in 1993.
*
* http://www.itl.nist.gov/fipspubs/fip180-1.htm
*/
#include "debugger-sha1.h"
#include "jext-common.h"
#if defined(JERRY_DEBUGGER) && (JERRY_DEBUGGER == 1)
/**
 * SHA-1 context structure.
 *
 * Holds the streaming state: input bytes are staged in 'buffer' until a
 * complete 64-byte block is available and folded into 'state'.
 */
typedef struct
{
  uint32_t total[2]; /**< number of bytes processed (total[0] low word, total[1] high word) */
  uint32_t state[5]; /**< intermediate digest state */
  uint8_t buffer[64]; /**< data block being processed */
} jerryx_sha1_context;
/* 32-bit integer manipulation macros (big endian). */
/* Read bytes b[i]..b[i+3] as one big endian uint32 into n. */
#define JERRYX_SHA1_GET_UINT32_BE(n, b, i) \
  { \
    (n) = (((uint32_t) (b)[(i) + 0]) << 24) | (((uint32_t) (b)[(i) + 1]) << 16) | (((uint32_t) (b)[(i) + 2]) << 8) \
          | ((uint32_t) (b)[(i) + 3]); \
  }
/* Store uint32 n into bytes b[i]..b[i+3], big endian. */
#define JERRYX_SHA1_PUT_UINT32_BE(n, b, i) \
  { \
    (b)[(i) + 0] = (uint8_t) ((n) >> 24); \
    (b)[(i) + 1] = (uint8_t) ((n) >> 16); \
    (b)[(i) + 2] = (uint8_t) ((n) >> 8); \
    (b)[(i) + 3] = (uint8_t) ((n)); \
  }
/**
 * Reset a SHA-1 context to the FIPS-180-1 starting state.
 */
static void
jerryx_sha1_init (jerryx_sha1_context *sha1_context_p) /**< SHA-1 context */
{
  /* The five initial chaining values defined by FIPS-180-1. */
  static const uint32_t initial_state[5] = { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 };

  /* Zero the byte counters and the staging buffer, then load the state. */
  memset (sha1_context_p, 0, sizeof (jerryx_sha1_context));
  memcpy (sha1_context_p->state, initial_state, sizeof (initial_state));
} /* jerryx_sha1_init */
/*
 * One SHA-1 round: e += rotl5(a) + F(b, c, d) + K + x, then b = rotl30(b).
 * JERRYX_SHA1_F and the constant K are redefined for each group of 20 rounds;
 * the caller rotates the register roles instead of shuffling values.
 */
#define JERRYX_SHA1_P(a, b, c, d, e, x) \
  do \
  { \
    e += JERRYX_SHA1_SHIFT (a, 5) + JERRYX_SHA1_F (b, c, d) + K + x; \
    b = JERRYX_SHA1_SHIFT (b, 30); \
  } while (0)
/**
 * Mix one complete 64-byte block into the intermediate digest state.
 *
 * This is the SHA-1 compression function of FIPS-180-1: the block feeds a
 * message schedule (expanded on the fly in the 16-word circular buffer W)
 * that is run through 4 groups of 20 rounds, each group with its own round
 * function JERRYX_SHA1_F and round constant K.
 */
static void
jerryx_sha1_process (jerryx_sha1_context *sha1_context_p, /**< SHA-1 context */
                     const uint8_t data[64]) /**< data buffer */
{
  uint32_t temp, W[16], A, B, C, D, E;
  /* Load the input block into the 16-word message schedule, big endian. */
  JERRYX_SHA1_GET_UINT32_BE (W[0], data, 0);
  JERRYX_SHA1_GET_UINT32_BE (W[1], data, 4);
  JERRYX_SHA1_GET_UINT32_BE (W[2], data, 8);
  JERRYX_SHA1_GET_UINT32_BE (W[3], data, 12);
  JERRYX_SHA1_GET_UINT32_BE (W[4], data, 16);
  JERRYX_SHA1_GET_UINT32_BE (W[5], data, 20);
  JERRYX_SHA1_GET_UINT32_BE (W[6], data, 24);
  JERRYX_SHA1_GET_UINT32_BE (W[7], data, 28);
  JERRYX_SHA1_GET_UINT32_BE (W[8], data, 32);
  JERRYX_SHA1_GET_UINT32_BE (W[9], data, 36);
  JERRYX_SHA1_GET_UINT32_BE (W[10], data, 40);
  JERRYX_SHA1_GET_UINT32_BE (W[11], data, 44);
  JERRYX_SHA1_GET_UINT32_BE (W[12], data, 48);
  JERRYX_SHA1_GET_UINT32_BE (W[13], data, 52);
  JERRYX_SHA1_GET_UINT32_BE (W[14], data, 56);
  JERRYX_SHA1_GET_UINT32_BE (W[15], data, 60);
/* Rotate x left by n bits, treating x as a 32-bit value. */
#define JERRYX_SHA1_SHIFT(x, n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n)))
/* Message schedule word for round t (16..79), kept in the 16-word circular buffer. */
#define JERRYX_SHA1_R(t) \
  (temp = W[(t - 3) & 0x0F] ^ W[(t - 8) & 0x0F] ^ W[(t - 14) & 0x0F] ^ W[t & 0x0F], \
   W[t & 0x0F] = JERRYX_SHA1_SHIFT (temp, 1))
  /* Working copy of the chaining state. */
  A = sha1_context_p->state[0];
  B = sha1_context_p->state[1];
  C = sha1_context_p->state[2];
  D = sha1_context_p->state[3];
  E = sha1_context_p->state[4];
  /* Rounds 0-19: "choose" round function, K = 0x5A827999. */
  uint32_t K = 0x5A827999;
#define JERRYX_SHA1_F(x, y, z) (z ^ (x & (y ^ z)))
  JERRYX_SHA1_P (A, B, C, D, E, W[0]);
  JERRYX_SHA1_P (E, A, B, C, D, W[1]);
  JERRYX_SHA1_P (D, E, A, B, C, W[2]);
  JERRYX_SHA1_P (C, D, E, A, B, W[3]);
  JERRYX_SHA1_P (B, C, D, E, A, W[4]);
  JERRYX_SHA1_P (A, B, C, D, E, W[5]);
  JERRYX_SHA1_P (E, A, B, C, D, W[6]);
  JERRYX_SHA1_P (D, E, A, B, C, W[7]);
  JERRYX_SHA1_P (C, D, E, A, B, W[8]);
  JERRYX_SHA1_P (B, C, D, E, A, W[9]);
  JERRYX_SHA1_P (A, B, C, D, E, W[10]);
  JERRYX_SHA1_P (E, A, B, C, D, W[11]);
  JERRYX_SHA1_P (D, E, A, B, C, W[12]);
  JERRYX_SHA1_P (C, D, E, A, B, W[13]);
  JERRYX_SHA1_P (B, C, D, E, A, W[14]);
  JERRYX_SHA1_P (A, B, C, D, E, W[15]);
  JERRYX_SHA1_P (E, A, B, C, D, JERRYX_SHA1_R (16));
  JERRYX_SHA1_P (D, E, A, B, C, JERRYX_SHA1_R (17));
  JERRYX_SHA1_P (C, D, E, A, B, JERRYX_SHA1_R (18));
  JERRYX_SHA1_P (B, C, D, E, A, JERRYX_SHA1_R (19));
#undef JERRYX_SHA1_F
  /* Rounds 20-39: parity round function, K = 0x6ED9EBA1. */
  K = 0x6ED9EBA1;
#define JERRYX_SHA1_F(x, y, z) (x ^ y ^ z)
  JERRYX_SHA1_P (A, B, C, D, E, JERRYX_SHA1_R (20));
  JERRYX_SHA1_P (E, A, B, C, D, JERRYX_SHA1_R (21));
  JERRYX_SHA1_P (D, E, A, B, C, JERRYX_SHA1_R (22));
  JERRYX_SHA1_P (C, D, E, A, B, JERRYX_SHA1_R (23));
  JERRYX_SHA1_P (B, C, D, E, A, JERRYX_SHA1_R (24));
  JERRYX_SHA1_P (A, B, C, D, E, JERRYX_SHA1_R (25));
  JERRYX_SHA1_P (E, A, B, C, D, JERRYX_SHA1_R (26));
  JERRYX_SHA1_P (D, E, A, B, C, JERRYX_SHA1_R (27));
  JERRYX_SHA1_P (C, D, E, A, B, JERRYX_SHA1_R (28));
  JERRYX_SHA1_P (B, C, D, E, A, JERRYX_SHA1_R (29));
  JERRYX_SHA1_P (A, B, C, D, E, JERRYX_SHA1_R (30));
  JERRYX_SHA1_P (E, A, B, C, D, JERRYX_SHA1_R (31));
  JERRYX_SHA1_P (D, E, A, B, C, JERRYX_SHA1_R (32));
  JERRYX_SHA1_P (C, D, E, A, B, JERRYX_SHA1_R (33));
  JERRYX_SHA1_P (B, C, D, E, A, JERRYX_SHA1_R (34));
  JERRYX_SHA1_P (A, B, C, D, E, JERRYX_SHA1_R (35));
  JERRYX_SHA1_P (E, A, B, C, D, JERRYX_SHA1_R (36));
  JERRYX_SHA1_P (D, E, A, B, C, JERRYX_SHA1_R (37));
  JERRYX_SHA1_P (C, D, E, A, B, JERRYX_SHA1_R (38));
  JERRYX_SHA1_P (B, C, D, E, A, JERRYX_SHA1_R (39));
#undef JERRYX_SHA1_F
  /* Rounds 40-59: majority round function, K = 0x8F1BBCDC. */
  K = 0x8F1BBCDC;
#define JERRYX_SHA1_F(x, y, z) ((x & y) | (z & (x | y)))
  JERRYX_SHA1_P (A, B, C, D, E, JERRYX_SHA1_R (40));
  JERRYX_SHA1_P (E, A, B, C, D, JERRYX_SHA1_R (41));
  JERRYX_SHA1_P (D, E, A, B, C, JERRYX_SHA1_R (42));
  JERRYX_SHA1_P (C, D, E, A, B, JERRYX_SHA1_R (43));
  JERRYX_SHA1_P (B, C, D, E, A, JERRYX_SHA1_R (44));
  JERRYX_SHA1_P (A, B, C, D, E, JERRYX_SHA1_R (45));
  JERRYX_SHA1_P (E, A, B, C, D, JERRYX_SHA1_R (46));
  JERRYX_SHA1_P (D, E, A, B, C, JERRYX_SHA1_R (47));
  JERRYX_SHA1_P (C, D, E, A, B, JERRYX_SHA1_R (48));
  JERRYX_SHA1_P (B, C, D, E, A, JERRYX_SHA1_R (49));
  JERRYX_SHA1_P (A, B, C, D, E, JERRYX_SHA1_R (50));
  JERRYX_SHA1_P (E, A, B, C, D, JERRYX_SHA1_R (51));
  JERRYX_SHA1_P (D, E, A, B, C, JERRYX_SHA1_R (52));
  JERRYX_SHA1_P (C, D, E, A, B, JERRYX_SHA1_R (53));
  JERRYX_SHA1_P (B, C, D, E, A, JERRYX_SHA1_R (54));
  JERRYX_SHA1_P (A, B, C, D, E, JERRYX_SHA1_R (55));
  JERRYX_SHA1_P (E, A, B, C, D, JERRYX_SHA1_R (56));
  JERRYX_SHA1_P (D, E, A, B, C, JERRYX_SHA1_R (57));
  JERRYX_SHA1_P (C, D, E, A, B, JERRYX_SHA1_R (58));
  JERRYX_SHA1_P (B, C, D, E, A, JERRYX_SHA1_R (59));
#undef JERRYX_SHA1_F
  /* Rounds 60-79: parity round function again, K = 0xCA62C1D6. */
  K = 0xCA62C1D6;
#define JERRYX_SHA1_F(x, y, z) (x ^ y ^ z)
  JERRYX_SHA1_P (A, B, C, D, E, JERRYX_SHA1_R (60));
  JERRYX_SHA1_P (E, A, B, C, D, JERRYX_SHA1_R (61));
  JERRYX_SHA1_P (D, E, A, B, C, JERRYX_SHA1_R (62));
  JERRYX_SHA1_P (C, D, E, A, B, JERRYX_SHA1_R (63));
  JERRYX_SHA1_P (B, C, D, E, A, JERRYX_SHA1_R (64));
  JERRYX_SHA1_P (A, B, C, D, E, JERRYX_SHA1_R (65));
  JERRYX_SHA1_P (E, A, B, C, D, JERRYX_SHA1_R (66));
  JERRYX_SHA1_P (D, E, A, B, C, JERRYX_SHA1_R (67));
  JERRYX_SHA1_P (C, D, E, A, B, JERRYX_SHA1_R (68));
  JERRYX_SHA1_P (B, C, D, E, A, JERRYX_SHA1_R (69));
  JERRYX_SHA1_P (A, B, C, D, E, JERRYX_SHA1_R (70));
  JERRYX_SHA1_P (E, A, B, C, D, JERRYX_SHA1_R (71));
  JERRYX_SHA1_P (D, E, A, B, C, JERRYX_SHA1_R (72));
  JERRYX_SHA1_P (C, D, E, A, B, JERRYX_SHA1_R (73));
  JERRYX_SHA1_P (B, C, D, E, A, JERRYX_SHA1_R (74));
  JERRYX_SHA1_P (A, B, C, D, E, JERRYX_SHA1_R (75));
  JERRYX_SHA1_P (E, A, B, C, D, JERRYX_SHA1_R (76));
  JERRYX_SHA1_P (D, E, A, B, C, JERRYX_SHA1_R (77));
  JERRYX_SHA1_P (C, D, E, A, B, JERRYX_SHA1_R (78));
  JERRYX_SHA1_P (B, C, D, E, A, JERRYX_SHA1_R (79));
#undef JERRYX_SHA1_F
  /* Fold this block's result back into the chaining state. */
  sha1_context_p->state[0] += A;
  sha1_context_p->state[1] += B;
  sha1_context_p->state[2] += C;
  sha1_context_p->state[3] += D;
  sha1_context_p->state[4] += E;
#undef JERRYX_SHA1_SHIFT
#undef JERRYX_SHA1_R
} /* jerryx_sha1_process */
#undef JERRYX_SHA1_P
/**
 * SHA-1 update buffer.
 *
 * Feeds source_length bytes into the hash. Input is staged in the context's
 * 64-byte buffer; whenever a full block is available it is compressed via
 * jerryx_sha1_process.
 */
static void
jerryx_sha1_update (jerryx_sha1_context *sha1_context_p, /**< SHA-1 context */
                    const uint8_t *source_p, /**< source buffer */
                    size_t source_length) /**< length of source buffer */
{
  size_t fill;
  uint32_t left;
  if (source_length == 0)
  {
    return;
  }
  /* Bytes already pending in the context buffer (0..63) and room remaining. */
  left = sha1_context_p->total[0] & 0x3F;
  fill = 64 - left;
  sha1_context_p->total[0] += (uint32_t) source_length;
  /* Check overflow. */
  if (sha1_context_p->total[0] < (uint32_t) source_length)
  {
    sha1_context_p->total[1]++;
  }
  /* First complete and compress a partially filled buffer, if any. */
  if (left && source_length >= fill)
  {
    memcpy ((void *) (sha1_context_p->buffer + left), source_p, fill);
    jerryx_sha1_process (sha1_context_p, sha1_context_p->buffer);
    source_p += fill;
    source_length -= fill;
    left = 0;
  }
  /* Compress full 64-byte blocks directly from the input. */
  while (source_length >= 64)
  {
    jerryx_sha1_process (sha1_context_p, source_p);
    source_p += 64;
    source_length -= 64;
  }
  /* Stash any remaining tail bytes for the next update/finish call. */
  if (source_length > 0)
  {
    memcpy ((void *) (sha1_context_p->buffer + left), source_p, source_length);
  }
} /* jerryx_sha1_update */
/**
 * SHA-1 final digest.
 *
 * Pads the message (0x80, then zeros, up to 8 bytes short of a block
 * boundary), appends the total message length in bits, and serializes the
 * chaining state into the 20-byte digest.
 */
static void
jerryx_sha1_finish (jerryx_sha1_context *sha1_context_p, /**< SHA-1 context */
                    uint8_t destination_p[20]) /**< result */
{
  uint8_t buffer[16];
  /* Total message length in bits as two 32-bit words; total[] counts bytes,
   * hence the shift by 3 (the >> 29 carries byte-count bits into 'high'). */
  uint32_t high = (sha1_context_p->total[0] >> 29) | (sha1_context_p->total[1] << 3);
  uint32_t low = (sha1_context_p->total[0] << 3);
  /* Number of padding bytes needed so the data ends at offset 56 mod 64. */
  uint32_t last = sha1_context_p->total[0] & 0x3F;
  uint32_t padn = (last < 56) ? (56 - last) : (120 - last);
  memset (buffer, 0, sizeof (buffer));
  buffer[0] = 0x80;
  /* The scratch buffer is only 16 bytes, so stream the padding in chunks. */
  while (padn > sizeof (buffer))
  {
    jerryx_sha1_update (sha1_context_p, buffer, sizeof (buffer));
    buffer[0] = 0; /* only the very first padding byte is 0x80 */
    padn -= (uint32_t) sizeof (buffer);
  }
  jerryx_sha1_update (sha1_context_p, buffer, padn);
  /* Append the original length in bits, big endian. */
  JERRYX_SHA1_PUT_UINT32_BE (high, buffer, 0);
  JERRYX_SHA1_PUT_UINT32_BE (low, buffer, 4);
  jerryx_sha1_update (sha1_context_p, buffer, 8);
  /* Serialize the five state words into the 20-byte digest. */
  JERRYX_SHA1_PUT_UINT32_BE (sha1_context_p->state[0], destination_p, 0);
  JERRYX_SHA1_PUT_UINT32_BE (sha1_context_p->state[1], destination_p, 4);
  JERRYX_SHA1_PUT_UINT32_BE (sha1_context_p->state[2], destination_p, 8);
  JERRYX_SHA1_PUT_UINT32_BE (sha1_context_p->state[3], destination_p, 12);
  JERRYX_SHA1_PUT_UINT32_BE (sha1_context_p->state[4], destination_p, 16);
} /* jerryx_sha1_finish */
#undef JERRYX_SHA1_GET_UINT32_BE
#undef JERRYX_SHA1_PUT_UINT32_BE
/**
 * Computes the SHA-1 value of the combination of the two input buffers.
 *
 * The digest equals SHA-1 of the concatenation source1 || source2; taking
 * two buffers simply avoids building that concatenation in memory.
 */
void
jerryx_debugger_compute_sha1 (const uint8_t *source1_p, /**< first part of the input */
                              size_t source1_length, /**< length of the first part */
                              const uint8_t *source2_p, /**< second part of the input */
                              size_t source2_length, /**< length of the second part */
                              uint8_t destination_p[20]) /**< result */
{
  jerryx_sha1_context sha1_context;
  jerryx_sha1_init (&sha1_context);
  jerryx_sha1_update (&sha1_context, source1_p, source1_length);
  jerryx_sha1_update (&sha1_context, source2_p, source2_length);
  jerryx_sha1_finish (&sha1_context, destination_p);
} /* jerryx_debugger_compute_sha1 */
#endif /* defined (JERRY_DEBUGGER) && (JERRY_DEBUGGER == 1) */
| dbatyai/jerryscript | jerry-ext/debugger/debugger-sha1.c | C | apache-2.0 | 12,977 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="zh">
<head>
<!-- Generated by javadoc (1.8.0_31) on Mon Feb 02 16:43:55 CST 2015 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>类 org.jb2011.lnf.beautyeye.ch_x.__IconFactory__的使用</title>
<meta name="date" content="2015-02-02">
<link rel="stylesheet" type="text/css" href="../../../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="\u7C7B org.jb2011.lnf.beautyeye.ch_x.__IconFactory__\u7684\u4F7F\u7528";
}
}
catch(err) {
}
//-->
</script>
<noscript>
<div>您的浏览器已禁用 JavaScript。</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="跳过导航链接">跳过导航链接</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="导航">
<li><a href="../../../../../../overview-summary.html">概览</a></li>
<li><a href="../package-summary.html">程序包</a></li>
<li><a href="../../../../../../org/jb2011/lnf/beautyeye/ch_x/__IconFactory__.html" title="org.jb2011.lnf.beautyeye.ch_x中的类">类</a></li>
<li class="navBarCell1Rev">使用</li>
<li><a href="../package-tree.html">树</a></li>
<li><a href="../../../../../../deprecated-list.html">已过时</a></li>
<li><a href="../../../../../../index-files/index-1.html">索引</a></li>
<li><a href="../../../../../../help-doc.html">帮助</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>上一个</li>
<li>下一个</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?org/jb2011/lnf/beautyeye/ch_x/class-use/__IconFactory__.html" target="_top">框架</a></li>
<li><a href="__IconFactory__.html" target="_top">无框架</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../allclasses-noframe.html">所有类</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="类的使用 org.jb2011.lnf.beautyeye.ch_x.__IconFactory__" class="title">类的使用<br>org.jb2011.lnf.beautyeye.ch_x.__IconFactory__</h2>
</div>
<div class="classUseContainer">
<ul class="blockList">
<li class="blockList">
<table class="useSummary" border="0" cellpadding="3" cellspacing="0" summary="使用表, 列表程序包和解释">
<caption><span>使用<a href="../../../../../../org/jb2011/lnf/beautyeye/ch_x/__IconFactory__.html" title="org.jb2011.lnf.beautyeye.ch_x中的类">__IconFactory__</a>的程序包</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">程序包</th>
<th class="colLast" scope="col">说明</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><a href="#org.jb2011.lnf.beautyeye.ch_x">org.jb2011.lnf.beautyeye.ch_x</a></td>
<td class="colLast">
<div class="block">本包内包含了各种未归类的UI属性设置等。</div>
</td>
</tr>
</tbody>
</table>
</li>
<li class="blockList">
<ul class="blockList">
<li class="blockList"><a name="org.jb2011.lnf.beautyeye.ch_x">
<!-- -->
</a>
<h3><a href="../../../../../../org/jb2011/lnf/beautyeye/ch_x/package-summary.html">org.jb2011.lnf.beautyeye.ch_x</a>中<a href="../../../../../../org/jb2011/lnf/beautyeye/ch_x/__IconFactory__.html" title="org.jb2011.lnf.beautyeye.ch_x中的类">__IconFactory__</a>的使用</h3>
<table class="useSummary" border="0" cellpadding="3" cellspacing="0" summary="使用表, 列表方法和解释">
<caption><span>返回<a href="../../../../../../org/jb2011/lnf/beautyeye/ch_x/__IconFactory__.html" title="org.jb2011.lnf.beautyeye.ch_x中的类">__IconFactory__</a>的<a href="../../../../../../org/jb2011/lnf/beautyeye/ch_x/package-summary.html">org.jb2011.lnf.beautyeye.ch_x</a>中的方法</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">限定符和类型</th>
<th class="colLast" scope="col">方法和说明</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><code>static <a href="../../../../../../org/jb2011/lnf/beautyeye/ch_x/__IconFactory__.html" title="org.jb2011.lnf.beautyeye.ch_x中的类">__IconFactory__</a></code></td>
<td class="colLast"><span class="typeNameLabel">__IconFactory__.</span><code><span class="memberNameLink"><a href="../../../../../../org/jb2011/lnf/beautyeye/ch_x/__IconFactory__.html#getInstance--">getInstance</a></span>()</code>
<div class="block">Gets the single instance of __IconFactory__.</div>
</td>
</tr>
</tbody>
</table>
</li>
</ul>
</li>
</ul>
</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="跳过导航链接">跳过导航链接</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="导航">
<li><a href="../../../../../../overview-summary.html">概览</a></li>
<li><a href="../package-summary.html">程序包</a></li>
<li><a href="../../../../../../org/jb2011/lnf/beautyeye/ch_x/__IconFactory__.html" title="org.jb2011.lnf.beautyeye.ch_x中的类">类</a></li>
<li class="navBarCell1Rev">使用</li>
<li><a href="../package-tree.html">树</a></li>
<li><a href="../../../../../../deprecated-list.html">已过时</a></li>
<li><a href="../../../../../../index-files/index-1.html">索引</a></li>
<li><a href="../../../../../../help-doc.html">帮助</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>上一个</li>
<li>下一个</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?org/jb2011/lnf/beautyeye/ch_x/class-use/__IconFactory__.html" target="_top">框架</a></li>
<li><a href="__IconFactory__.html" target="_top">无框架</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../allclasses-noframe.html">所有类</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
| Cochitect/beautyeye | doc/api_doc/org/jb2011/lnf/beautyeye/ch_x/class-use/__IconFactory__.html | HTML | apache-2.0 | 7,016 |
/*******************************************************************************
*
* Pentaho Data Integration
*
* Copyright (C) 2002-2012 by Pentaho : http://www.pentaho.com
*
*******************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package org.pentaho.di.core.row;
import java.util.Date;
import org.pentaho.di.core.exception.KettleValueException;
import org.pentaho.di.core.util.StringUtil;
/**
 * Micro-benchmark for {@link RowMetaInterface#cloneRow(Object[])}: times row
 * cloning over String-only and mixed-type rows of 10, 100 and 1000 groups.
 */
public class SpeedTest
{
    // Rows holding only String values (10/100/1000 fields).
    private Object[] rowString10;
    private Object[] rowString100;
    private Object[] rowString1000;

    // Rows holding a String/Date/Number/Integer/Boolean group per index,
    // i.e. 5 fields per group (50/500/5000 fields in total).
    private Object[] rowMixed10;
    private Object[] rowMixed100;
    private Object[] rowMixed1000;

    private RowMetaInterface metaString10;
    private RowMetaInterface metaMixed10;
    private RowMetaInterface metaString100;
    private RowMetaInterface metaMixed100;
    private RowMetaInterface metaString1000;
    private RowMetaInterface metaMixed1000;

    /** Number of cloneRow() calls per measured test. */
    public static final int ITERATIONS = 1000000;

    /**
     * Builds all six row/metadata pairs with random sample data.
     */
    public SpeedTest()
    {
        rowString10 = new Object[10];
        rowString100 = new Object[100];
        rowString1000 = new Object[1000];

        rowMixed10 = new Object[50];
        rowMixed100 = new Object[500];
        rowMixed1000 = new Object[5000];

        metaString10 = new RowMeta();
        metaMixed10 = new RowMeta();
        metaString100 = new RowMeta();
        metaMixed100 = new RowMeta();
        metaString1000 = new RowMeta();
        metaMixed1000 = new RowMeta();

        for (int i = 0; i < 10; i++)
        {
            populateMetaAndData(i, rowString10, metaString10, rowMixed10, metaMixed10);
        }
        for (int i = 0; i < 100; i++)
        {
            populateMetaAndData(i, rowString100, metaString100, rowMixed100, metaMixed100);
        }
        for (int i = 0; i < 1000; i++)
        {
            populateMetaAndData(i, rowString1000, metaString1000, rowMixed1000, metaMixed1000);
        }
    }

    /**
     * Fills slot i of the String row and slots i*5..i*5+4 of the mixed row,
     * registering a matching ValueMeta for each value.
     *
     * Note: field names now carry the type and a unique index; previously all
     * five mixed fields of a group were named "String"+(i*5+1) (copy-paste bug).
     */
    private static void populateMetaAndData(int i, Object[] rowString, RowMetaInterface metaString,
            Object[] rowMixed, RowMetaInterface metaMixed)
    {
        rowString[i] = StringUtil.generateRandomString(20, "", "", false);
        metaString.addValueMeta(new ValueMeta("String" + (i + 1), ValueMetaInterface.TYPE_STRING, 20, 0));

        rowMixed[i * 5 + 0] = StringUtil.generateRandomString(20, "", "", false);
        metaMixed.addValueMeta(new ValueMeta("String" + (i * 5 + 1), ValueMetaInterface.TYPE_STRING, 20, 0));

        rowMixed[i * 5 + 1] = new Date();
        metaMixed.addValueMeta(new ValueMeta("Date" + (i * 5 + 2), ValueMetaInterface.TYPE_DATE));

        // Boxed via valueOf/autoboxing-friendly factories instead of the
        // deprecated wrapper constructors.
        rowMixed[i * 5 + 2] = Double.valueOf(Math.random() * 1000000);
        metaMixed.addValueMeta(new ValueMeta("Number" + (i * 5 + 3), ValueMetaInterface.TYPE_NUMBER, 12, 4));

        rowMixed[i * 5 + 3] = Long.valueOf((long) (Math.random() * 1000000));
        metaMixed.addValueMeta(new ValueMeta("Integer" + (i * 5 + 4), ValueMetaInterface.TYPE_INTEGER, 8, 0));

        rowMixed[i * 5 + 4] = Boolean.valueOf(Math.random() > 0.5);
        metaMixed.addValueMeta(new ValueMeta("Boolean" + (i * 5 + 5), ValueMetaInterface.TYPE_BOOLEAN));
    }

    /**
     * Clones the given row the requested number of times and returns the
     * elapsed wall-clock time in milliseconds.
     */
    private static long timeClones(RowMetaInterface meta, Object[] row, int iterations) throws KettleValueException
    {
        long startTime = System.currentTimeMillis();
        for (int i = 0; i < iterations; i++)
        {
            meta.cloneRow(row);
        }
        return System.currentTimeMillis() - startTime;
    }

    /** Times cloning of the 10-field String row; returns elapsed ms. */
    public long runTestStrings10(int iterations) throws KettleValueException
    {
        return timeClones(metaString10, rowString10, iterations);
    }

    /** Times cloning of the 50-field mixed row; returns elapsed ms. */
    public long runTestMixed10(int iterations) throws KettleValueException
    {
        return timeClones(metaMixed10, rowMixed10, iterations);
    }

    /** Times cloning of the 100-field String row; returns elapsed ms. */
    public long runTestStrings100(int iterations) throws KettleValueException
    {
        return timeClones(metaString100, rowString100, iterations);
    }

    /** Times cloning of the 500-field mixed row; returns elapsed ms. */
    public long runTestMixed100(int iterations) throws KettleValueException
    {
        return timeClones(metaMixed100, rowMixed100, iterations);
    }

    /** Times cloning of the 1000-field String row; returns elapsed ms. */
    public long runTestStrings1000(int iterations) throws KettleValueException
    {
        return timeClones(metaString1000, rowString1000, iterations);
    }

    /** Times cloning of the 5000-field mixed row; returns elapsed ms. */
    public long runTestMixed1000(int iterations) throws KettleValueException
    {
        return timeClones(metaMixed1000, rowMixed1000, iterations);
    }

    /**
     * Prints one result line in the original format, guarding against a
     * division by zero when the elapsed time rounds down to 0 ms.
     */
    private static void report(String testName, long time)
    {
        long safeTime = Math.max(time, 1L);
        System.out.println("Time to run '" + testName + "' test " + ITERATIONS + " times : " + time + " ms ("
                + (1000L * ITERATIONS / safeTime) + " r/s)");
    }

    /**
     * Runs all six timing tests and prints the throughput of each.
     */
    public static void main(String[] args) throws KettleValueException
    {
        SpeedTest speedTest = new SpeedTest();

        report("String10", speedTest.runTestStrings10(ITERATIONS));
        report("Mixed10", speedTest.runTestMixed10(ITERATIONS));
        System.out.println();

        report("String100", speedTest.runTestStrings100(ITERATIONS));
        report("Mixed100", speedTest.runTestMixed100(ITERATIONS));
        System.out.println();

        report("String1000", speedTest.runTestStrings1000(ITERATIONS));
        report("Mixed1000", speedTest.runTestMixed1000(ITERATIONS));
        System.out.println();
    }
}
| lihongqiang/kettle-4.4.0-stable | src-core/org/pentaho/di/core/row/SpeedTest.java | Java | apache-2.0 | 7,980 |
/*
* Copyright 2014 http://Bither.net
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.bither.bitherj.api;
import net.bither.bitherj.api.http.BitherUrl;
import net.bither.bitherj.api.http.HttpPostResponse;
import net.bither.bitherj.api.http.HttpSetting.HttpType;
import org.apache.http.HttpEntity;
import org.apache.http.NameValuePair;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.protocol.HTTP;
import java.util.ArrayList;
import java.util.List;
/**
 * POST request that obtains the Bither session cookie. The request body
 * carries a single "ts" form field holding a client timestamp.
 */
public class GetCookieApi extends HttpPostResponse<String> {
    /** Name of the form field carrying the client timestamp. */
    private static final String TS_PARAM_NAME = "ts";

    public GetCookieApi() {
        setUrl(BitherUrl.BITHER_GET_COOKIE_URL);
        setHttpType(HttpType.GetBitherCookie);
    }

    @Override
    public void setResult(String response) throws Exception {
        // The raw response body is the result; store it unmodified.
        result = response;
    }

    @Override
    public HttpEntity getHttpEntity() throws Exception {
        // Current time rounded down to whole seconds, plus a constant 215 ms
        // (fixed offset carried over from the original protocol handshake).
        long timestamp = System.currentTimeMillis() / 1000 * 1000 + 215;

        List<NameValuePair> formFields = new ArrayList<NameValuePair>();
        formFields.add(new BasicNameValuePair(TS_PARAM_NAME, Long.toString(timestamp)));
        return new UrlEncodedFormEntity(formFields, HTTP.UTF_8);
    }
}
| bither/bitherj | bitherj/src/main/java/net/bither/bitherj/api/GetCookieApi.java | Java | apache-2.0 | 1,805 |
package dotty.tools.benchmarks.tuples
import org.openjdk.jmh.annotations._
@State(Scope.Thread)
// JMH benchmark comparing tail-extraction on runtime tuples vs. arrays.
class Tail {
  // Number of elements in the tuple/array under test; values injected by JMH.
  @Param(Array("1"))
  var size: Int = _
  // Non-empty tuple of `size` copies of "elem", built in setup().
  var tuple: NonEmptyTuple = _
  // Array of `size` copies of "elem"; the baseline to compare against.
  var array: Array[Object] = _
  @Setup
  def setup(): Unit = {
    // Seed with one element, then cons until the tuple holds `size` elements.
    tuple = "elem" *: Tuple()
    for (i <- 1 until size)
      tuple = "elem" *: tuple
    array = Array.fill(size)("elem")
  }
  // Measures dropping the first element of a runtime tuple.
  @Benchmark
  def tupleTail(): Tuple = {
    runtime.Tuples.tail(tuple)
  }
  // Measures Array#tail on an equally sized array.
  @Benchmark
  def arrayTail(): Array[Object] = {
    array.tail
  }
}
| dotty-staging/dotty | bench-run/src/main/scala/dotty/tools/benchmarks/tuples/Tail.scala | Scala | apache-2.0 | 529 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.test.state.operator.restore.keyed;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.memory.MemoryStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.checkpoint.ListCheckpointed;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
import org.apache.flink.streaming.api.functions.windowing.RichWindowFunction;
import org.apache.flink.streaming.api.windowing.windows.GlobalWindow;
import org.apache.flink.test.state.operator.restore.ExecutionMode;
import org.apache.flink.util.Collector;
import org.junit.Assert;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
/**
* Savepoint generator to create the savepoint used by the {@link AbstractKeyedOperatorRestoreTestBase}.
* Switch to specific version branches and run this job to create savepoints of different Flink versions.
*
* The job should be cancelled manually through the REST API using the cancel-with-savepoint operation.
*/
public class KeyedJob {

	/**
	 * Assembles and runs the savepoint-generation job. Requires a
	 * {@code --savepoint-path} argument naming the savepoint target directory;
	 * the job then runs until cancelled (ideally via cancel-with-savepoint).
	 */
	public static void main(String[] args) throws Exception {
		ParameterTool pt = ParameterTool.fromArgs(args);

		String savepointsPath = pt.getRequired("savepoint-path");

		Configuration config = new Configuration();
		config.setString(ConfigConstants.SAVEPOINT_DIRECTORY_KEY, savepointsPath);

		StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(config);
		// Checkpoint frequently and never restart, so failures surface immediately.
		env.enableCheckpointing(500, CheckpointingMode.EXACTLY_ONCE);
		env.setRestartStrategy(RestartStrategies.noRestart());
		env.setStateBackend(new MemoryStateBackend());

		/**
		 * Source -> keyBy -> C(Window -> StatefulMap1 -> StatefulMap2)
		 */
		SingleOutputStreamOperator<Tuple2<Integer, Integer>> source = createIntegerTupleSource(env, ExecutionMode.GENERATE);

		SingleOutputStreamOperator<Integer> window = createWindowFunction(ExecutionMode.GENERATE, source);

		SingleOutputStreamOperator<Integer> first = createFirstStatefulMap(ExecutionMode.GENERATE, window);

		SingleOutputStreamOperator<Integer> second = createSecondStatefulMap(ExecutionMode.GENERATE, first);

		env.execute("job");
	}

	/** Creates the (Integer, Integer) tuple source; its lifecycle depends on {@code mode}. */
	public static SingleOutputStreamOperator<Tuple2<Integer, Integer>> createIntegerTupleSource(StreamExecutionEnvironment env, ExecutionMode mode) {
		return env.addSource(new IntegerTupleSource(mode));
	}

	/**
	 * Keys the input by the first tuple field and applies the stateful window
	 * function with parallelism 4 and fixed uid "window", so operator state
	 * can be matched on restore.
	 */
	public static SingleOutputStreamOperator<Integer> createWindowFunction(ExecutionMode mode, DataStream<Tuple2<Integer, Integer>> input) {
		return input
			.keyBy(0)
			.countWindow(1)
			.apply(new StatefulWindowFunction(mode))
			.setParallelism(4)
			.uid("window");
	}

	/**
	 * First stateful map. The uid is only assigned when migrating/restoring —
	 * presumably matching a savepoint originally taken without explicit uids
	 * (TODO confirm against the savepoint generation flow).
	 */
	public static SingleOutputStreamOperator<Integer> createFirstStatefulMap(ExecutionMode mode, DataStream<Integer> input) {
		SingleOutputStreamOperator<Integer> map = input
			.map(new StatefulStringStoringMap(mode, "first"))
			.setParallelism(4);

		if (mode == ExecutionMode.MIGRATE || mode == ExecutionMode.RESTORE) {
			map.uid("first");
		}

		return map;
	}

	/** Second stateful map; uid handling mirrors {@link #createFirstStatefulMap}. */
	public static SingleOutputStreamOperator<Integer> createSecondStatefulMap(ExecutionMode mode, DataStream<Integer> input) {
		SingleOutputStreamOperator<Integer> map = input
			.map(new StatefulStringStoringMap(mode, "second"))
			.setParallelism(4);

		if (mode == ExecutionMode.MIGRATE || mode == ExecutionMode.RESTORE) {
			map.uid("second");
		}

		return map;
	}

	/**
	 * Emits the tuples (0,0)..(9,9). In GENERATE/MIGRATE mode the source then
	 * blocks until {@link #cancel()} is called, keeping the job alive long
	 * enough for a savepoint to be taken.
	 */
	private static final class IntegerTupleSource extends RichSourceFunction<Tuple2<Integer, Integer>> {

		private static final long serialVersionUID = 1912878510707871659L;

		private final ExecutionMode mode;
		// Guarded by `this`; flipped by cancel().
		private boolean running = true;

		private IntegerTupleSource(ExecutionMode mode) {
			this.mode = mode;
		}

		@Override
		public void run(SourceContext<Tuple2<Integer, Integer>> ctx) throws Exception {
			for (int x = 0; x < 10; x++) {
				ctx.collect(new Tuple2<>(x, x));
			}

			switch (mode) {
				case GENERATE:
				case MIGRATE:
					// Park until cancel() notifies us; RESTORE mode returns immediately.
					synchronized (this) {
						while (running) {
							this.wait();
						}
					}
			}
		}

		@Override
		public void cancel() {
			synchronized (this) {
				running = false;
				this.notifyAll();
			}
		}
	}

	/**
	 * Window function that stores each windowed value in keyed list state when
	 * generating, and on migrate/restore verifies the restored state pairs up
	 * exactly with the freshly generated input.
	 */
	private static final class StatefulWindowFunction extends RichWindowFunction<Tuple2<Integer, Integer>, Integer, Tuple, GlobalWindow> {

		private static final long serialVersionUID = -7236313076792964055L;

		private final ExecutionMode mode;

		// Keyed list state holding the f1 values seen so far.
		private transient ListState<Integer> state;

		private boolean applyCalled = false;

		private StatefulWindowFunction(ExecutionMode mode) {
			this.mode = mode;
		}

		@Override
		public void open(Configuration config) {
			this.state = getRuntimeContext().getListState(new ListStateDescriptor<>("values", Integer.class));
		}

		@Override
		public void apply(Tuple key, GlobalWindow window, Iterable<Tuple2<Integer, Integer>> values, Collector<Integer> out) throws Exception {
			// fail-safe to make sure apply is actually called
			applyCalled = true;

			switch (mode) {
				case GENERATE:
					for (Tuple2<Integer, Integer> value : values) {
						state.add(value.f1);
					}
					break;
				case MIGRATE:
				case RESTORE:
					// Walk both sequences in lockstep; they must be equal element-wise
					// and of the same length.
					Iterator<Tuple2<Integer, Integer>> input = values.iterator();
					Iterator<Integer> restored = state.get().iterator();
					while (input.hasNext() && restored.hasNext()) {
						Tuple2<Integer, Integer> value = input.next();
						Integer rValue = restored.next();
						Assert.assertEquals(rValue, value.f1);
					}
					Assert.assertEquals(restored.hasNext(), input.hasNext());
			}
		}

		@Override
		public void close() {
			Assert.assertTrue("Apply was never called.", applyCalled);
		}
	}

	/**
	 * Identity map that snapshots one marker string ("first"/"second" plus the
	 * subtask index) as operator state and verifies it again on restore.
	 */
	private static class StatefulStringStoringMap extends RichMapFunction<Integer, Integer> implements ListCheckpointed<String> {

		private static final long serialVersionUID = 6092985758425330235L;

		private final ExecutionMode mode;
		private final String valueToStore;

		private StatefulStringStoringMap(ExecutionMode mode, String valueToStore) {
			this.mode = mode;
			this.valueToStore = valueToStore;
		}

		@Override
		public Integer map(Integer value) throws Exception {
			return value;
		}

		@Override
		public List<String> snapshotState(long checkpointId, long timestamp) throws Exception {
			return Arrays.asList(valueToStore + getRuntimeContext().getIndexOfThisSubtask());
		}

		@Override
		public void restoreState(List<String> state) throws Exception {
			switch (mode) {
				case GENERATE:
					// Nothing to check when generating the savepoint.
					break;
				case MIGRATE:
				case RESTORE:
					Assert.assertEquals("Failed for " + valueToStore + getRuntimeContext().getIndexOfThisSubtask(), 1, state.size());
					String value = state.get(0);
					Assert.assertEquals(valueToStore + getRuntimeContext().getIndexOfThisSubtask(), value);
			}
		}
	}
}
| hongyuhong/flink | flink-tests/src/test/java/org/apache/flink/test/state/operator/restore/keyed/KeyedJob.java | Java | apache-2.0 | 8,230 |
---
layout: section
title: "Distinct"
permalink: /documentation/transforms/java/aggregation/distinct/
section_menu: section-menu/documentation.html
---
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# Distinct
<table align="left">
<a target="_blank" class="button"
href="https://beam.apache.org/releases/javadoc/current/index.html?org/apache/beam/sdk/transforms/Distinct.html">
<img src="https://beam.apache.org/images/logos/sdks/java.png" width="20px" height="20px"
alt="Javadoc" />
Javadoc
</a>
</table>
<br>
Produces a collection containing distinct elements of the input collection.
On some data sets, it might be more efficient to compute an approximate
answer using `ApproximateUnique`, which also allows for determining distinct
values for each key.
## Examples
See [BEAM-7703](https://issues.apache.org/jira/browse/BEAM-7703) for updates.
## Related transforms
* [Count]({{ site.baseurl }}/documentation/transforms/java/aggregation/count)
counts the number of elements within each aggregation.
* [ApproximateUnique]({{ site.baseurl }}/documentation/transforms/java/aggregation/approximateunique)
estimates the number of distinct elements in a collection. | RyanSkraba/beam | website/src/documentation/transforms/java/aggregation/distinct.md | Markdown | apache-2.0 | 1,690 |
/*
* Copyright (C) 2010 Andrey Yeremenok (eav1986__at__gmail__com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and limitations under the License.
*/
package org.formbuilder.util;
import javax.annotation.Nonnull;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
/**
* A bridge to CGLib dependency
*
* @author aeremenok Date: 09.08.2010 Time: 17:38:47
*/
/**
 * A bridge to CGLib dependency
 *
 * @author aeremenok Date: 09.08.2010 Time: 17:38:47
 */
public class CGLibUtil
{
// -------------------------- STATIC METHODS --------------------------

    /**
     * Builds a CGLib-generated subclass of {@code beanClass} whose method
     * invocations are all routed through the given JDK {@link InvocationHandler}.
     *
     * @param beanClass class to proxy (must be subclassable by CGLib)
     * @param handler   receives every invocation made on the proxy
     * @return a new proxy instance, cast to the bean type
     */
    @SuppressWarnings( {"unchecked"} )
    @Nonnull
    public static <T> T createCGLibProxy( final Class<T> beanClass,
                                          final InvocationHandler handler )
    {
        final net.sf.cglib.proxy.Enhancer enhancer = new net.sf.cglib.proxy.Enhancer();
        enhancer.setSuperclass( beanClass );
        enhancer.setUseFactory( false );
        // Adapt CGLib's own InvocationHandler interface to the JDK handler.
        enhancer.setCallback( new net.sf.cglib.proxy.InvocationHandler()
        {
            public Object invoke( final Object target,
                                  final Method invokedMethod,
                                  final Object[] arguments )
                    throws
                    Throwable
            {
                return handler.invoke( target, invokedMethod, arguments );
            }
        } );
        return (T) enhancer.create();
    }
}
| 0359xiaodong/swing-formbuilder | src/formbuilder-parent/formbuilder-main/src/main/java/org/formbuilder/util/CGLibUtil.java | Java | apache-2.0 | 1,795 |
+++
Title = "Jude Pereira"
Linkedin = "https://www.linkedin.com/in/judebpereira/"
Website = "https://asyncy.com/"
Twitter = "judebpereira"
image = "jude-pereira.jpg"
type = "speaker"
linktitle = "jude-pereira"
+++
I have been developing and building web-scale apps for most of my life, from writing SDKs for Android and iOS which are delivered to millions of people across the globe, to overseeing the migration of a 2TB database in under 7 hours (an in-house proprietary time-series database).
I'm on a journey to build the next cloud platform for highly available and scalable apps - Asyncy and Storyscript, both of which are fully open source projects.
| gomex/devopsdays-web | content/events/2019-amsterdam/speakers/jude-pereira.md | Markdown | apache-2.0 | 658 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/chime/model/UpdateUserSettingsRequest.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::Chime::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
// Default-constructs the request with all "has been set" flags cleared,
// so only fields the caller explicitly assigns are serialized.
UpdateUserSettingsRequest::UpdateUserSettingsRequest() : 
    m_accountIdHasBeenSet(false),
    m_userIdHasBeenSet(false),
    m_userSettingsHasBeenSet(false)
{
}
// Serializes the JSON request body. Only UserSettings is written here;
// AccountId and UserId never appear in the payload (presumably they are
// URI path parameters — confirm against the operation's request model).
Aws::String UpdateUserSettingsRequest::SerializePayload() const
{
  JsonValue payload;

  if(m_userSettingsHasBeenSet)
  {
   payload.WithObject("UserSettings", m_userSettings.Jsonize());

  }

  return payload.View().WriteReadable();
}
| awslabs/aws-sdk-cpp | aws-cpp-sdk-chime/source/model/UpdateUserSettingsRequest.cpp | C++ | apache-2.0 | 749 |
<?
// Copyright 2013 Tera Insights, LLC. All Rights Reserved.
//
// Author: Christopher Dudley
// Generates a C++ functor resource that samples from an inverse Gaussian
// (Wald) distribution using normal and uniform draws.
// Template arguments:
//   'output' - C++ type produced by the sampler
//   'ns'     - namespace providing the distribution templates (e.g. std)
function InverseGaussianGen( array $t_args ) {
    grokit_assert(array_key_exists('output', $t_args), 'No output type specified for inverse gaussian generator.');
    grokit_assert(array_key_exists('ns', $t_args), 'No namespace specified for inverse gaussian generator.');

    $output = $t_args['output'];
    $ns = $t_args['ns'];

    // Fully-qualified helper distributions used by the sampling algorithm.
    $norm = $ns . '::' . 'normal_distribution<double>';
    $uni = $ns . '::' . 'uniform_real_distribution<double>';

    // Unique C++ class name for this instantiation.
    $className = generate_name('InverseGaussianGen');
?>

class <?=$className?> {
    <?=$norm?> norm;
    <?=$uni?> uni;

    typedef long double real_t;

    const real_t mean;
    const real_t shape;

    const real_t mean_2;
    const real_t mean_o_2_shape;

public:
    <?=$className?>(void) = delete;

    <?=$className?>( <?=$output?> _mean, <?=$output?> _shape ):
        norm(0.0, 1.0),
        uni(0.0, 1.0),
        mean(_mean),
        shape(_shape),
        mean_2(_mean * _mean),
        mean_o_2_shape(_mean / (2 * _shape))
    { }

    template< class Generator >
    <?=$output?> operator() ( Generator & g ) {
        const real_t v = norm(g);
        const real_t y = v * v;
        const real_t y_2 = y * y;

        const real_t x = (mean) + (mean_o_2_shape * mean * y) - (mean_o_2_shape * std::sqrt((4 * mean * shape * y) + (mean_2 * y_2)));

        const real_t z = uni(g);

        const real_t deci = mean / (mean + x);

        return z > deci ? (mean_2 / x) : x;
    }
};

<?
    // Register the emitted class as a reusable RESOURCE; <cmath> is needed
    // for std::sqrt in the generated code.
    return array(
        'kind' => 'RESOURCE',
        'name' => $className,
        'system_headers' => [ 'cmath' ],
    );
} // end InverseGaussianGen
/**
* GI that generates data in clusters, using a specified distribution for each
* cluster.
*
* This GI requires the following template arguments:
* - 'n' or 0
* The number of tuples to generate. Note: this value is per task.
* The total number of tuples generated will be n_tasks * n
* - 'centers' or 1
* A list of configuration for the centers.
*
* The following template arguments are optional:
* - 'outputs'
* If the outputs of the GI are not given implicitly, they can be
* specified in this template argument. The number of dimensions will
* be determined by the number of outputs.
*
* All output types must be numeric real types. The default type for
* outputs is DOUBLE.
* - 'dist.lib' = 'std'
* Which library to use for generating distributions.
* Valid options are:
* - std
* - boost
* - 'seed' = null
* The seed to be used for the random number generator. This seed will
* be used to generate the seed for each task, and different runs with
* the same seed will produce the same data.
* - 'compute.sets' = 1
* The number of sets of tuples to compute at once.
*
* Each center configuration is a functor with the form:
* dist_name(args)
*
* The following distributions are supported:
* { Uniform Distributions }
* - uniform(a = 0, b = 1)
*
* { Normal Distributions }
* - normal(mean = 0.0, std_dev = 1.0) [ synonyms: gaussian ]
* - inverse_gaussian(mean = 1, shape = 1) [ synonyms: inverse_normal ]
*
* { Bernoulli Distributions }
* - binomial(t = 1, p = 0.5)
* - negative_binomial(k = 1, p = 0.5)
*
* { Poisson Distributions }
* - exponential( lambda = 1 )
* - gamma(alpha = 1, beta = 1) [ synonyms: Gamma ]
*/
// GI generator producing clustered data; see the documentation block above
// for the accepted template arguments.
function ClusterGen( array $t_args, array $outputs ) {
    $sys_headers = [ 'array', 'cinttypes' ];
    $user_headers = [ ];
    $libraries = [];

    // If outputs were not given implicitly, read them from the 'outputs'
    // template argument and name them output0, output1, ...
    if( \count($outputs) == 0 ) {
        grokit_assert(array_key_exists('outputs', $t_args),
            'ClusterGen: No outputs specified');

        $count = 0;
        foreach( $t_args['outputs'] as $type ) {
            if( is_identifier($type) )
                $type = lookupType($type);

            grokit_assert(is_datatype($type),
                'ClusterGen: Non data-type ' . $type . ' given as output');
            $name = 'output' . $count++;
            $outputs[$name] = $type;
        }
    }

    // Default unspecified output types to DOUBLE; all outputs must be real.
    foreach( $outputs as $name => &$type ) {
        if( is_null($type) ) {
            $type = lookupType('base::DOUBLE');
        }
        else {
            grokit_assert($type->is('real'),
                'ClusterGen: Non-real datatype ' . $type . ' given as output');
        }
    }

    $myOutputs = [];
    foreach( $outputs as $name => $type ) {
        $myOutputs[$name] = $type;
    }

    $tSize = \count($outputs);

    // With no seed, a hardware random_device seeds each task instead of
    // a CongruentHash of the seed and the task id.
    $seed = get_default($t_args, 'seed', null);
    if( $seed !== null ) {
        grokit_assert(is_int($seed), 'ClusterGen: Seed must be an integer or null.');
    } else {
        $user_headers[] = 'HashFunctions.h';
    }

    // Select the RNG/distribution library (std or boost).
    $distLib = get_default($t_args, 'dist.lib', 'std');
    $distNS = '';
    switch( $distLib ) {
    case 'std':
        $sys_headers[] = 'random';
        $distNS = 'std';
        break;
    case 'boost':
        $sys_headers[] = 'boost/random.hpp';
        $distNS = 'boost::random';
        $libraries[] = 'boost_random-mt';

        if( $seed === null ) {
            // Need random_device
            $sys_headers[] = 'boost/random/random_device.hpp';
            $libraries[] = 'boost_system-mt';
        }
        break;
    default:
        grokit_error('ClusterGen: Unknown RNG library ' . $distLib);
    }

    $distRNG = 'mt19937';
    $RNGtype = $distNS . '::' . $distRNG;

    $nTuples = get_first_key($t_args, ['n', '0']);
    grokit_assert(is_int($nTuples),
        'ClusterGen: the number of tuples to be produced must be an integer.');

    $centers = get_first_key($t_args, [ 'centers', 1 ] );
    grokit_assert(is_array($centers),
        'ClusterGen: centers must be an array of functors');

    // Maps a distribution functor (name + args) for one output of type
    // $oType onto [C++ distribution type, constructor argument list].
    $handleDist = function($name, $args, $oType) use ($distNS) {
        $distName = '';
        $distArgs = [];

        switch( $name ) {
        case 'gaussian':
        case 'normal':
            $distName = $distNS . '::' . 'normal_distribution<' . $oType . '>';
            grokit_assert(\count($args) <= 2,
                'ClusterGen: Normal distribution takes at most 2 arguments, ' . \count($args) . ' given');

            $mean = get_default($args, ['mean', 0], 0.0);
            $sigma = get_default($args, ['std_dev', 'sigma', 1], 1.0);

            // Fixed error messages: these used to say "binomial distribution".
            grokit_assert( is_numeric($mean), 'ClusterGen: mean parameter of normal distribution must be a real number.');
            grokit_assert( is_numeric($sigma), 'ClusterGen: sigma parameter of normal distribution must be a real number.');

            $mean = floatval($mean);
            $sigma = floatval($sigma);

            $distArgs = [ $mean, $sigma ];
            break;
        case 'binomial':
            $distName = $distNS . '::' . 'binomial_distribution<' . $oType . '>';
            grokit_assert(\count($args) <= 2,
                'ClusterGen: Binomial distribution takes at most 2 arguments, ' . \count($args) . ' given');

            $t = get_default($args, ['t', 0], 1);
            $p = get_default($args, ['p', 1], 0.5);

            grokit_assert( is_int($t), 'ClusterGen: t parameter of binomial distribution must be an integer.');
            grokit_assert( is_numeric($p), 'ClusterGen: p parameter of binomial distribution must be a real number.');
            $p = floatval($p);

            grokit_assert( $p >= 0 && $p <= 1, 'ClusterGen: p parameter of binomial distribution must be in the range [0, 1]');
            grokit_assert( $t >= 0, 'ClusterGen: t parameter of binomial distribution must be in the range [0, +inf)');

            $distArgs = [ $t, $p ];
            break;
        case 'negative_binomial':
            $distName = $distNS . '::' . 'negative_binomial_distribution<' . $oType . '>';
            grokit_assert(\count($args) <= 2,
                'ClusterGen: Negative Binomial distribution takes at most 2 arguments, ' . \count($args) . ' given');

            $k = get_default($args, ['k', 0], 1);
            $p = get_default($args, ['p', 1], 0.5);

            // Fixed error messages: these used to say plain "binomial distribution".
            grokit_assert( is_int($k), 'ClusterGen: k parameter of negative binomial distribution must be an integer.');
            grokit_assert( is_numeric($p), 'ClusterGen: p parameter of negative binomial distribution must be a real number.');
            $p = floatval($p);

            grokit_assert( $p > 0 && $p <= 1, 'ClusterGen: p parameter of negative binomial distribution must be in the range (0, 1]');
            grokit_assert( $k > 0, 'ClusterGen: k parameter of negative binomial distribution must be in the range (0, +inf)');

            $distArgs = [ $k, $p ];
            break;
        case 'inverse_gaussian':
        case 'inverse_normal':
            grokit_assert(\count($args) <= 2,
                'ClusterGen: Inverse Gaussian distribution takes at most 2 arguments, ' . \count($args) . ' given');

            $mean = get_default($args, ['mean', 0], 1);
            $shape = get_default($args, ['shape', 1], 1);

            grokit_assert( is_numeric($mean), 'ClusterGen: mean parameter of inverse gaussian distribution must be a real number.');
            grokit_assert( is_numeric($shape), 'ClusterGen: shape parameter of inverse gaussian distribution must be a real number.');

            $mean = floatval($mean);
            $shape = floatval($shape);

            grokit_assert( $mean > 0, 'ClusterGen: mean of inverse gaussian distribution must be in range (0, inf)');
            grokit_assert( $shape > 0, 'ClusterGen: shape of inverse gaussian distribution must be in range (0, inf)');

            // No standard-library sampler exists; instantiate the custom
            // InverseGaussianGen resource defined in this library.
            $gen_args = [ 'output' => $oType, 'ns' => $distNS ];
            $distName = strval(lookupResource('datagen::InverseGaussianGen', $gen_args));

            $distArgs = [ $mean, $shape ];
            break;
        case 'uniform':
            $distName = $distNS . '::' . 'uniform_real_distribution<' . $oType . '>';
            grokit_assert(\count($args) <= 2,
                'ClusterGen: Uniform distribution takes at most 2 arguments, ' . \count($args) . ' given');

            $a = get_default($args, ['a', 0], 0.0);
            $b = get_default($args, ['b', 1], 1.0);

            grokit_assert( is_numeric($a), 'ClusterGen: `a` parameter of uniform distribution must be a real number.');
            grokit_assert( is_numeric($b), 'ClusterGen: `b` parameter of uniform distribution must be a real number.');

            $a = floatval($a);
            $b = floatval($b);

            grokit_assert( $b >= $a, 'ClusterGen: `b` parameter of uniform distribution must be >= the `a` parameter.');

            $distArgs = [ $a, $b ];
            break;
        case 'exponential':
            $distName = $distNS . '::' . 'exponential_distribution<'. $oType . '>';
            grokit_assert(\count($args) <= 1,
                'ClusterGen: Exponential distribution takes at most 1 argument.');

            $lambda = get_default($args, [ 'lambda', 0 ], 1.0);
            grokit_assert( is_numeric($lambda), 'ClusterGen: `lambda` parameter of exponential distribution must be a real number.');
            $lambda = floatval($lambda);
            grokit_assert( $lambda > 0, 'ClusterGen: `lambda` parameter of exponential distribution must be in range (0, +inf).');

            $distArgs = [ $lambda ];
            break;
        case 'gamma':
        case 'Gamma':
            $distName = $distNS . '::' . 'gamma_distribution<'. $oType . '>';
            grokit_assert(\count($args) <= 2,
                'ClusterGen: Gamma distribution takes at most 2 arguments.');

            $alpha = get_default($args, ['alpha', 0], 1.0);
            // BUG FIX: this previously called det_default(), an undefined
            // function, which fataled whenever a gamma distribution was used.
            $beta = get_default($args, ['beta', 1], 1.0);

            grokit_assert( is_numeric($alpha), 'ClusterGen: `alpha` parameter of gamma distribution must be a real number.');
            grokit_assert( is_numeric($beta), 'ClusterGen: `beta` parameter of gamma distribution must be a real number.');

            $alpha = floatval($alpha);
            $beta = floatval($beta);

            $distArgs = [ $alpha, $beta ];
            break;
        default:
            grokit_error('ClusterGen: Unknown distribution ' . $name . ' given for center');
        }

        return [ $distName, $distArgs ];
    };

    // Resolve every center description into named C++ distribution members.
    // $nCenters becomes the LCM of all per-column center counts so every
    // column divides evenly into the generated cache.
    $dists = [];
    $distArgs = [];
    $count = 0;
    $oType = '';
    $nCenters = 1;
    reset($outputs);
    foreach( $centers as $val ) {
        $cluster = $val;
        if( is_functor($val) ) {
            $cluster = [ $val ];
        }
        else if( is_array( $val ) ) {
            $nCenters = lcm($nCenters, \count($val));
        }
        else {
            grokit_error('ClusterGen: center descriptions must be functors or list of functors');
        }

        $curDist = [];
        $curDistArgs = [];
        $curDistName = 'distribution' . $count++;
        $oType = strval(current($outputs));

        $iCount = 0;
        foreach ( $cluster as $functor ) {
            grokit_assert(is_functor($functor), 'ClusterGen: center description must be a functor');
            $vName = $curDistName . '_' . $iCount++;
            $ret = $handleDist($functor->name(), $functor->args(), $oType);
            $curDist[$vName] = $ret[0];
            $curDistArgs[$vName] = $ret[1];
        }

        next($outputs);

        $dists[$curDistName] = $curDist;
        $distArgs[$curDistName] = $curDistArgs;
    }

    // Determine the default number of sets to compute at a time.
    // We want to generate either $nTuples or 10,000 tuples, depending on which
    // is less.
    $defaultSetsTarget = min($nTuples, 10000);
    $setsToTarget = intval(ceil($defaultSetsTarget / $nCenters));

    $computeSets = get_default( $t_args, 'compute.sets', $setsToTarget );
    grokit_assert(is_int($computeSets) && $computeSets > 0,
        'ClusterGen: compute.sets must be a positive integer, ' . $computeSets . ' given');

    $className = generate_name('ClusterGen');

    // For some BIZZARE reason, the $outputs array was getting modified while
    // traversing over the $dists array. Making a deep copy of the outputs and
    // then reassigning it seems to fix the issue.
    $outputs = $myOutputs;
?>

class <?=$className?> {
    // The number of tuples to produce per task
    static constexpr size_t N = <?=$nTuples?>;
    static constexpr size_t CacheSize = <?=$computeSets * $nCenters?>;

    // Typedefs
    typedef std::tuple<<?=array_template('{val}', ', ', $outputs)?>> Tuple;
    typedef std::array<Tuple, CacheSize> TupleArray;
    typedef TupleArray::const_iterator TupleIterator;
    typedef <?=$RNGtype?> RandGen;

    // Number of tuples produced.
    uintmax_t count;

    // Cache a number of outputs for efficiency
    TupleArray cache;
    TupleIterator cacheIt;

    // Random number generator
    RandGen rng;

    // Distributions
<? // This is the section causing issues.
    foreach($dists as $name => $list) {
        foreach( $list as $vName => $type ) {
?>
    <?=$type?> <?=$vName?>;
<?
        } // foreach distribution
    } // foreach cluster set
?>

    // Helper function to generate tuples.
    void GenerateTuples(void) {
<?
    $tIndex = 0;
    foreach($dists as $name => $list) {
        $lCenters = \count($list);
        // $nCenters has been defined to be the LCM of the number of centers in
        // any column, so $lCenter is guaranteed to divide evenly into
        // CacheSize
?>
        for( size_t index = 0; CacheSize > index; index += <?=$lCenters?> ) {
<?
        $index = 0;
        foreach( $list as $vName => $type ) {
?>
            std::get<<?=$tIndex?>>(cache[index + <?=$index?>]) = <?=$vName?>(rng);
<?
            $index++;
        } // foreach value in tuple
?>
        }
<?
        $tIndex++;
    } // foreach distribution
?>
        cacheIt = cache.cbegin();
    }

public:

    // Constructor
    <?=$className?>( GIStreamProxy & _stream ) :
        cache()
        , cacheIt()
        , count(0)
        , rng()
<? foreach($dists as $name => $list) {
        foreach( $list as $vName => $type ) {
?>
        , <?=$vName?>(<?=implode(', ', $distArgs[$name][$vName])?>)
<?
        } // foreach distribution
    } // foreach cluster set
?>
    {
<? if( is_null($seed) ) { ?>
        <?=$distNS?>::random_device rd;
<? } // if seed is null ?>
        RandGen::result_type seed = <?=is_null($seed) ? 'rd()' : "CongruentHash($seed, _stream.get_id() )"?>;
        rng.seed(seed);

        cacheIt = cache.cend();
    }

    // Destructor
    ~<?=$className?>(void) { }

    bool ProduceTuple(<?=typed_ref_args($outputs)?>) {
        if( N > count ) {
            if( cacheIt == cache.cend() ) {
                GenerateTuples();
            }
<?
    $tIndex = 0;
    foreach($outputs as $name => $type) {
?>
            <?=$name?> = std::get<<?=$tIndex?>>(*cacheIt);
<?
        $tIndex++;
    } // foreach output
?>
            ++cacheIt;
            ++count;

            return true;
        }
        else {
            return false;
        }
    }
};

<?
    return array(
        'kind' => 'GI',
        'name' => $className,
        'output' => $outputs,
        'system_headers' => $sys_headers,
        'user_headers' => $user_headers,
        'libraries' => $libraries,
    );
}
?>
| nishantmehta/mainGrokit | Libs/datagen/GIs/ClusterGen.h.php | PHP | apache-2.0 | 17,421 |
import ssl
from pyOpenSSL import SSL

# NOTE: intentional Bandit test fixture -- every call below deliberately
# selects a broken or deprecated SSL/TLS protocol version so the scanner
# has something to flag. Do not "fix" these calls.

ssl.wrap_socket(ssl_version=ssl.PROTOCOL_SSLv2)
SSL.Context(method=SSL.SSLv2_METHOD)
SSL.Context(method=SSL.SSLv23_METHOD)

# Same insecure constants passed to an arbitrary (undefined) function:
# exercises keyword-argument matching rather than call-target matching.
herp_derp(ssl_version=ssl.PROTOCOL_SSLv2)
herp_derp(method=SSL.SSLv2_METHOD)
herp_derp(method=SSL.SSLv23_METHOD)

# strict tests
ssl.wrap_socket(ssl_version=ssl.PROTOCOL_SSLv3)
ssl.wrap_socket(ssl_version=ssl.PROTOCOL_TLSv1)
SSL.Context(method=SSL.SSLv3_METHOD)
SSL.Context(method=SSL.TLSv1_METHOD)
herp_derp(ssl_version=ssl.PROTOCOL_SSLv3)
herp_derp(ssl_version=ssl.PROTOCOL_TLSv1)
herp_derp(method=SSL.SSLv3_METHOD)
herp_derp(method=SSL.TLSv1_METHOD)

# No version argument: relies on the library default.
ssl.wrap_socket()


# Insecure protocol constants used as default parameter values.
def open_ssl_socket(version=ssl.PROTOCOL_SSLv2):
    pass


def open_ssl_socket(version=SSL.SSLv2_METHOD):
    pass


def open_ssl_socket(version=SSL.SSLv23_METHOD):
    pass


# this one will pass ok
def open_ssl_socket(version=SSL.TLSv1_1_METHOD):
    pass
| chair6/bandit | examples/ssl-insecure-version.py | Python | apache-2.0 | 892 |
/*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.integrationtest.jobexecutor;
import static org.junit.Assert.assertEquals;
import java.util.Timer;
import java.util.TimerTask;
import org.camunda.bpm.engine.ManagementService;
import org.camunda.bpm.engine.ProcessEngine;
import org.camunda.bpm.engine.ProcessEngineException;
import org.camunda.bpm.engine.RuntimeService;
import org.camunda.bpm.engine.cdi.impl.util.ProgrammaticBeanLookup;
import org.camunda.bpm.engine.impl.cfg.ProcessEngineConfigurationImpl;
import org.camunda.bpm.engine.impl.jobexecutor.JobExecutor;
import org.camunda.bpm.integrationtest.jobexecutor.beans.ManagedJobExecutorBean;
import org.camunda.bpm.integrationtest.util.DeploymentHelper;
import org.camunda.bpm.integrationtest.util.TestContainer;
import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.arquillian.junit.Arquillian;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.asset.EmptyAsset;
import org.jboss.shrinkwrap.api.spec.WebArchive;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
@RunWith(Arquillian.class)
public class ManagedJobExecutorTest {
  /**
   * Builds the test WAR deployed by Arquillian: an empty beans.xml to enable
   * CDI, the engine-cdi libraries, the classes under test, and the BPMN
   * resource used by the test method.
   */
  @Deployment
  public static WebArchive createDeployment() {
    WebArchive archive = ShrinkWrap.create(WebArchive.class, "test.war")
      .addAsWebInfResource(EmptyAsset.INSTANCE, "beans.xml")
      .addAsLibraries(DeploymentHelper.getEngineCdi())
      .addClass(ManagedJobExecutorTest.class)
      .addClass(ManagedJobExecutorBean.class)
      .addAsResource("org/camunda/bpm/integrationtest/jobexecutor/ManagedJobExecutorTest.testManagedExecutorUsed.bpmn20.xml");

    // Container-specific deployment descriptors (varies by target server).
    TestContainer.addContainerSpecificResourcesForNonPa(archive);

    return archive;
  }
  // Engine and services resolved per test in setUpCdiProcessEngineTestCase().
  protected ProcessEngine processEngine;
  protected ManagementService managementService;
  protected RuntimeService runtimeService;

  @Before
  public void setUpCdiProcessEngineTestCase() throws Exception {
    // Look up the CDI-managed bean that exposes the process engine and
    // cache the services the test uses.
    processEngine = (ProgrammaticBeanLookup.lookup(ManagedJobExecutorBean.class)).getProcessEngine();
    managementService = processEngine.getManagementService();
    runtimeService = processEngine.getRuntimeService();
  }

  @After
  public void tearDownCdiProcessEngineTestCase() throws Exception {
    // Drop references so each test starts from a fresh lookup.
    processEngine = null;
    managementService = null;
    runtimeService = null;
  }
  /**
   * Deploys the process, starts an instance, and checks that the pending job
   * is created, executed by the (managed) job executor, and leaves the
   * variable {@code foo = "bar"} behind.
   */
  @Test
  public void testManagedExecutorUsed() throws InterruptedException {
    org.camunda.bpm.engine.repository.Deployment deployment = processEngine.getRepositoryService().createDeployment()
        .addClasspathResource("org/camunda/bpm/integrationtest/jobexecutor/ManagedJobExecutorTest.testManagedExecutorUsed.bpmn20.xml")
        .deploy();

    try {
      String pid = runtimeService.startProcessInstanceByKey("testBusinessProcessScopedWithJobExecutor").getId();

      // Exactly one job waits before the executor runs.
      assertEquals(1L, managementService.createJobQuery().processInstanceId(pid).count());

      waitForJobExecutorToProcessAllJobs(pid, 5000l, 25l);

      // All jobs consumed and the expected variable written.
      assertEquals(0L, managementService.createJobQuery().processInstanceId(pid).count());
      assertEquals("bar", runtimeService.createVariableInstanceQuery().processInstanceIdIn(pid).variableName("foo").singleResult().getValue());
    } finally {
      // Cascading delete cleans up instances and history even on failure.
      processEngine.getRepositoryService().deleteDeployment(deployment.getId(), true);
    }
  }
protected void waitForJobExecutorToProcessAllJobs(String processInstanceId, long maxMillisToWait, long intervalMillis) {
JobExecutor jobExecutor = ((ProcessEngineConfigurationImpl) processEngine.getProcessEngineConfiguration()).getJobExecutor();
jobExecutor.start();
try {
Timer timer = new Timer();
InteruptTask task = new InteruptTask(Thread.currentThread());
timer.schedule(task, maxMillisToWait);
boolean areJobsAvailable = true;
try {
while (areJobsAvailable && !task.isTimeLimitExceeded()) {
Thread.sleep(intervalMillis);
areJobsAvailable = areJobsAvailable(processInstanceId);
}
} catch (InterruptedException e) {
} finally {
timer.cancel();
}
if (areJobsAvailable) {
throw new ProcessEngineException("time limit of " + maxMillisToWait + " was exceeded");
}
} finally {
jobExecutor.shutdown();
}
}
private static class InteruptTask extends TimerTask {
protected boolean timeLimitExceeded = false;
protected Thread thread;
public InteruptTask(Thread thread) {
this.thread = thread;
}
public boolean isTimeLimitExceeded() {
return timeLimitExceeded;
}
@Override
public void run() {
timeLimitExceeded = true;
thread.interrupt();
}
}
protected boolean areJobsAvailable(String processInstanceId) {
return !managementService
.createJobQuery()
.processInstanceId(processInstanceId)
.executable()
.list()
.isEmpty();
}
}
| camunda/camunda-bpm-platform | qa/integration-tests-engine/src/test/java/org/camunda/bpm/integrationtest/jobexecutor/ManagedJobExecutorTest.java | Java | apache-2.0 | 5,690 |
/*------------------------------------------------------------------------------
*
* cdbappendonlyblockdirectory.h
*
* Copyright (c) 2009, Greenplum Inc.
*
*------------------------------------------------------------------------------
*/
#ifndef CDBAPPENDONLYBLOCKDIRECTORY_H
#define CDBAPPENDONLYBLOCKDIRECTORY_H
#include "access/aosegfiles.h"
#include "access/aocssegfiles.h"
#include "access/appendonlytid.h"
#include "access/skey.h"
/* Tuning knobs for block directory granularity (defined elsewhere). */
extern int gp_blockdirectory_entry_min_range;
extern int gp_blockdirectory_minipage_size;
/*
 * One logical block directory entry: describes a contiguous range of
 * append-only blocks by file offset and row number.
 */
typedef struct AppendOnlyBlockDirectoryEntry
{
	/*
	 * The range of blocks covered by the Block Directory entry.
	 */
	struct range
	{
		int64		fileOffset;			/* offset of the first block in the range */
		int64		firstRowNum;		/* first row number covered by the range */
		int64		afterFileOffset;	/* offset just past the covered blocks */
		int64		lastRowNum;			/* last row number covered by the range */
	} range;
} AppendOnlyBlockDirectoryEntry;
/*
 * The entry in the minipage.
 * Maps a starting row number to a file offset, covering rowCount rows.
 */
typedef struct MinipageEntry
{
	int64		firstRowNum;	/* first row number addressed by this entry */
	int64		fileOffset;		/* file offset where that row's block starts */
	int64		rowCount;		/* number of rows covered by this entry */
} MinipageEntry;
/*
 * Define a varlena type for a minipage.
 */
typedef struct Minipage
{
	/* Total length. Must be the first. */
	int32		_len;
	int32		version;	/* format version of this minipage */
	uint32		nEntry;		/* number of valid entries in entry[] */
	/* Varlena array */
	MinipageEntry entry[1];
} Minipage;
/*
 * Define the relevant info for a minipage for each
 * column group.
 */
typedef struct MinipagePerColumnGroup
{
	Minipage   *minipage;			/* the minipage data itself */
	uint32		numMinipageEntries;	/* number of used entries in minipage */
	ItemPointerData tupleTid;		/* TID of the block directory tuple this
									 * minipage was loaded from/stored to */
} MinipagePerColumnGroup;
/*
 * I don't know the ideal value here. But let us put approximate
 * 8 minipages per heap page.
 */
#define NUM_MINIPAGE_ENTRIES (((MaxHeapTupleSize)/8 - sizeof(HeapTupleHeaderData) - 64 * 3)\
	/ sizeof(MinipageEntry))
/*
 * Define a structure for the append-only relation block directory.
 * Holds the state needed to read and maintain the directory for one
 * append-only relation (row- or column-oriented).
 */
typedef struct AppendOnlyBlockDirectory
{
	Relation	aoRel;			/* the append-only relation itself */
	Snapshot	appendOnlyMetaDataSnapshot;	/* snapshot for metadata access */
	Relation	blkdirRel;		/* block directory relation */
	Relation	blkdirIdx;		/* index on the block directory relation */
	int			numColumnGroups;
	bool		isAOCol;		/* true for column-oriented (AOCS) relations */
	bool *proj; /* projected columns, used only if isAOCol = TRUE */
	MemoryContext memoryContext;	/* context for directory allocations */
	int			totalSegfiles;		/* length of segmentFileInfo */
	FileSegInfo **segmentFileInfo;	/* per-segment-file metadata */
	/*
	 * Current segment file number.
	 */
	int			currentSegmentFileNum;
	FileSegInfo *currentSegmentFileInfo;
	/*
	 * Last minipage that contains an array of MinipageEntries.
	 * One slot per column group.
	 */
	MinipagePerColumnGroup *minipages;
	/*
	 * Some temporary space to help form tuples to be inserted into
	 * the block directory, and to help the index scan.
	 */
	Datum	   *values;
	bool	   *nulls;
	int			numScanKeys;
	ScanKey		scanKeys;
	StrategyNumber *strategyNumbers;
} AppendOnlyBlockDirectory;
/*
 * State describing the block currently being examined during a scan,
 * including its directory entry and decoded block header fields.
 */
typedef struct CurrentBlock
{
	AppendOnlyBlockDirectoryEntry blockDirectoryEntry;
	bool		have;				/* true if a current block is loaded */
	int64		fileOffset;			/* offset of the block in the segment file */
	int32		overallBlockLen;	/* total on-disk length of the block */
	int64		firstRowNum;		/* first row number in the block */
	int64		lastRowNum;			/* last row number in the block */
	bool		isCompressed;
	bool		isLargeContent;
	bool		gotContents;		/* true once block contents were read */
} CurrentBlock;
/* State of the segment file currently open for reading. */
typedef struct CurrentSegmentFile
{
	bool		isOpen;
	int			num;			/* segment file number */
	int64		logicalEof;		/* logical end-of-file of the segment */
} CurrentSegmentFile;
/* Accessors for the begin/end of the range covered by a directory entry. */
extern void AppendOnlyBlockDirectoryEntry_GetBeginRange(
	AppendOnlyBlockDirectoryEntry *directoryEntry,
	int64 *fileOffset,
	int64 *firstRowNum);
extern void AppendOnlyBlockDirectoryEntry_GetEndRange(
	AppendOnlyBlockDirectoryEntry *directoryEntry,
	int64 *afterFileOffset,
	int64 *lastRowNum);
/* Returns true if checkRowNum lies within the entry's row-number range. */
extern bool AppendOnlyBlockDirectoryEntry_RangeHasRow(
	AppendOnlyBlockDirectoryEntry *directoryEntry,
	int64 checkRowNum);
/* Looks up the directory entry covering the given tuple id and column group. */
extern bool AppendOnlyBlockDirectory_GetEntry(
	AppendOnlyBlockDirectory *blockDirectory,
	AOTupleId *aoTupleId,
	int columnGroupNo,
	AppendOnlyBlockDirectoryEntry *directoryEntry);
/* Initializers for the three usage modes: insert, search, and add-column. */
extern void AppendOnlyBlockDirectory_Init_forInsert(
	AppendOnlyBlockDirectory *blockDirectory,
	Snapshot appendOnlyMetaDataSnapshot,
	FileSegInfo *segmentFileInfo,
	int64 lastSequence,
	Relation aoRel,
	int segno,
	int numColumnGroups,
	bool isAOCol);
extern void AppendOnlyBlockDirectory_Init_forSearch(
	AppendOnlyBlockDirectory *blockDirectory,
	Snapshot appendOnlyMetaDataSnapshot,
	FileSegInfo **segmentFileInfo,
	int totalSegfiles,
	Relation aoRel,
	int numColumnGroups,
	bool isAOCol,
	bool *proj);
extern void AppendOnlyBlockDirectory_Init_addCol(
	AppendOnlyBlockDirectory *blockDirectory,
	Snapshot appendOnlyMetaDataSnapshot,
	FileSegInfo *segmentFileInfo,
	Relation aoRel,
	int segno,
	int numColumnGroups,
	bool isAOCol);
/* Entry insertion during normal load and during add-column rewrites. */
extern bool AppendOnlyBlockDirectory_InsertEntry(
	AppendOnlyBlockDirectory *blockDirectory,
	int columnGroupNo,
	int64 firstRowNum,
	int64 fileOffset,
	int64 rowCount,
	bool addColAction);
extern bool AppendOnlyBlockDirectory_addCol_InsertEntry(
	AppendOnlyBlockDirectory *blockDirectory,
	int columnGroupNo,
	int64 firstRowNum,
	int64 fileOffset,
	int64 rowCount);
/* Entry deletion, used for delete and update visibility maintenance. */
extern bool AppendOnlyBlockDirectory_DeleteEntry(
	AppendOnlyBlockDirectory *blockDirectory,
	AOTupleId *aoTupleId);
extern bool AppendOnlyBlockDirectory_DeleteEntryForUpdate(
	AppendOnlyBlockDirectory *visibilityBlockDirectory,
	AppendOnlyBlockDirectory *insertBlockDirectory,
	AOTupleId* aoTupleId);
/* Teardown counterparts of the Init_* functions above. */
extern void AppendOnlyBlockDirectory_End_forInsert(
	AppendOnlyBlockDirectory *blockDirectory);
extern void AppendOnlyBlockDirectory_End_forSearch(
	AppendOnlyBlockDirectory *blockDirectory);
extern void AppendOnlyBlockDirectory_End_addCol(
	AppendOnlyBlockDirectory *blockDirectory);
/* Removes directory entries belonging to one segment file / column group. */
extern void AppendOnlyBlockDirectory_DeleteSegmentFile(
	Relation aoRel,
	Snapshot snapshot,
	int segno,
	int columnGroupNo);
#endif
| rvs/gpdb | src/include/cdb/cdbappendonlyblockdirectory.h | C | apache-2.0 | 5,366 |
/*
Open Asset Import Library (assimp)
----------------------------------------------------------------------
Copyright (c) 2006-2016, assimp team
All rights reserved.
Redistribution and use of this software in source and binary forms,
with or without modification, are permitted provided that the
following conditions are met:
* Redistributions of source code must retain the above
copyright notice, this list of conditions and the
following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other
materials provided with the distribution.
* Neither the name of the assimp team, nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior
written permission of the assimp team.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------------
*/
/** @file ASELoader.h
* @brief Definition of the .ASE importer class.
*/
#ifndef AI_ASELOADER_H_INCLUDED
#define AI_ASELOADER_H_INCLUDED
#include "BaseImporter.h"
#include "../include/assimp/types.h"
struct aiNode;
#include "ASEParser.h"
namespace Assimp {
// --------------------------------------------------------------------------------
/** Importer class for the 3DS ASE ASCII format.
*
*/
class ASEImporter : public BaseImporter {
public:
    ASEImporter();
    ~ASEImporter();
public:
    // -------------------------------------------------------------------
    /** Returns whether the class can handle the format of the given file.
     * See BaseImporter::CanRead() for details.
     */
    bool CanRead( const std::string& pFile, IOSystem* pIOHandler,
        bool checkSig) const;
protected:
    // -------------------------------------------------------------------
    /** Return importer meta information.
     * See #BaseImporter::GetInfo for the details
     */
    const aiImporterDesc* GetInfo () const;
    // -------------------------------------------------------------------
    /** Imports the given file into the given scene structure.
     * See BaseImporter::InternReadFile() for details
     */
    void InternReadFile( const std::string& pFile, aiScene* pScene,
        IOSystem* pIOHandler);
    // -------------------------------------------------------------------
    /** Called prior to ReadFile().
     * The function is a request to the importer to update its configuration
     * basing on the Importer's configuration property list.
     */
    void SetupProperties(const Importer* pImp);
private:
    // -------------------------------------------------------------------
    /** Generate normal vectors basing on smoothing groups
     * (in some cases the normal are already contained in the file)
     * \param mesh Mesh to work on
     * \return false if the normals have been recomputed
     */
    bool GenerateNormals(ASE::Mesh& mesh);
    // -------------------------------------------------------------------
    /** Create valid vertex/normal/UV/color/face lists.
     * All elements are unique, faces have only one set of indices
     * after this step occurs.
     * \param mesh Mesh to work on
     */
    void BuildUniqueRepresentation(ASE::Mesh& mesh);
    // -------------------------------------------------------------------
    /** Create one-material-per-mesh meshes ;-)
     * \param mesh Mesh to work with
     * \param avOut Receives the list of all created meshes
     */
    void ConvertMeshes(ASE::Mesh& mesh, std::vector<aiMesh*>& avOut);
    // -------------------------------------------------------------------
    /** Convert a material to a aiMaterial object
     * \param mat Input material
     */
    void ConvertMaterial(ASE::Material& mat);
    // -------------------------------------------------------------------
    /** Setup the final material indices for each mesh
     */
    void BuildMaterialIndices();
    // -------------------------------------------------------------------
    /** Build the node graph
     * \param nodes List of parsed scene nodes
     */
    void BuildNodes(std::vector<ASE::BaseNode*>& nodes);
    // -------------------------------------------------------------------
    /** Build output cameras
     */
    void BuildCameras();
    // -------------------------------------------------------------------
    /** Build output lights
     */
    void BuildLights();
    // -------------------------------------------------------------------
    /** Build output animations
     * \param nodes List of parsed scene nodes carrying animation data
     */
    void BuildAnimations(const std::vector<ASE::BaseNode*>& nodes);
    // -------------------------------------------------------------------
    /** Add sub nodes to a node
     * \param nodes Candidate child nodes
     * \param pcParent parent node to be filled
     * \param szName Name of the parent node
     * \param matrix Current transform (second overload only)
     */
    void AddNodes(const std::vector<ASE::BaseNode*>& nodes,
        aiNode* pcParent,const char* szName);
    void AddNodes(const std::vector<ASE::BaseNode*>& nodes,
        aiNode* pcParent,const char* szName,
        const aiMatrix4x4& matrix);
    // Attaches the meshes belonging to a parsed node to the output node.
    void AddMeshes(const ASE::BaseNode* snode,aiNode* node);
    // -------------------------------------------------------------------
    /** Generate a default material and add it to the parser's list
     * Called if no material has been found in the file (rare for ASE,
     * but not impossible)
     */
    void GenerateDefaultMaterial();
protected:
    /** Parser instance */
    ASE::Parser* mParser;
    /** Buffer to hold the loaded file */
    char* mBuffer;
    /** Scene to be filled */
    aiScene* pcScene;
    /** Config options: Recompute the normals in every case - WA
        for 3DS Max broken ASE normal export */
    bool configRecomputeNormals;
    /** Config option: skip generation of a dummy skeleton mesh */
    bool noSkeletonMesh;
};
} // end of namespace Assimp
#endif // AI_3DSIMPORTER_H_INC
| EmilNorden/candle | lib/assimp-3.2/code/ASELoader.h | C | apache-2.0 | 6,553 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.pherf.workload.mt;
import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
import com.lmax.disruptor.WorkHandler;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.phoenix.coprocessor.TaskRegionObserver;
import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.pherf.PherfConstants;
import org.apache.phoenix.pherf.configuration.DataModel;
import org.apache.phoenix.pherf.configuration.Scenario;
import org.apache.phoenix.pherf.configuration.TenantGroup;
import org.apache.phoenix.pherf.util.PhoenixUtil;
import org.apache.phoenix.pherf.workload.mt.generators.TenantLoadEventGeneratorFactory.GeneratorType;
import org.apache.phoenix.pherf.workload.mt.MultiTenantTestUtils.TestConfigAndExpectations;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
/**
 * Multi-tenant workload integration tests focused on tenant view operations
 * and their validations. Exercises the tenant operation workload
 * {@link MultiTenantWorkload} and the workload handlers {@link WorkHandler}
 * once per supported load-event generator (WEIGHTED, UNIFORM, SEQUENTIAL).
 */
@Category(NeedsOwnMiniClusterTest.class)
@RunWith(Parameterized.class)
public class TenantViewOperationWorkloadIT extends ParallelStatsDisabledIT {
    private final MultiTenantTestUtils multiTenantTestUtils = new MultiTenantTestUtils();
    private final Properties properties = PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES, false);
    private final PhoenixUtil util = PhoenixUtil.create(true);
    // Region coprocessor environment of SYSTEM.TASK, used to drop child views in cleanup.
    private final RegionCoprocessorEnvironment taskRegionEnvironment;
    // Generator under test for this parameterized run.
    private GeneratorType generatorType;

    public TenantViewOperationWorkloadIT(String generatorType) throws Exception {
        this.generatorType = GeneratorType.valueOf(generatorType);
        taskRegionEnvironment =
                (RegionCoprocessorEnvironment)getUtility()
                        .getRSForFirstRegionInTable(
                                PhoenixDatabaseMetaData.SYSTEM_TASK_HBASE_TABLE_NAME)
                        .getRegions(PhoenixDatabaseMetaData.SYSTEM_TASK_HBASE_TABLE_NAME)
                        .get(0).getCoprocessorHost()
                        .findCoprocessorEnvironment(TaskRegionObserver.class.getName());
    }

    /** One run of the suite per generator type. */
    @Parameterized.Parameters( name = "generator_type={0}" )
    public static synchronized Collection<Object[]> data() {
        List<Object[]> testCases = Lists.newArrayList();
        testCases.add(new Object[] { "WEIGHTED" });
        testCases.add(new Object[] { "UNIFORM" });
        testCases.add(new Object[] { "SEQUENTIAL" });
        return testCases;
    }

    /** Applies the multi-tenant test schema before each test. */
    @Before
    public void setup() throws Exception {
        multiTenantTestUtils.applySchema(util,".*datamodel/.*test_mt.*.sql");
    }

    /** Drops the test tables and any child views created during the test. */
    @After
    public void cleanup() throws Exception {
        util.deleteTables("PHERF.*BASE_TABLE");
        if (taskRegionEnvironment != null) {
            util.dropChildView(taskRegionEnvironment, 2);
        }
    }

    /** Runs every scenario in the workload template and validates operations. */
    @Test public void testVariousOperations() throws Exception {
        DataModel model = multiTenantTestUtils.readTestDataModel(
                "/scenario/test_mt_workload_template.xml");
        for (Scenario scenario : model.getScenarios()) {
            TestConfigAndExpectations settings = getTestConfigAndExpectations(scenario, generatorType);
            scenario.setGeneratorName(generatorType.name());
            scenario.getLoadProfile().setTenantDistribution(settings.tenantGroups);
            multiTenantTestUtils.testVariousOperations(properties, model, scenario.getName(),
                    settings.expectedTenantGroups, settings.expectedOpGroups);
        }
    }

    /** Same workload as above but driven through a single work handler. */
    @Test public void testWorkloadWithOneHandler() throws Exception {
        int numHandlers = 1;
        DataModel model = multiTenantTestUtils.readTestDataModel(
                "/scenario/test_mt_workload_template.xml");
        for (Scenario scenario : model.getScenarios()) {
            TestConfigAndExpectations settings = getTestConfigAndExpectations(scenario, generatorType);
            scenario.setGeneratorName(generatorType.name());
            scenario.getLoadProfile().setTenantDistribution(settings.tenantGroups);
            multiTenantTestUtils.testWorkloadWithHandlers(properties, model, scenario.getName(),
                    numHandlers, settings.expectedTenantGroups, settings.expectedOpGroups);
        }
    }

    /** Same workload driven through multiple concurrent work handlers. */
    @Test public void testWorkloadWithManyHandlers() throws Exception {
        int numHandlers = 5;
        DataModel model = multiTenantTestUtils.readTestDataModel(
                "/scenario/test_mt_workload_template.xml");
        for (Scenario scenario : model.getScenarios()) {
            TestConfigAndExpectations settings = getTestConfigAndExpectations(scenario, generatorType);
            scenario.setGeneratorName(generatorType.name());
            scenario.getLoadProfile().setTenantDistribution(settings.tenantGroups);
            multiTenantTestUtils.testWorkloadWithHandlers(properties, model, scenario.getName(),
                    numHandlers, settings.expectedTenantGroups, settings.expectedOpGroups);
        }
    }

    /**
     * Derives the tenant distribution and expected group counts for the given
     * scenario and generator type. WEIGHTED generators honor the distribution
     * declared in the scenario; UNIFORM and SEQUENTIAL use a single synthetic
     * tenant group.
     */
    private TestConfigAndExpectations getTestConfigAndExpectations(Scenario scenario, GeneratorType generatorType) {
        TestConfigAndExpectations settings = new TestConfigAndExpectations();
        switch (generatorType) {
        case WEIGHTED:
            settings.tenantGroups = scenario.getLoadProfile().getTenantDistribution();
            settings.expectedOpGroups = scenario.getLoadProfile().getOpDistribution().size();
            settings.expectedTenantGroups = scenario.getLoadProfile().getTenantDistribution().size();
            // BUGFIX: a missing break here previously fell through to the default
            // branch, silently overwriting the WEIGHTED settings above.
            break;
        default:
            List<TenantGroup> tenantGroups = new ArrayList<>();
            TenantGroup tg1 = new TenantGroup();
            tg1.setId("tg1");
            tg1.setNumTenants(10);
            tg1.setWeight(100);
            tenantGroups.add(tg1);
            settings.tenantGroups = tenantGroups;
            settings.expectedTenantGroups = 1;
            settings.expectedOpGroups = scenario.getLoadProfile().getOpDistribution().size();
            break;
        }
        return settings;
    }
}
| ankitsinghal/phoenix | phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/TenantViewOperationWorkloadIT.java | Java | apache-2.0 | 7,326 |
/*
* JBoss, Home of Professional Open Source
* Copyright 2010 Red Hat Inc. and/or its affiliates and other contributors
* by the @authors tag. See the copyright.txt in the distribution for a
* full listing of individual contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.arquillian.test.spi.context;
import org.jboss.arquillian.core.spi.context.IdBoundContext;
/**
 * ClassContext
 *
 * <p>Context scoped to the lifecycle of a single test class; the active
 * context is identified by the test {@link Class} instance
 * (see {@link IdBoundContext}).</p>
 *
 * @author <a href="mailto:aslak@redhat.com">Aslak Knutsen</a>
 * @version $Revision: $
 */
public interface ClassContext extends IdBoundContext<Class<?>>
{
}
| andreiserea/arquillian-core | test/spi/src/main/java/org/jboss/arquillian/test/spi/context/ClassContext.java | Java | apache-2.0 | 1,083 |
<!---
Copyright 2015 The AMP HTML Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS-IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# <a name="amp-lightbox"></a> `amp-lightbox`
<table>
<tr>
<td width="40%"><strong>Description</strong></td>
<td>Allows for a “lightbox” or similar experience where upon user interaction, a component expands to fill the viewport until it is closed again by the user.</td>
</tr>
<tr>
<td width="40%"><strong>Availability</strong></td>
<td>Stable</td>
</tr>
<tr>
<td width="40%"><strong>Required Script</strong></td>
<td><code><script async custom-element="amp-lightbox" src="https://cdn.ampproject.org/v0/amp-lightbox-0.1.js"></script></code></td>
</tr>
<tr>
<td width="40%"><strong>Examples</strong></td>
<td><a href="https://ampbyexample.com/components/amp-lightbox">amp-lightbox.html</a><br /><a href="https://github.com/ampproject/amphtml/blob/master/examples/everything.amp.html">everything.amp.html</a></td>
</tr>
</table>
## Behavior
The `amp-lightbox` component defines the child elements that will be displayed in a full-viewport overlay. It is triggered to take up the viewport when the user taps or clicks on an element with `on` attribute that targets `amp-lightbox` element’s `id`.
### Closing the lightbox
Pressing the escape key on the keyboard will close the lightbox.
Alternatively, setting the `on` attribute on one or more elements within the lightbox, with its method set to `close`, will close the lightbox when the element is tapped or clicked.
Example:
```html
<button on="tap:my-lightbox">Open lightbox</button>
<amp-lightbox id="my-lightbox" layout="nodisplay">
<div class="lightbox">
<amp-img src="my-full-image.jpg" width=300 height=800 on="tap:my-lightbox.close">
</div>
</amp-lightbox>
```
## Styling
The `amp-lightbox` component can be styled with standard CSS.
## Validation errors
The following lists validation errors specific to the `amp-lightbox` tag
(see also `amp-lightbox` in the [AMP validator specification](https://github.com/ampproject/amphtml/blob/master/extensions/amp-lightbox/0.1/validator-amp-lightbox.protoascii)):
<table>
<tr>
<th width="40%"><strong>Validation Error</strong></th>
<th>Description</th>
</tr>
<tr>
<td width="40%"><a href="https://www.ampproject.org/docs/reference/validation_errors.html#tag-required-by-another-tag-is-missing">The 'example1' tag is missing or incorrect, but required by 'example2'.</a></td>
<td>Error thrown when required <code>amp-lightbox</code> extension <code>.js</code> script tag is missing or incorrect.</td>
</tr>
<tr>
<td width="40%"><a href="https://www.ampproject.org/docs/reference/validation_errors.html#implied-layout-isnt-supported-by-amp-tag">The implied layout 'example1' is not supported by tag 'example2'.</a></td>
<td>The only supported layout type is <code>NODISPLAY</code>. Error thrown if implied layout is any other value.</td>
</tr>
<tr>
<td width="40%"><a href="https://www.ampproject.org/docs/reference/validation_errors.html#specified-layout-isnt-supported-by-amp-tag">The specified layout 'example1' is not supported by tag 'example2'.</a></td>
<td>The only supported layout type is <code>NODISPLAY</code>. Error thrown if specified layout is any other value.</td>
</tr>
</table>
| ColombiaAnalytics/amphtml | extensions/amp-lightbox/amp-lightbox.md | Markdown | apache-2.0 | 3,814 |
using System;
using System.IO;
using System.Reflection;
using Chutzpah.Callbacks;
using Chutzpah.Models;
using Chutzpah.RunnerCallbacks;
using System.Linq;
using Chutzpah.Transformers;
using Chutzpah.Wrappers;
namespace Chutzpah
{
class Program
{
[STAThread]
public static int Main(string[] args)
{
if (args.Length == 0 || args[0] == "/?")
{
PrintHeader();
PrintUsage();
return -1;
}
AppDomain.CurrentDomain.UnhandledException += OnUnhandledException;
try
{
CommandLine commandLine = CommandLine.Parse(args);
if (!commandLine.NoLogo)
{
PrintHeader();
}
int failCount = RunTests(commandLine);
if (commandLine.Wait)
{
Console.WriteLine();
Console.Write("Press any key to continue...");
Console.ReadKey();
Console.WriteLine();
}
return failCount;
}
catch (ArgumentException ex)
{
Console.WriteLine();
Console.WriteLine("error: {0}", ex.Message);
return -1;
}
}
private static void PrintHeader()
{
Console.WriteLine("Chutzpah console test runner ({0}-bit .NET {1})", IntPtr.Size * 8, Environment.Version);
Console.WriteLine("Version {0}", Assembly.GetEntryAssembly().GetName().Version);
Console.WriteLine("Copyright (C) 2015 Matthew Manela (http://matthewmanela.com).");
}
static void OnUnhandledException(object sender, UnhandledExceptionEventArgs e)
{
var ex = e.ExceptionObject as Exception;
if (ex != null)
Console.WriteLine(ex.ToString());
else
Console.WriteLine("Error of unknown type thrown in applicaton domain");
Environment.Exit(1);
}
static void PrintUsage()
{
string executableName = Path.GetFileNameWithoutExtension(new Uri(Assembly.GetExecutingAssembly().CodeBase).LocalPath);
Console.WriteLine();
Console.WriteLine("usage: {0} [options]", executableName);
Console.WriteLine("usage: {0} <testFile> [options]", executableName);
Console.WriteLine();
Console.WriteLine("Valid options:");
Console.WriteLine(" /nologo : Do not show the copyright message");
Console.WriteLine(" /silent : Do not output running test count");
Console.WriteLine(" /teamcity : Forces TeamCity mode (normally auto-detected)");
Console.WriteLine(" /wait : Wait for input after completion");
Console.WriteLine(" /failOnError : Return a non-zero exit code if any script errors or timeouts occurs");
Console.WriteLine(" /debug : Print debugging information and tracing to console");
Console.WriteLine(" /trace : Logs tracing information to chutzpah.log");
Console.WriteLine(" /openInBrowser [name] : Launch the tests in a browser.");
Console.WriteLine(" : If optional name is provided will try to launch in that browser.");
Console.WriteLine(" : Name can be IE, Firefox, Chrome.");
Console.WriteLine(" /parallelism [n] : Max degree of parallelism for Chutzpah. Defaults to number of CPUs + 1");
Console.WriteLine(" : If you specify more than 1 the test output may be a bit jumbled");
Console.WriteLine(" /path path : Adds a path to a folder or file to the list of test paths to run.");
Console.WriteLine(" : Specify more than one to add multiple paths.");
Console.WriteLine(" : If you give a folder, it will be scanned for testable files.");
Console.WriteLine(" : (e.g. /path test1.html /path testFolder)");
Console.WriteLine(" /vsoutput : Print output in a format that the VS error list recognizes");
Console.WriteLine(" /coverage : Enable coverage collection");
Console.WriteLine(" /showFailureReport : Show a failure report after the test run. Usefull if you have a large number of tests.");
Console.WriteLine(" /settingsFileEnvironment : Sets the environment properties for a chutzpah.json settings file.");
Console.WriteLine(" : Specify more than one to add multiple environments.");
Console.WriteLine(" : (e.g. settingsFilePath;prop1=val1;prop2=val2).");
foreach (var transformer in new SummaryTransformerProvider().GetTransformers(new FileSystemWrapper()))
{
Console.WriteLine(" /{0} filename : {1}", transformer.Name, transformer.Description);
}
Console.WriteLine();
}
static int RunTests(CommandLine commandLine)
{
var testRunner = TestRunner.Create(debugEnabled: commandLine.Debug);
if (commandLine.Trace)
{
ChutzpahTracer.AddFileListener();
}
Console.WriteLine();
TestCaseSummary testResultsSummary = null;
try
{
var callback = commandLine.TeamCity
? (ITestMethodRunnerCallback)new TeamCityConsoleRunnerCallback()
: new StandardConsoleRunnerCallback(commandLine.Silent, commandLine.VsOutput, commandLine.ShowFailureReport, commandLine.FailOnError);
callback = new ParallelRunnerCallbackAdapter(callback);
var testOptions = new TestOptions
{
TestLaunchMode = commandLine.OpenInBrowser ? TestLaunchMode.FullBrowser : TestLaunchMode.HeadlessBrowser,
BrowserName = commandLine.BrowserName,
TestFileTimeoutMilliseconds = commandLine.TimeOutMilliseconds,
MaxDegreeOfParallelism = commandLine.Parallelism,
ChutzpahSettingsFileEnvironments = commandLine.SettingsFileEnvironments,
CoverageOptions = new CoverageOptions
{
Enabled = commandLine.Coverage,
IncludePatterns = (commandLine.CoverageIncludePatterns ?? "").Split(new[]{','},StringSplitOptions.RemoveEmptyEntries),
ExcludePatterns = (commandLine.CoverageExcludePatterns ?? "").Split(new[]{','},StringSplitOptions.RemoveEmptyEntries),
IgnorePatterns = (commandLine.CoverageIgnorePatterns ?? "").Split(new[]{','},StringSplitOptions.RemoveEmptyEntries)
}
};
if (!commandLine.Discovery)
{
testResultsSummary = testRunner.RunTests(commandLine.Files, testOptions, callback);
ProcessTestSummaryTransformers(commandLine, testResultsSummary);
}
else
{
Console.WriteLine("Test Discovery");
var tests = testRunner.DiscoverTests(commandLine.Files, testOptions).ToList();
Console.WriteLine("\nDiscovered {0} tests", tests.Count);
foreach (var test in tests)
{
Console.WriteLine("Test '{0}:{1}' from '{2}'", test.ModuleName, test.TestName, test.InputTestFile);
}
return 0;
}
}
catch (ArgumentException ex)
{
Console.WriteLine(ex.Message);
}
var failedCount = testResultsSummary.FailedCount;
if (commandLine.FailOnError && testResultsSummary.Errors.Any())
{
return failedCount > 0 ? failedCount : 1;
}
return failedCount;
}
private static void ProcessTestSummaryTransformers(CommandLine commandLine, TestCaseSummary testResultsSummary)
{
var transformers = new SummaryTransformerProvider().GetTransformers(new FileSystemWrapper());
foreach (var transformer in transformers.Where(x => commandLine.UnmatchedArguments.ContainsKey(x.Name)))
{
var path = commandLine.UnmatchedArguments[transformer.Name];
transformer.Transform(testResultsSummary, path);
}
}
}
} | johnm25/chutzpah | ConsoleRunner/Program.cs | C# | apache-2.0 | 9,487 |
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Immutable;
using System.Diagnostics;
using Microsoft.CodeAnalysis.CSharp.Symbols;
using Microsoft.CodeAnalysis.PooledObjects;
using Roslyn.Utilities;
namespace Microsoft.CodeAnalysis.CSharp
{
/// <summary>
/// Lowers C# dynamic operations into DLR call sites.  For each operation it
/// synthesizes (on demand) a static call-site container type and a call-site
/// field, builds a construction of the matching
/// Microsoft.CSharp.RuntimeBinder.Binder factory method, and produces the
/// site-initialization and site-invocation expressions that replace the
/// original bound node.
/// </summary>
internal sealed class LoweredDynamicOperationFactory
{
    private readonly SyntheticBoundNodeFactory _factory;
    private readonly int _methodOrdinal;
    // Lazily created container type holding the call-site fields for the current method.
    private NamedTypeSymbol _currentDynamicCallSiteContainer;
    // Monotonically increasing id used to name call-site fields uniquely.
    private int _callSiteIdDispenser;
    internal LoweredDynamicOperationFactory(SyntheticBoundNodeFactory factory, int methodOrdinal)
    {
        Debug.Assert(factory != null);
        _factory = factory;
        _methodOrdinal = methodOrdinal;
    }
    // We could read the values of the following enums from metadata instead of hardcoding them here but
    // - they can never change since existing programs have the values inlined and would be broken if the values changed their meaning,
    // - if any new flags are added to the runtime binder the compiler will change as well to produce them.
    // The only scenario that is not supported by hardcoding the values is when a completely new Framework is created
    // that redefines these constants and is not supposed to run existing programs.
    /// <summary>
    /// Corresponds to Microsoft.CSharp.RuntimeBinder.CSharpBinderFlags.
    /// </summary>
    [Flags]
    private enum CSharpBinderFlags
    {
        None = 0,
        CheckedContext = 1,
        InvokeSimpleName = 2,
        InvokeSpecialName = 4,
        BinaryOperationLogical = 8,
        ConvertExplicit = 16,
        ConvertArrayIndex = 32,
        ResultIndexed = 64,
        ValueFromCompoundAssignment = 128,
        ResultDiscarded = 256,
    }
    /// <summary>
    /// Corresponds to Microsoft.CSharp.RuntimeBinder.CSharpArgumentInfoFlags.
    /// </summary>
    [Flags]
    private enum CSharpArgumentInfoFlags
    {
        None = 0,
        UseCompileTimeType = 1,
        Constant = 2,
        NamedArgument = 4,
        IsRef = 8,
        IsOut = 16,
        IsStaticType = 32,
    }
    /// <summary>
    /// Lowers a dynamic conversion of <paramref name="loweredOperand"/> to
    /// <paramref name="resultType"/> via a Binder.Convert call site.
    /// </summary>
    internal LoweredDynamicOperation MakeDynamicConversion(
        BoundExpression loweredOperand,
        bool isExplicit,
        bool isArrayIndex,
        bool isChecked,
        TypeSymbol resultType)
    {
        _factory.Syntax = loweredOperand.Syntax;
        CSharpBinderFlags binderFlags = 0;
        // An array-index conversion is an implicit conversion, so both flags can't be set.
        Debug.Assert(!isExplicit || !isArrayIndex);
        if (isChecked)
        {
            binderFlags |= CSharpBinderFlags.CheckedContext;
        }
        if (isExplicit)
        {
            binderFlags |= CSharpBinderFlags.ConvertExplicit;
        }
        if (isArrayIndex)
        {
            binderFlags |= CSharpBinderFlags.ConvertArrayIndex;
        }
        var loweredArguments = ImmutableArray.Create(loweredOperand);
        var binderConstruction = MakeBinderConstruction(WellKnownMember.Microsoft_CSharp_RuntimeBinder_Binder__Convert, new[]
        {
            // flags:
            _factory.Literal((int)binderFlags),
            // target type:
            _factory.Typeof(resultType),
            // context:
            _factory.TypeofDynamicOperationContextType()
        });
        return MakeDynamicOperation(binderConstruction, null, RefKind.None, loweredArguments, default(ImmutableArray<RefKind>), null, resultType);
    }
    /// <summary>
    /// Lowers a dynamic unary operator application via a Binder.UnaryOperation call site.
    /// </summary>
    internal LoweredDynamicOperation MakeDynamicUnaryOperator(
        UnaryOperatorKind operatorKind,
        BoundExpression loweredOperand,
        TypeSymbol resultType)
    {
        Debug.Assert(operatorKind.IsDynamic());
        _factory.Syntax = loweredOperand.Syntax;
        CSharpBinderFlags binderFlags = 0;
        if (operatorKind.IsChecked())
        {
            binderFlags |= CSharpBinderFlags.CheckedContext;
        }
        var loweredArguments = ImmutableArray.Create(loweredOperand);
        MethodSymbol argumentInfoFactory = GetArgumentInfoFactory();
        // A null binder construction signals missing well-known members; MakeDynamicOperation reports the error.
        var binderConstruction = ((object)argumentInfoFactory != null) ? MakeBinderConstruction(WellKnownMember.Microsoft_CSharp_RuntimeBinder_Binder__UnaryOperation, new[]
        {
            // flags:
            _factory.Literal((int)binderFlags),
            // expression type:
            _factory.Literal((int)operatorKind.ToExpressionType()),
            // context:
            _factory.TypeofDynamicOperationContextType(),
            // argument infos:
            MakeCallSiteArgumentInfos(argumentInfoFactory, loweredArguments)
        }) : null;
        return MakeDynamicOperation(binderConstruction, null, RefKind.None, loweredArguments, default(ImmutableArray<RefKind>), null, resultType);
    }
    /// <summary>
    /// Lowers a dynamic binary operator application via a Binder.BinaryOperation call site.
    /// </summary>
    internal LoweredDynamicOperation MakeDynamicBinaryOperator(
        BinaryOperatorKind operatorKind,
        BoundExpression loweredLeft,
        BoundExpression loweredRight,
        bool isCompoundAssignment,
        TypeSymbol resultType)
    {
        Debug.Assert(operatorKind.IsDynamic());
        _factory.Syntax = loweredLeft.Syntax;
        CSharpBinderFlags binderFlags = 0;
        if (operatorKind.IsChecked())
        {
            binderFlags |= CSharpBinderFlags.CheckedContext;
        }
        if (operatorKind.IsLogical())
        {
            binderFlags |= CSharpBinderFlags.BinaryOperationLogical;
        }
        var loweredArguments = ImmutableArray.Create<BoundExpression>(loweredLeft, loweredRight);
        MethodSymbol argumentInfoFactory = GetArgumentInfoFactory();
        var binderConstruction = ((object)argumentInfoFactory != null) ? MakeBinderConstruction(WellKnownMember.Microsoft_CSharp_RuntimeBinder_Binder__BinaryOperation, new[]
        {
            // flags:
            _factory.Literal((int)binderFlags),
            // expression type:
            _factory.Literal((int)operatorKind.ToExpressionType(isCompoundAssignment)),
            // context:
            _factory.TypeofDynamicOperationContextType(),
            // argument infos:
            MakeCallSiteArgumentInfos(argumentInfoFactory, loweredArguments)
        }) : null;
        return MakeDynamicOperation(binderConstruction, null, RefKind.None, loweredArguments, default(ImmutableArray<RefKind>), null, resultType);
    }
    /// <summary>
    /// Lowers a dynamic member invocation (e.g. d.M(args)) via a Binder.InvokeMember call site.
    /// A type-expression receiver is turned into a typeof() argument marked as a static type.
    /// </summary>
    internal LoweredDynamicOperation MakeDynamicMemberInvocation(
        string name,
        BoundExpression loweredReceiver,
        ImmutableArray<TypeWithAnnotations> typeArgumentsWithAnnotations,
        ImmutableArray<BoundExpression> loweredArguments,
        ImmutableArray<string> argumentNames,
        ImmutableArray<RefKind> refKinds,
        bool hasImplicitReceiver,
        bool resultDiscarded)
    {
        _factory.Syntax = loweredReceiver.Syntax;
        CSharpBinderFlags binderFlags = 0;
        if (hasImplicitReceiver && !_factory.TopLevelMethod.IsStatic)
        {
            binderFlags |= CSharpBinderFlags.InvokeSimpleName;
        }
        TypeSymbol resultType;
        if (resultDiscarded)
        {
            // When the result is unused the binder may bind to a void-returning member.
            binderFlags |= CSharpBinderFlags.ResultDiscarded;
            resultType = _factory.SpecialType(SpecialType.System_Void);
        }
        else
        {
            resultType = AssemblySymbol.DynamicType;
        }
        RefKind receiverRefKind;
        bool receiverIsStaticType;
        if (loweredReceiver.Kind == BoundKind.TypeExpression)
        {
            loweredReceiver = _factory.Typeof(((BoundTypeExpression)loweredReceiver).Type);
            receiverRefKind = RefKind.None;
            receiverIsStaticType = true;
        }
        else
        {
            receiverRefKind = GetReceiverRefKind(loweredReceiver);
            receiverIsStaticType = false;
        }
        MethodSymbol argumentInfoFactory = GetArgumentInfoFactory();
        var binderConstruction = ((object)argumentInfoFactory != null) ? MakeBinderConstruction(WellKnownMember.Microsoft_CSharp_RuntimeBinder_Binder__InvokeMember, new[]
        {
            // flags:
            _factory.Literal((int)binderFlags),
            // member name:
            _factory.Literal(name),
            // type arguments:
            typeArgumentsWithAnnotations.IsDefaultOrEmpty ?
                _factory.Null(_factory.WellKnownArrayType(WellKnownType.System_Type)) :
                _factory.ArrayOrEmpty(_factory.WellKnownType(WellKnownType.System_Type), _factory.TypeOfs(typeArgumentsWithAnnotations)),
            // context:
            _factory.TypeofDynamicOperationContextType(),
            // argument infos:
            MakeCallSiteArgumentInfos(argumentInfoFactory, loweredArguments, argumentNames, refKinds, loweredReceiver, receiverRefKind, receiverIsStaticType)
        }) : null;
        return MakeDynamicOperation(binderConstruction, loweredReceiver, receiverRefKind, loweredArguments, refKinds, null, resultType);
    }
    /// <summary>
    /// Lowers an invocation of the named accessor with a single handler argument via a
    /// Binder.InvokeMember call site flagged InvokeSpecialName | ResultDiscarded.
    /// </summary>
    internal LoweredDynamicOperation MakeDynamicEventAccessorInvocation(
        string accessorName,
        BoundExpression loweredReceiver,
        BoundExpression loweredHandler)
    {
        _factory.Syntax = loweredReceiver.Syntax;
        CSharpBinderFlags binderFlags = CSharpBinderFlags.InvokeSpecialName | CSharpBinderFlags.ResultDiscarded;
        var loweredArguments = ImmutableArray<BoundExpression>.Empty;
        var resultType = AssemblySymbol.DynamicType;
        MethodSymbol argumentInfoFactory = GetArgumentInfoFactory();
        var binderConstruction = ((object)argumentInfoFactory != null) ? MakeBinderConstruction(WellKnownMember.Microsoft_CSharp_RuntimeBinder_Binder__InvokeMember, new[]
        {
            // flags:
            _factory.Literal((int)binderFlags),
            // member name:
            _factory.Literal(accessorName),
            // type arguments:
            _factory.Null(_factory.WellKnownArrayType(WellKnownType.System_Type)),
            // context:
            _factory.TypeofDynamicOperationContextType(),
            // argument infos:
            MakeCallSiteArgumentInfos(argumentInfoFactory, loweredArguments, loweredReceiver: loweredReceiver, loweredRight: loweredHandler)
        }) : null;
        return MakeDynamicOperation(binderConstruction, loweredReceiver, RefKind.None, loweredArguments, default(ImmutableArray<RefKind>), loweredHandler, resultType);
    }
    /// <summary>
    /// Lowers a dynamic delegate/receiver invocation (d(args)) via a Binder.Invoke call site.
    /// </summary>
    internal LoweredDynamicOperation MakeDynamicInvocation(
        BoundExpression loweredReceiver,
        ImmutableArray<BoundExpression> loweredArguments,
        ImmutableArray<string> argumentNames,
        ImmutableArray<RefKind> refKinds,
        bool resultDiscarded)
    {
        _factory.Syntax = loweredReceiver.Syntax;
        TypeSymbol resultType;
        CSharpBinderFlags binderFlags = 0;
        if (resultDiscarded)
        {
            binderFlags |= CSharpBinderFlags.ResultDiscarded;
            resultType = _factory.SpecialType(SpecialType.System_Void);
        }
        else
        {
            resultType = AssemblySymbol.DynamicType;
        }
        MethodSymbol argumentInfoFactory = GetArgumentInfoFactory();
        var binderConstruction = ((object)argumentInfoFactory != null) ? MakeBinderConstruction(WellKnownMember.Microsoft_CSharp_RuntimeBinder_Binder__Invoke, new[]
        {
            // flags:
            _factory.Literal((int)binderFlags),
            // context:
            _factory.TypeofDynamicOperationContextType(),
            // argument infos:
            MakeCallSiteArgumentInfos(argumentInfoFactory, loweredArguments, argumentNames, refKinds, loweredReceiver)
        }) : null;
        return MakeDynamicOperation(binderConstruction, loweredReceiver, RefKind.None, loweredArguments, refKinds, null, resultType);
    }
    /// <summary>
    /// Lowers a dynamically dispatched constructor invocation via a Binder.InvokeConstructor
    /// call site; the constructed type is passed as a static-type typeof() receiver.
    /// </summary>
    internal LoweredDynamicOperation MakeDynamicConstructorInvocation(
        SyntaxNode syntax,
        TypeSymbol type,
        ImmutableArray<BoundExpression> loweredArguments,
        ImmutableArray<string> argumentNames,
        ImmutableArray<RefKind> refKinds)
    {
        _factory.Syntax = syntax;
        var loweredReceiver = _factory.Typeof(type);
        MethodSymbol argumentInfoFactory = GetArgumentInfoFactory();
        var binderConstruction = ((object)argumentInfoFactory != null) ? MakeBinderConstruction(WellKnownMember.Microsoft_CSharp_RuntimeBinder_Binder__InvokeConstructor, new[]
        {
            // flags:
            _factory.Literal(0),
            // context:
            _factory.TypeofDynamicOperationContextType(),
            // argument infos:
            MakeCallSiteArgumentInfos(argumentInfoFactory, loweredArguments, argumentNames, refKinds, loweredReceiver, receiverIsStaticType: true)
        }) : null;
        return MakeDynamicOperation(binderConstruction, loweredReceiver, RefKind.None, loweredArguments, refKinds, null, type);
    }
    /// <summary>
    /// Lowers a dynamic member read (d.Name) via a Binder.GetMember call site.
    /// </summary>
    internal LoweredDynamicOperation MakeDynamicGetMember(
        BoundExpression loweredReceiver,
        string name,
        bool resultIndexed)
    {
        _factory.Syntax = loweredReceiver.Syntax;
        CSharpBinderFlags binderFlags = 0;
        if (resultIndexed)
        {
            binderFlags |= CSharpBinderFlags.ResultIndexed;
        }
        var loweredArguments = ImmutableArray<BoundExpression>.Empty;
        var resultType = DynamicTypeSymbol.Instance;
        MethodSymbol argumentInfoFactory = GetArgumentInfoFactory();
        var binderConstruction = ((object)argumentInfoFactory != null) ? MakeBinderConstruction(WellKnownMember.Microsoft_CSharp_RuntimeBinder_Binder__GetMember, new[]
        {
            // flags:
            _factory.Literal((int)binderFlags),
            // name:
            _factory.Literal(name),
            // context:
            _factory.TypeofDynamicOperationContextType(),
            // argument infos:
            MakeCallSiteArgumentInfos(argumentInfoFactory, loweredArguments, loweredReceiver: loweredReceiver)
        }) : null;
        return MakeDynamicOperation(binderConstruction, loweredReceiver, RefKind.None, loweredArguments, default(ImmutableArray<RefKind>), null, resultType);
    }
    /// <summary>
    /// Lowers a dynamic member write (d.Name = value) via a Binder.SetMember call site.
    /// </summary>
    internal LoweredDynamicOperation MakeDynamicSetMember(
        BoundExpression loweredReceiver,
        string name,
        BoundExpression loweredRight,
        bool isCompoundAssignment = false,
        bool isChecked = false)
    {
        _factory.Syntax = loweredReceiver.Syntax;
        CSharpBinderFlags binderFlags = 0;
        if (isCompoundAssignment)
        {
            binderFlags |= CSharpBinderFlags.ValueFromCompoundAssignment;
            // CheckedContext is only relevant when the value came from a compound assignment.
            if (isChecked)
            {
                binderFlags |= CSharpBinderFlags.CheckedContext;
            }
        }
        var loweredArguments = ImmutableArray<BoundExpression>.Empty;
        MethodSymbol argumentInfoFactory = GetArgumentInfoFactory();
        var binderConstruction = ((object)argumentInfoFactory != null) ? MakeBinderConstruction(WellKnownMember.Microsoft_CSharp_RuntimeBinder_Binder__SetMember, new[]
        {
            // flags:
            _factory.Literal((int)binderFlags),
            // name:
            _factory.Literal(name),
            // context:
            _factory.TypeofDynamicOperationContextType(),
            // argument infos:
            MakeCallSiteArgumentInfos(argumentInfoFactory, loweredArguments, loweredReceiver: loweredReceiver, loweredRight: loweredRight)
        }) : null;
        return MakeDynamicOperation(binderConstruction, loweredReceiver, RefKind.None, loweredArguments, default(ImmutableArray<RefKind>), loweredRight, AssemblySymbol.DynamicType);
    }
    /// <summary>
    /// Lowers a dynamic indexer read (d[args]) via a Binder.GetIndex call site.
    /// </summary>
    internal LoweredDynamicOperation MakeDynamicGetIndex(
        BoundExpression loweredReceiver,
        ImmutableArray<BoundExpression> loweredArguments,
        ImmutableArray<string> argumentNames,
        ImmutableArray<RefKind> refKinds)
    {
        _factory.Syntax = loweredReceiver.Syntax;
        var resultType = DynamicTypeSymbol.Instance;
        MethodSymbol argumentInfoFactory = GetArgumentInfoFactory();
        var binderConstruction = ((object)argumentInfoFactory != null) ? MakeBinderConstruction(WellKnownMember.Microsoft_CSharp_RuntimeBinder_Binder__GetIndex, new[]
        {
            // flags (unused):
            _factory.Literal((int)CSharpBinderFlags.None),
            // context:
            _factory.TypeofDynamicOperationContextType(),
            // argument infos:
            MakeCallSiteArgumentInfos(argumentInfoFactory, loweredArguments, argumentNames, refKinds, loweredReceiver: loweredReceiver)
        }) : null;
        return MakeDynamicOperation(binderConstruction, loweredReceiver, RefKind.None, loweredArguments, refKinds, null, resultType);
    }
    /// <summary>
    /// Lowers a dynamic indexer write (d[args] = value) via a Binder.SetIndex call site.
    /// </summary>
    internal LoweredDynamicOperation MakeDynamicSetIndex(
        BoundExpression loweredReceiver,
        ImmutableArray<BoundExpression> loweredArguments,
        ImmutableArray<string> argumentNames,
        ImmutableArray<RefKind> refKinds,
        BoundExpression loweredRight,
        bool isCompoundAssignment = false,
        bool isChecked = false)
    {
        CSharpBinderFlags binderFlags = 0;
        if (isCompoundAssignment)
        {
            binderFlags |= CSharpBinderFlags.ValueFromCompoundAssignment;
            if (isChecked)
            {
                binderFlags |= CSharpBinderFlags.CheckedContext;
            }
        }
        // A value-type receiver may need to be passed by ref (see GetReceiverRefKind).
        var loweredReceiverRefKind = GetReceiverRefKind(loweredReceiver);
        var resultType = DynamicTypeSymbol.Instance;
        MethodSymbol argumentInfoFactory = GetArgumentInfoFactory();
        var binderConstruction = ((object)argumentInfoFactory != null) ? MakeBinderConstruction(WellKnownMember.Microsoft_CSharp_RuntimeBinder_Binder__SetIndex, new[]
        {
            // flags (unused):
            _factory.Literal((int)binderFlags),
            // context:
            _factory.TypeofDynamicOperationContextType(),
            // argument infos:
            MakeCallSiteArgumentInfos(argumentInfoFactory, loweredArguments, argumentNames, refKinds, loweredReceiver, loweredReceiverRefKind, loweredRight: loweredRight)
        }) : null;
        return MakeDynamicOperation(binderConstruction, loweredReceiver, loweredReceiverRefKind, loweredArguments, refKinds, loweredRight, resultType);
    }
    /// <summary>
    /// Lowers a runtime "is this member an event?" test via a Binder.IsEvent call site;
    /// the site returns a System.Boolean.
    /// </summary>
    internal LoweredDynamicOperation MakeDynamicIsEventTest(string name, BoundExpression loweredReceiver)
    {
        _factory.Syntax = loweredReceiver.Syntax;
        var resultType = _factory.SpecialType(SpecialType.System_Boolean);
        var binderConstruction = MakeBinderConstruction(WellKnownMember.Microsoft_CSharp_RuntimeBinder_Binder__IsEvent, new[]
        {
            // flags (unused):
            _factory.Literal((int)0),
            // member name:
            _factory.Literal(name),
            // context:
            _factory.TypeofDynamicOperationContextType()
        });
        return MakeDynamicOperation(binderConstruction, loweredReceiver, RefKind.None, ImmutableArray<BoundExpression>.Empty, default(ImmutableArray<RefKind>), null, resultType);
    }
    // Returns CSharpArgumentInfo.Create; isOptional: false means a missing member is diagnosed.
    private MethodSymbol GetArgumentInfoFactory()
    {
        return _factory.WellKnownMethod(WellKnownMember.Microsoft_CSharp_RuntimeBinder_CSharpArgumentInfo__Create, isOptional: false);
    }
    // Builds a call to the given Binder factory method, or returns null if the member is missing.
    private BoundExpression MakeBinderConstruction(WellKnownMember factoryMethod, BoundExpression[] args)
    {
        var binderFactory = _factory.WellKnownMember(factoryMethod);
        if ((object)binderFactory == null)
        {
            return null;
        }
        return _factory.Call(null, (MethodSymbol)binderFactory, args.AsImmutableOrNull());
    }
    // If we have a struct calling object, then we need to pass it by ref, provided
    // that it was an Lvalue. For instance,
    // Struct s = ...; dynamic d = ...;
    // s.M(d); // becomes Site(ref s, d)
    // however
    // dynamic d = ...;
    // GetS().M(d); // becomes Site(GetS(), d) without ref on the target obj arg
    internal static RefKind GetReceiverRefKind(BoundExpression loweredReceiver)
    {
        if (!loweredReceiver.Type.IsValueType)
        {
            return RefKind.None;
        }
        switch (loweredReceiver.Kind)
        {
            case BoundKind.Local:
            case BoundKind.Parameter:
            case BoundKind.ArrayAccess:
            case BoundKind.ThisReference:
            case BoundKind.PointerIndirectionOperator:
            case BoundKind.PointerElementAccess:
            case BoundKind.RefValueOperator:
                return RefKind.Ref;
            case BoundKind.BaseReference:
            // base dynamic dispatch is not supported, an error has already been reported
            case BoundKind.TypeExpression:
                throw ExceptionUtilities.UnexpectedValue(loweredReceiver.Kind);
        }
        return RefKind.None;
    }
    /// <summary>
    /// Builds the CSharpArgumentInfo[] passed to the binder: one entry for the receiver
    /// (if any), one per argument, and one for the right-hand side (if any), in that order.
    /// </summary>
    internal BoundExpression MakeCallSiteArgumentInfos(
        MethodSymbol argumentInfoFactory,
        ImmutableArray<BoundExpression> loweredArguments,
        ImmutableArray<string> argumentNames = default(ImmutableArray<string>),
        ImmutableArray<RefKind> refKinds = default(ImmutableArray<RefKind>),
        BoundExpression loweredReceiver = null,
        RefKind receiverRefKind = RefKind.None,
        bool receiverIsStaticType = false,
        BoundExpression loweredRight = null)
    {
        const string NoName = null;
        Debug.Assert(argumentNames.IsDefaultOrEmpty || loweredArguments.Length == argumentNames.Length);
        Debug.Assert(refKinds.IsDefault || loweredArguments.Length == refKinds.Length);
        Debug.Assert(!receiverIsStaticType || receiverRefKind == RefKind.None);
        var infos = new BoundExpression[(loweredReceiver != null ? 1 : 0) + loweredArguments.Length + (loweredRight != null ? 1 : 0)];
        int j = 0;
        if (loweredReceiver != null)
        {
            infos[j++] = GetArgumentInfo(argumentInfoFactory, loweredReceiver, NoName, receiverRefKind, receiverIsStaticType);
        }
        for (int i = 0; i < loweredArguments.Length; i++)
        {
            infos[j++] = GetArgumentInfo(
                argumentInfoFactory,
                loweredArguments[i],
                argumentNames.IsDefaultOrEmpty ? NoName : argumentNames[i],
                refKinds.IsDefault ? RefKind.None : refKinds[i],
                isStaticType: false);
        }
        if (loweredRight != null)
        {
            infos[j++] = GetArgumentInfo(argumentInfoFactory, loweredRight, NoName, RefKind.None, isStaticType: false);
        }
        return _factory.ArrayOrEmpty(argumentInfoFactory.ContainingType, infos);
    }
    /// <summary>
    /// Assembles the full lowered operation: picks/synthesizes the call-site delegate type,
    /// defines the static call-site field, and produces the lazy site initialization
    /// (null check + CallSite&lt;T&gt;.Create) plus the site.Target.Invoke(...) expression.
    /// Returns LoweredDynamicOperation.Bad and reports ERR_DynamicRequiredTypesMissing
    /// when required well-known types/members are absent.
    /// </summary>
    internal LoweredDynamicOperation MakeDynamicOperation(
        BoundExpression binderConstruction,
        BoundExpression loweredReceiver,
        RefKind receiverRefKind,
        ImmutableArray<BoundExpression> loweredArguments,
        ImmutableArray<RefKind> refKinds,
        BoundExpression loweredRight,
        TypeSymbol resultType)
    {
        Debug.Assert(!loweredArguments.IsDefault);
        // get well-known types and members we need:
        NamedTypeSymbol delegateTypeOverMethodTypeParameters = GetDelegateType(loweredReceiver, receiverRefKind, loweredArguments, refKinds, loweredRight, resultType);
        NamedTypeSymbol callSiteTypeGeneric = _factory.WellKnownType(WellKnownType.System_Runtime_CompilerServices_CallSite_T);
        MethodSymbol callSiteFactoryGeneric = _factory.WellKnownMethod(WellKnownMember.System_Runtime_CompilerServices_CallSite_T__Create);
        FieldSymbol callSiteTargetFieldGeneric = (FieldSymbol)_factory.WellKnownMember(WellKnownMember.System_Runtime_CompilerServices_CallSite_T__Target);
        MethodSymbol delegateInvoke;
        if (binderConstruction == null ||
            (object)delegateTypeOverMethodTypeParameters == null ||
            delegateTypeOverMethodTypeParameters.IsErrorType() ||
            (object)(delegateInvoke = delegateTypeOverMethodTypeParameters.DelegateInvokeMethod) == null ||
            callSiteTypeGeneric.IsErrorType() ||
            (object)callSiteFactoryGeneric == null ||
            (object)callSiteTargetFieldGeneric == null)
        {
            // CS1969: One or more types required to compile a dynamic expression cannot be found.
            // Dev11 reports it with source location for each dynamic operation, which results in many error messages.
            // The diagnostic that names the specific missing type or member has already been reported.
            _factory.Diagnostics.Add(ErrorCode.ERR_DynamicRequiredTypesMissing, NoLocation.Singleton);
            return LoweredDynamicOperation.Bad(loweredReceiver, loweredArguments, loweredRight, resultType);
        }
        if ((object)_currentDynamicCallSiteContainer == null)
        {
            _currentDynamicCallSiteContainer = CreateCallSiteContainer(_factory, _methodOrdinal);
        }
        var containerDef = (SynthesizedContainer)_currentDynamicCallSiteContainer.OriginalDefinition;
        var methodToContainerTypeParametersMap = containerDef.TypeMap;
        ImmutableArray<LocalSymbol> temps = MakeTempsForDiscardArguments(ref loweredArguments);
        var callSiteType = callSiteTypeGeneric.Construct(new[] { delegateTypeOverMethodTypeParameters });
        var callSiteFactoryMethod = callSiteFactoryGeneric.AsMember(callSiteType);
        var callSiteTargetField = callSiteTargetFieldGeneric.AsMember(callSiteType);
        var callSiteField = DefineCallSiteStorageSymbol(containerDef, delegateTypeOverMethodTypeParameters, methodToContainerTypeParametersMap);
        var callSiteFieldAccess = _factory.Field(null, callSiteField);
        var callSiteArguments = GetCallSiteArguments(callSiteFieldAccess, loweredReceiver, loweredArguments, loweredRight);
        var nullCallSite = _factory.Null(callSiteField.Type);
        // site ?? (site = CallSite<T>.Create(binder)) — initialize the site on first use.
        var siteInitialization = _factory.Conditional(
            _factory.ObjectEqual(callSiteFieldAccess, nullCallSite),
            _factory.AssignmentExpression(callSiteFieldAccess, _factory.Call(null, callSiteFactoryMethod, binderConstruction)),
            nullCallSite,
            callSiteField.Type);
        var siteInvocation = _factory.Call(
            _factory.Field(callSiteFieldAccess, callSiteTargetField),
            delegateInvoke,
            callSiteArguments);
        return new LoweredDynamicOperation(_factory, siteInitialization, siteInvocation, resultType, temps);
    }
    /// <summary>
    /// If there are any discards in the arguments, create locals for each, updates the arguments and
    /// returns the symbols that were created.
    /// Returns default if no discards found.
    /// </summary>
    private ImmutableArray<LocalSymbol> MakeTempsForDiscardArguments(ref ImmutableArray<BoundExpression> loweredArguments)
    {
        int discardCount = loweredArguments.Count(a => a.Kind == BoundKind.DiscardExpression);
        if (discardCount == 0)
        {
            return ImmutableArray<LocalSymbol>.Empty;
        }
        ArrayBuilder<LocalSymbol> temporariesBuilder = ArrayBuilder<LocalSymbol>.GetInstance(discardCount);
        loweredArguments = _factory.MakeTempsForDiscardArguments(loweredArguments, temporariesBuilder);
        return temporariesBuilder.ToImmutableAndFree();
    }
    /// <summary>
    /// Creates the synthesized nested container type that holds this method's call-site fields.
    /// For a generic top-level method the container is constructed over the method's type parameters.
    /// </summary>
    private static NamedTypeSymbol CreateCallSiteContainer(SyntheticBoundNodeFactory factory, int methodOrdinal)
    {
        // We don't reuse call-sites during EnC. Each edit creates a new container and sites.
        int generation = factory.CompilationState.ModuleBuilderOpt.CurrentGenerationOrdinal;
        var containerName = GeneratedNames.MakeDynamicCallSiteContainerName(methodOrdinal, generation);
        var synthesizedContainer = new DynamicSiteContainer(containerName, factory.TopLevelMethod);
        factory.AddNestedType(synthesizedContainer);
        if (factory.TopLevelMethod.IsGenericMethod)
        {
            return synthesizedContainer.Construct(factory.TopLevelMethod.TypeParameters.Cast<TypeParameterSymbol, TypeSymbol>());
        }
        return synthesizedContainer;
    }
    /// <summary>
    /// Defines a public static CallSite&lt;T&gt; field on the container for one dynamic operation,
    /// substituting the method's type parameters with the container's.
    /// </summary>
    internal FieldSymbol DefineCallSiteStorageSymbol(NamedTypeSymbol containerDefinition, NamedTypeSymbol delegateTypeOverMethodTypeParameters, TypeMap methodToContainerTypeParametersMap)
    {
        var fieldName = GeneratedNames.MakeDynamicCallSiteFieldName(_callSiteIdDispenser++);
        var delegateTypeOverContainerTypeParameters = methodToContainerTypeParametersMap.SubstituteNamedType(delegateTypeOverMethodTypeParameters);
        var callSiteType = _factory.Compilation.GetWellKnownType(WellKnownType.System_Runtime_CompilerServices_CallSite_T).Construct(new[] { delegateTypeOverContainerTypeParameters });
        var field = new SynthesizedFieldSymbol(containerDefinition, callSiteType, fieldName, isPublic: true, isStatic: true);
        _factory.AddField(containerDefinition, field);
        return _currentDynamicCallSiteContainer.IsGenericType ? field.AsMember(_currentDynamicCallSiteContainer) : field;
    }
    /// <summary>
    /// Picks the call-site delegate type: a well-known Action/Func delegate when there are no
    /// by-ref parameters and a suitable one exists, otherwise a synthesized delegate with the
    /// required by-ref parameter positions.
    /// </summary>
    internal NamedTypeSymbol GetDelegateType(
        BoundExpression loweredReceiver,
        RefKind receiverRefKind,
        ImmutableArray<BoundExpression> loweredArguments,
        ImmutableArray<RefKind> refKinds,
        BoundExpression loweredRight,
        TypeSymbol resultType)
    {
        Debug.Assert(refKinds.IsDefaultOrEmpty || refKinds.Length == loweredArguments.Length);
        var callSiteType = _factory.WellKnownType(WellKnownType.System_Runtime_CompilerServices_CallSite);
        if (callSiteType.IsErrorType())
        {
            return null;
        }
        var delegateSignature = MakeCallSiteDelegateSignature(callSiteType, loweredReceiver, loweredArguments, loweredRight, resultType);
        bool returnsVoid = resultType.SpecialType == SpecialType.System_Void;
        bool hasByRefs = receiverRefKind != RefKind.None || !refKinds.IsDefaultOrEmpty;
        if (!hasByRefs)
        {
            var wkDelegateType = returnsVoid ?
                WellKnownTypes.GetWellKnownActionDelegate(invokeArgumentCount: delegateSignature.Length) :
                WellKnownTypes.GetWellKnownFunctionDelegate(invokeArgumentCount: delegateSignature.Length - 1);
            if (wkDelegateType != WellKnownType.Unknown)
            {
                var delegateType = _factory.Compilation.GetWellKnownType(wkDelegateType);
                if (!delegateType.HasUseSiteError)
                {
                    return delegateType.Construct(delegateSignature);
                }
            }
        }
        BitVector byRefs;
        if (hasByRefs)
        {
            // Slot 0 is the CallSite parameter itself, hence the leading 1 and j starting at 1.
            byRefs = BitVector.Create(1 + (loweredReceiver != null ? 1 : 0) + loweredArguments.Length + (loweredRight != null ? 1 : 0));
            int j = 1;
            if (loweredReceiver != null)
            {
                byRefs[j++] = receiverRefKind != RefKind.None;
            }
            if (!refKinds.IsDefault)
            {
                for (int i = 0; i < refKinds.Length; i++, j++)
                {
                    if (refKinds[i] != RefKind.None)
                    {
                        byRefs[j] = true;
                    }
                }
            }
        }
        else
        {
            byRefs = default(BitVector);
        }
        int parameterCount = delegateSignature.Length - (returnsVoid ? 0 : 1);
        int generation = _factory.CompilationState.ModuleBuilderOpt.CurrentGenerationOrdinal;
        var synthesizedType = _factory.Compilation.AnonymousTypeManager.SynthesizeDelegate(parameterCount, byRefs, returnsVoid, generation);
        return synthesizedType.Construct(delegateSignature);
    }
    /// <summary>
    /// Builds a CSharpArgumentInfo.Create(flags, name) call describing one call-site argument.
    /// </summary>
    internal BoundExpression GetArgumentInfo(
        MethodSymbol argumentInfoFactory,
        BoundExpression boundArgument,
        string name,
        RefKind refKind,
        bool isStaticType)
    {
        CSharpArgumentInfoFlags flags = 0;
        if (isStaticType)
        {
            flags |= CSharpArgumentInfoFlags.IsStaticType;
        }
        if (name != null)
        {
            flags |= CSharpArgumentInfoFlags.NamedArgument;
        }
        Debug.Assert(refKind == RefKind.None || refKind == RefKind.Ref || refKind == RefKind.Out, "unexpected refKind in dynamic");
        // by-ref type doesn't trigger dynamic dispatch and it can't be a null literal => set UseCompileTimeType
        if (refKind == RefKind.Out)
        {
            flags |= CSharpArgumentInfoFlags.IsOut | CSharpArgumentInfoFlags.UseCompileTimeType;
        }
        else if (refKind == RefKind.Ref)
        {
            flags |= CSharpArgumentInfoFlags.IsRef | CSharpArgumentInfoFlags.UseCompileTimeType;
        }
        var argType = boundArgument.Type;
        // Check "literal" constant.
        // What the runtime binder does with this LiteralConstant flag is just to create a constant,
        // which is a compelling enough reason to make sure that on the production end of the binder
        // data, we do the inverse (i.e., use the LiteralConstant flag whenever we encounter a constant
        // argument.
        // And in fact, the bug being fixed with this change is that the compiler will consider constants
        // for numeric and enum conversions even if they are not literals (such as, (1-1) --> enum), but
        // the runtime binder didn't. So we do need to set this flag whenever we see a constant.
        // But the complication is that null values lose their type when they get to the runtime binder,
        // and so we need a way to distinguish a null constant of any given type from the null literal.
        // The design is simple! We use UseCompileTimeType to determine whether we care about the type of
        // a null constant argument, so that the null literal gets "LiteralConstant" whereas every other
        // constant gets "LiteralConstant | UseCompileTimeType". Because obviously UseCompileTimeType is
        // wrong for the null literal.
        // We care, because we want to prevent this from working:
        //
        // const C x = null;
        // class C { public void M(SomeUnrelatedReferenceType x) { } }
        // ...
        // dynamic d = new C(); d.M(x); // This will pass a null constant and the type is gone!
        //
        // as well as the alternative where x is a const null of type object.
        if (boundArgument.ConstantValue != null)
        {
            flags |= CSharpArgumentInfoFlags.Constant;
        }
        // Check compile time type.
        // See also DynamicRewriter::GenerateCallingObjectFlags.
        if ((object)argType != null && !argType.IsDynamic())
        {
            flags |= CSharpArgumentInfoFlags.UseCompileTimeType;
        }
        return _factory.Call(null, argumentInfoFactory, _factory.Literal((int)flags), _factory.Literal(name));
    }
    /// <summary>
    /// Flattens the runtime arguments of the site invocation:
    /// [callSiteField, receiver?, arguments..., right?].
    /// </summary>
    internal static ImmutableArray<BoundExpression> GetCallSiteArguments(BoundExpression callSiteFieldAccess, BoundExpression receiver, ImmutableArray<BoundExpression> arguments, BoundExpression right)
    {
        var result = new BoundExpression[1 + (receiver != null ? 1 : 0) + arguments.Length + (right != null ? 1 : 0)];
        int j = 0;
        result[j++] = callSiteFieldAccess;
        if (receiver != null)
        {
            result[j++] = receiver;
        }
        arguments.CopyTo(result, j);
        j += arguments.Length;
        if (right != null)
        {
            result[j++] = right;
        }
        return result.AsImmutableOrNull();
    }
    /// <summary>
    /// Builds the delegate type-argument list for the call site:
    /// [CallSite, receiver?, arguments..., right?, returnType (unless void)].
    /// Expressions without a type (e.g. null literals) contribute System.Object.
    /// </summary>
    internal TypeSymbol[] MakeCallSiteDelegateSignature(TypeSymbol callSiteType, BoundExpression receiver, ImmutableArray<BoundExpression> arguments, BoundExpression right, TypeSymbol resultType)
    {
        var systemObjectType = _factory.SpecialType(SpecialType.System_Object);
        var result = new TypeSymbol[1 + (receiver != null ? 1 : 0) + arguments.Length + (right != null ? 1 : 0) + (resultType.SpecialType == SpecialType.System_Void ? 0 : 1)];
        int j = 0;
        // CallSite:
        result[j++] = callSiteType;
        // receiver:
        if (receiver != null)
        {
            result[j++] = receiver.Type ?? systemObjectType;
        }
        // argument types:
        for (int i = 0; i < arguments.Length; i++)
        {
            result[j++] = arguments[i].Type ?? systemObjectType;
        }
        // right hand side of an assignment:
        if (right != null)
        {
            result[j++] = right.Type ?? systemObjectType;
        }
        // return type:
        if (j < result.Length)
        {
            result[j++] = resultType ?? systemObjectType;
        }
        return result;
    }
}
}
| balazssimon/meta-cs | src/Main/MetaDslx.CodeAnalysis.CSharp/Lowering/LocalRewriter/LoweredDynamicOperationFactory.cs | C# | apache-2.0 | 39,696 |
/*
* Copyright 2017 RedRoma, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tech.aroma.application.service.reactions.actions;
import org.apache.thrift.TException;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import tech.aroma.thrift.Message;
import tech.sirwellington.alchemy.test.junit.runners.AlchemyTestRunner;
import tech.sirwellington.alchemy.test.junit.runners.Repeat;
import static org.hamcrest.Matchers.isEmptyOrNullString;
import static org.hamcrest.Matchers.not;
import static org.junit.Assert.assertThat;
import static tech.aroma.thrift.generators.MessageGenerators.messages;
import static tech.sirwellington.alchemy.generator.AlchemyGenerator.Get.one;
import static tech.sirwellington.alchemy.test.junit.ThrowableAssertion.*;
/**
 * Unit tests for {@link IgnoreInbox}: a valid message is accepted silently,
 * an empty message is rejected with a {@link TException}, and
 * {@code toString()} produces a non-empty description.
 *
 * @author SirWellington
 */
@Repeat(10)
@RunWith(AlchemyTestRunner.class)
public class IgnoreInboxTest
{
    private Message validMessage;
    private IgnoreInbox ignoreInbox;

    @Before
    public void setUp() throws Exception
    {
        validMessage = one(messages());
        ignoreInbox = new IgnoreInbox();
    }

    @Test
    public void testActOnMessage() throws Exception
    {
        // A well-formed message must be accepted without throwing.
        ignoreInbox.actOnMessage(validMessage);
    }

    @Test
    public void testActOnMessageWithBadArgs() throws Exception
    {
        Message emptyMessage = new Message();
        assertThrows(() -> ignoreInbox.actOnMessage(emptyMessage))
                .isInstanceOf(TException.class);
    }

    @Test
    public void testToString()
    {
        String description = ignoreInbox.toString();
        assertThat(description, not(isEmptyOrNullString()));
    }
}
| RedRoma/aroma-application-service | src/test/java/tech/aroma/application/service/reactions/actions/IgnoreInboxTest.java | Java | apache-2.0 | 2,079 |
/***********************************************************************\
|* *|
|* Copyright (c) 1995-2008 by NVIDIA Corp. All rights reserved. *|
|* *|
|* This material constitutes the trade secrets and confidential, *|
|* proprietary information of NVIDIA, Corp. This material is not to *|
|* be disclosed, reproduced, copied, or used in any manner not *|
|* permitted under license from NVIDIA, Corp. *|
|* *|
\***********************************************************************/
#ifndef NVENCODERAPI_H
#define NVENCODERAPI_H
#include "NVEncodeDataTypes.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifdef _WIN32
#define NVENCAPI __stdcall
#else
#define NVENCAPI
#endif
/* Callback invoked to obtain an output buffer for encoded bits; returns the
 * buffer and writes its size to *pBufferSize. pUserdata is the pointer
 * registered via NVRegisterCB. */
typedef unsigned char *(NVENCAPI *PFNACQUIREBITSTREAM)(int *pBufferSize, void *pUserdata);
/* Callback invoked when the encoder has written nBytesInBuffer bytes into a
 * previously acquired buffer cb. */
typedef void (NVENCAPI *PFNRELEASEBITSTREAM)(int nBytesInBuffer, unsigned char *cb, void *pUserdata);
/* Callback invoked at the start of encoding a frame. */
typedef void (NVENCAPI *PFNONBEGINFRAME)(const NVVE_BeginFrameInfo *pbfi, void *pUserdata);
/* Callback invoked when encoding of a frame completes. */
typedef void (NVENCAPI *PFNONENDFRAME)(const NVVE_EndFrameInfo *pefi, void *pUserdata);
/* Bundle of client callbacks registered with NVRegisterCB. */
typedef struct _NVVE_CallbackParams
{
    PFNACQUIREBITSTREAM pfnacquirebitstream;
    PFNRELEASEBITSTREAM pfnreleasebitstream;
    PFNONBEGINFRAME     pfnonbeginframe;
    PFNONENDFRAME       pfnonendframe;
} NVVE_CallbackParams;
/* Opaque handle to an encoder instance. */
typedef void *NVEncoder;
/* Encoder lifecycle and configuration entry points. Return values are int
 * status codes -- NOTE(review): exact error-code semantics are defined by the
 * NVIDIA SDK, not visible here; confirm against the SDK documentation. */
int NVENCAPI NVCreateEncoder(NVEncoder *pNVEncoder);
int NVENCAPI NVDestroyEncoder(NVEncoder hNVEncoder);
int NVENCAPI NVIsSupportedCodec(NVEncoder hNVEncoder, unsigned long dwCodecType);
int NVENCAPI NVIsSupportedCodecProfile(NVEncoder hNVEncoder, unsigned long dwCodecType, unsigned long dwProfileType);
int NVENCAPI NVSetCodec(NVEncoder hNVEncoder, unsigned long dwCodecType);
int NVENCAPI NVGetCodec(NVEncoder hNVEncoder, unsigned long *pdwCodecType);
int NVENCAPI NVIsSupportedParam(NVEncoder hNVEncoder, unsigned long dwParamType);
int NVENCAPI NVSetParamValue(NVEncoder hNVEncoder, unsigned long dwParamType, void *pData);
int NVENCAPI NVGetParamValue(NVEncoder hNVEncoder, unsigned long dwParamType, void *pData);
int NVENCAPI NVSetDefaultParam(NVEncoder hNVEncoder);
int NVENCAPI NVCreateHWEncoder(NVEncoder hNVEncoder);
int NVENCAPI NVGetSPSPPS(NVEncoder hNVEncoder, unsigned char *pSPSPPSbfr, int nSizeSPSPPSbfr, int *pDatasize);
int NVENCAPI NVEncodeFrame(NVEncoder hNVEncoder, NVVE_EncodeFrameParams *pFrmIn, unsigned long flag, void *pData);
int NVENCAPI NVGetHWEncodeCaps(void);
/* Registers the client callback bundle; pUserdata is echoed to every callback. */
void NVENCAPI NVRegisterCB(NVEncoder hNVEncoder, NVVE_CallbackParams cb, void *pUserdata);
#ifdef __cplusplus
}
#endif
#endif
| ismagarcia/cohash | tmp/cohash-read-only/include/NVEncoderAPI.h | C | apache-2.0 | 2,870 |
## Privacy ##
This extension does not collect, store or transmit any data. | knom/vsts-office-tasks | PRIVACY.md | Markdown | apache-2.0 | 75 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.pherf.configuration;
import org.apache.phoenix.pherf.rules.RulesApplier;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlType;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@XmlType
public class Query {

    // Matches "[fieldName]" placeholders embedded in a statement. Compiled once
    // for the class since the pattern never varies per instance.
    private static final Pattern DYNAMIC_FIELD_PATTERN = Pattern.compile("\\[.*?\\]");

    private String id;
    private String queryGroup;
    private String tenantId;
    private String statement;
    private Long expectedAggregateRowCount;
    private String ddl;
    private boolean useGlobalConnection;
    private long timeoutDuration = Long.MAX_VALUE;

    public Query() {
    }

    /**
     * SQL statement, normalized so that runs of whitespace are collapsed to a
     * single space (see {@link #setStatement(String)}).
     *
     * @return the normalized statement
     */
    @XmlAttribute
    public String getStatement() {
        return statement;
    }

    /**
     * Returns the statement with every "[field]" placeholder replaced by a
     * rule-generated value. CHAR/VARCHAR values are wrapped in single quotes.
     *
     * @param ruleApplier generates the per-column data values
     * @param scenario scenario used to look up column rules
     * @return the statement with all placeholders substituted
     * @throws Exception if a rule or data value cannot be resolved
     */
    public String getDynamicStatement(RulesApplier ruleApplier, Scenario scenario)
            throws Exception {
        String ret = this.statement;
        Matcher m = DYNAMIC_FIELD_PATTERN.matcher(ret);
        while (m.find()) {
            String dynamicField = m.group(0).replace("[", "").replace("]", "");
            Column dynamicColumn = ruleApplier.getRule(dynamicField, scenario);
            // Character-typed values must be quoted in SQL.
            String needQuotes =
                    (dynamicColumn.getType() == DataTypeMapping.CHAR
                            || dynamicColumn.getType() == DataTypeMapping.VARCHAR) ? "'" : "";
            ret =
                    ret.replace("[" + dynamicField + "]",
                            needQuotes + ruleApplier.getDataValue(dynamicColumn).getValue()
                                    + needQuotes);
        }
        return ret;
    }

    public void setStatement(String statement) {
        // normalize statement - merge all consecutive whitespace into one space
        this.statement = statement.replaceAll("\\s+", " ");
    }

    /**
     * Tenant Id used by the connection executing this query.
     *
     * @return the tenant id, or null for a global connection
     */
    @XmlAttribute
    public String getTenantId() {
        return tenantId;
    }

    public void setTenantId(String tenantId) {
        this.tenantId = tenantId;
    }

    /**
     * Expected aggregate row count; checked against actual results when set.
     *
     * @return the expected count, or null if not asserted
     */
    @XmlAttribute
    public Long getExpectedAggregateRowCount() {
        return expectedAggregateRowCount;
    }

    public void setExpectedAggregateRowCount(Long expectedAggregateRowCount) {
        this.expectedAggregateRowCount = expectedAggregateRowCount;
    }

    /**
     * DDL executed only once. If tenantId is specified, the DDL is executed
     * with a tenant-specific connection.
     *
     * @return the DDL statement, or null
     */
    @XmlAttribute
    public String getDdl() {
        return ddl;
    }

    public void setDdl(String ddl) {
        this.ddl = ddl;
    }

    /**
     * queryGroup is a free-form tag used to correlate queries across sets or
     * files so reporting results make sense.
     *
     * @return the group id
     */
    @XmlAttribute
    public String getQueryGroup() {
        return queryGroup;
    }

    public void setQueryGroup(String queryGroup) {
        this.queryGroup = queryGroup;
    }

    /**
     * Injects the given optimizer hint after every SELECT keyword, producing
     * "SELECT /*+ hint ..." forms.
     *
     * <p>Bug fix: the previous implementation upper-cased the ENTIRE statement
     * before replacing, which corrupted string literals and case-sensitive
     * identifiers. The keyword match is now case-insensitive instead, and the
     * rest of the statement is left untouched.
     *
     * @param queryHint hint text to inject; null is a no-op
     */
    public void setHint(String queryHint) {
        if (null != queryHint) {
            // quoteReplacement guards against '$' or '\' characters in the hint.
            this.statement =
                    this.statement.replaceAll("(?i)SELECT ",
                            Matcher.quoteReplacement("SELECT /*+ " + queryHint + "*/ "));
        }
    }

    /**
     * Query ID; a random UUID is lazily assigned if none was specified.
     *
     * @return the id, never null
     */
    @XmlAttribute
    public String getId() {
        if (null == this.id) {
            this.id = java.util.UUID.randomUUID().toString();
        }
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    /** Whether this query runs on the global (non-tenant) connection. */
    @XmlAttribute
    public boolean isUseGlobalConnection() {
        return useGlobalConnection;
    }

    public void setUseGlobalConnection(boolean useGlobalConnection) {
        this.useGlobalConnection = useGlobalConnection;
    }

    /** Per-query timeout in milliseconds; defaults to no timeout. */
    @XmlAttribute
    public long getTimeoutDuration() {
        return this.timeoutDuration;
    }

    public void setTimeoutDuration(long timeoutDuration) {
        this.timeoutDuration = timeoutDuration;
    }
}
| ankitsinghal/phoenix | phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Query.java | Java | apache-2.0 | 5,103 |
import { Rectangle } from "../../Source/Cesium.js";
import { computeFlyToLocationForRectangle } from "../../Source/Cesium.js";
import { Globe } from "../../Source/Cesium.js";
import { SceneMode } from "../../Source/Cesium.js";
import createScene from "../createScene.js";
import { when } from "../../Source/Cesium.js";
import MockTerrainProvider from "../MockTerrainProvider.js";
// Spec for computeFlyToLocationForRectangle: verifies that the fly-to target
// is raised by the maximum sampled terrain height when terrain with
// availability is present, and falls back to the ellipsoid height otherwise.
describe("Scene/computeFlyToLocationForRectangle", function () {
  var scene;
  beforeEach(function () {
    scene = createScene();
  });
  afterEach(function () {
    scene.destroyForSpecs();
  });
  // Shared body for the 3D and Columbus-view cases; sceneMode selects which.
  function sampleTest(sceneMode) {
    //Pretend we have terrain with availability.
    var terrainProvider = new MockTerrainProvider();
    terrainProvider.availability = {};
    scene.globe = new Globe();
    scene.terrainProvider = terrainProvider;
    scene.mode = sceneMode;
    var rectangle = new Rectangle(0.2, 0.4, 0.6, 0.8);
    // The five cartographic points the implementation is expected to sample.
    var cartographics = [
      Rectangle.center(rectangle),
      Rectangle.southeast(rectangle),
      Rectangle.southwest(rectangle),
      Rectangle.northeast(rectangle),
      Rectangle.northwest(rectangle),
    ];
    // Mock sampleTerrainMostDetailed with same positions but with heights.
    var maxHeight = 1234;
    var sampledResults = [
      Rectangle.center(rectangle),
      Rectangle.southeast(rectangle),
      Rectangle.southwest(rectangle),
      Rectangle.northeast(rectangle),
      Rectangle.northwest(rectangle),
    ];
    // Heights assigned so that maxHeight is the maximum; index 4 is left
    // unset -- presumably intentional, to exercise a missing-height entry.
    sampledResults[0].height = 145;
    sampledResults[1].height = 1211;
    sampledResults[2].height = -123;
    sampledResults[3].height = maxHeight;
    spyOn(
      computeFlyToLocationForRectangle,
      "_sampleTerrainMostDetailed"
    ).and.returnValue(when.resolve(sampledResults));
    // Basically do the computation ourselves with our known values;
    var expectedResult;
    if (sceneMode === SceneMode.SCENE3D) {
      expectedResult = scene.mapProjection.ellipsoid.cartesianToCartographic(
        scene.camera.getRectangleCameraCoordinates(rectangle)
      );
    } else {
      expectedResult = scene.mapProjection.unproject(
        scene.camera.getRectangleCameraCoordinates(rectangle)
      );
    }
    expectedResult.height += maxHeight;
    return computeFlyToLocationForRectangle(rectangle, scene).then(function (
      result
    ) {
      expect(result).toEqual(expectedResult);
      expect(
        computeFlyToLocationForRectangle._sampleTerrainMostDetailed
      ).toHaveBeenCalledWith(terrainProvider, cartographics);
    });
  }
  it("samples terrain and returns expected result in 3D", function () {
    return sampleTest(SceneMode.SCENE3D);
  });
  it("samples terrain and returns expected result in CV", function () {
    return sampleTest(SceneMode.COLUMBUS_VIEW);
  });
  // In 2D no terrain sampling should occur at all.
  it("returns height above ellipsoid when in 2D", function () {
    var terrainProvider = new MockTerrainProvider();
    terrainProvider.availability = {};
    scene.globe = new Globe();
    scene.terrainProvider = terrainProvider;
    scene.mode = SceneMode.SCENE2D;
    var rectangle = new Rectangle(0.2, 0.4, 0.6, 0.8);
    var expectedResult = scene.mapProjection.unproject(
      scene.camera.getRectangleCameraCoordinates(rectangle)
    );
    spyOn(computeFlyToLocationForRectangle, "_sampleTerrainMostDetailed");
    return computeFlyToLocationForRectangle(rectangle, scene).then(function (
      result
    ) {
      expect(result).toEqual(expectedResult);
      expect(
        computeFlyToLocationForRectangle._sampleTerrainMostDetailed
      ).not.toHaveBeenCalled();
    });
  });
  // MockTerrainProvider without availability: sampling must be skipped.
  it("returns height above ellipsoid when terrain not available", function () {
    scene.globe = new Globe();
    scene.terrainProvider = new MockTerrainProvider();
    var rectangle = new Rectangle(0.2, 0.4, 0.6, 0.8);
    spyOn(computeFlyToLocationForRectangle, "_sampleTerrainMostDetailed");
    var expectedResult = scene.mapProjection.ellipsoid.cartesianToCartographic(
      scene.camera.getRectangleCameraCoordinates(rectangle)
    );
    return computeFlyToLocationForRectangle(rectangle, scene).then(function (
      result
    ) {
      expect(result).toEqual(expectedResult);
      expect(
        computeFlyToLocationForRectangle._sampleTerrainMostDetailed
      ).not.toHaveBeenCalled();
    });
  });
  it("waits for terrain to become ready", function () {
    var terrainProvider = new MockTerrainProvider();
    // Spy on the readyPromise chain to prove the implementation awaited it.
    spyOn(terrainProvider.readyPromise, "then").and.callThrough();
    scene.globe = new Globe();
    scene.terrainProvider = terrainProvider;
    var rectangle = new Rectangle(0.2, 0.4, 0.6, 0.8);
    var expectedResult = scene.mapProjection.ellipsoid.cartesianToCartographic(
      scene.camera.getRectangleCameraCoordinates(rectangle)
    );
    return computeFlyToLocationForRectangle(rectangle, scene).then(function (
      result
    ) {
      expect(result).toEqual(expectedResult);
      expect(terrainProvider.readyPromise.then).toHaveBeenCalled();
    });
  });
  it("returns height above ellipsoid when terrain undefined", function () {
    scene.terrainProvider = undefined;
    var rectangle = new Rectangle(0.2, 0.4, 0.6, 0.8);
    spyOn(computeFlyToLocationForRectangle, "_sampleTerrainMostDetailed");
    var expectedResult = scene.mapProjection.ellipsoid.cartesianToCartographic(
      scene.camera.getRectangleCameraCoordinates(rectangle)
    );
    return computeFlyToLocationForRectangle(rectangle, scene).then(function (
      result
    ) {
      expect(result).toEqual(expectedResult);
      expect(
        computeFlyToLocationForRectangle._sampleTerrainMostDetailed
      ).not.toHaveBeenCalled();
    });
  });
});
| progsung/cesium | Specs/Scene/computeFlyToLocationForRectangleSpec.js | JavaScript | apache-2.0 | 5,648 |
<!-- Execution-details panel for the Kubernetes "Run Job" pipeline stage.
     Shows the job configuration (account, namespace, image, logs) or the
     task status, selected via the detailsSection navigation. -->
<div ng-controller="kubernetesRunJobExecutionDetailsCtrl">
  <execution-details-section-nav sections="configSections"></execution-details-section-nav>
  <div class="step-section-details" ng-if="detailsSection === 'runJobConfig'">
    <div class="row">
      <div class="col-md-9">
        <dl class="dl-narrow dl-horizontal">
          <dt>Account</dt>
          <dd><account-tag account="stage.context.account"></account-tag></dd>
          <dt>Namespace</dt>
          <dd>{{stage.context.namespace}}</dd>
          <dt>Image</dt>
          <!-- Image tag comes from the trigger when the container was
               configured with fromTrigger; otherwise from the description. -->
          <span ng-repeat="container in stage.context.containers">
            <dd ng-if="!container.imageDescription.fromTrigger">
              {{[container.imageDescription.repository, container.imageDescription.tag].join(':')}}
            </dd>
            <dd ng-if="container.imageDescription.fromTrigger">
              {{[container.imageDescription.repository, execution.trigger.tag].join(':')}}
            </dd>
          </span>
          <dt ng-if="stage.context.jobStatus.logs">Logs</dt>
          <dd ng-if="stage.context.jobStatus.logs">
            <dl><a href="" ng-click="displayLogs()">Console Output (Raw)</a></dl>
          </dd>
        </dl>
      </div>
    </div>
    <stage-failure-message stage="stage" message="stage.failureMessage"></stage-failure-message>
    <div class="row" ng-if="stage.context.execution.logs">
      <div class="col-md-12">
        <div class="well alert alert-info">
          <!-- TODO: Move this to config -->
          <a target="_blank" href="{{stage.context.execution.logs}}">
            View Execution Logs
          </a>
        </div>
      </div>
    </div>
    <div class="row" ng-if="stage.context.completionDetails">
      <div class="col-md-12">
        <div class="well alert alert-info">
          <h4>Results</h4>
          <dl class="dl-narrow dl-horizontal">
            <div ng-repeat="(key, value) in stage.context.completionDetails">
              <dt>{{ key }}</dt>
              <dd>{{ value }}</dd>
            </div>
          </dl>
        </div>
      </div>
    </div>
  </div>
  <div class="step-section-details" ng-if="detailsSection === 'taskStatus'">
    <div class="row">
      <execution-step-details item="stage"></execution-step-details>
    </div>
  </div>
</div>
| sgarlick987/deck | app/scripts/modules/kubernetes/src/v1/pipeline/stages/runJob/runJobExecutionDetails.html | HTML | apache-2.0 | 2,292 |
/*
* Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package hydra;
import java.io.*;
import java.util.*;
/**
* A Vector subclass with methods for storing representations of ints,
* longs, and doubles. Has corresponding methods for retrieving instances
* of same (for example, longAt()) without doing explicit casts.
*
* Also implements some statistical functions so that you easily
* get mean, std deviation, and so forth of a NumVector of data.
*
* Not overwhelmingly efficient in terms of either time or space. Running
* on a JavaSoft VM on my old Sparc, this takes about 900 milliseconds to run
* the stats on 10k numbers.
*
* @author <A HREF="mailto:nastosm@gemstone.com">Mike Nastos</A>
* @version 1.0, 01/05/98
*/
public class NumVector extends Vector implements Serializable {

  /** Creates an empty NumVector. */
  public NumVector() {
    super();
  }

  /** Creates a NumVector containing the elements of the given collection. */
  public NumVector(Collection c) {
    super(c);
  }

  /** Creates an empty NumVector with the given initial capacity. */
  public NumVector(int initialCapacity) {
    super(initialCapacity);
  }

  /** Creates an empty NumVector with the given capacity and growth increment. */
  public NumVector(int initialCapacity, int capacityIncrement) {
    super(initialCapacity, capacityIncrement);
  }

  /**
   * Appends all elements of aVector to the receiver.
   *
   * @param aVector the vector whose elements are appended
   */
  public void addAll(Vector aVector) {
    for (int i = 0; i < aVector.size(); i++) {
      this.add(aVector.elementAt(i));
    }
  }

  /**
   * Adds a double to the receiver.
   *
   * @param arg the double to be stored
   */
  public void add(double arg) {
    // valueOf over new Double(): avoids deprecated boxing constructor.
    super.add(Double.valueOf(arg));
  }

  /**
   * Adds an int to the receiver.
   *
   * @param arg the int to be stored
   */
  public void add(int arg) {
    super.add(Integer.valueOf(arg));
  }

  /**
   * Adds a long to the receiver.
   *
   * @param arg the long to be stored
   */
  public void add(long arg) {
    super.add(Long.valueOf(arg));
  }

  /**
   * Returns the item at index pos as a double. Accepts any stored
   * java.lang.Number (the original accepted only Integer, Double and Long),
   * which is a backward-compatible widening.
   *
   * @param pos the index of the item to be returned
   */
  public double doubleAt(int pos) {
    return ((Number) super.elementAt(pos)).doubleValue();
  }

  /**
   * Returns the item at index pos narrowed to an int.
   * NOTE: the declared return type has always been long; it is kept for
   * backward compatibility with existing callers.
   *
   * @param pos the index of the item to be returned
   */
  public long intAt(int pos) {
    return ((Number) super.elementAt(pos)).intValue();
  }

  /**
   * Returns the item at index pos as a long.
   *
   * @param pos the index of the item to be returned
   */
  public long longAt(int pos) {
    return ((Number) super.elementAt(pos)).longValue();
  }

  /**
   * Returns the arithmetic mean of the receiver's contents.
   * The receiver must not be empty.
   */
  public double mean() {
    return this.sum() / this.size();
  }

  /**
   * Returns the population standard deviation of the receiver's contents.
   * The receiver must not be empty.
   */
  public double stddev() {
    return this.stats()[2];
  }

  /**
   * Returns the statistics formatted as a multi-line report, or null when the
   * receiver is empty (historical behavior, preserved for callers).
   */
  public String printStats() {
    if (this.size() == 0)
      return null;
    double statRay[] = this.stats();
    return "Number of task times: " + (int) statRay[0] +
        "\nMean: " + (float) statRay[1] + " ms" +
        "\nStandard deviation: " + (float) statRay[2] + " ms" +
        "\nSum: " + (float) statRay[3] + " ms" +
        "\nMax Value: " + (float) statRay[4] + " ms" +
        "\nMin Value: " + (float) statRay[5] + " ms";
  }

  /**
   * Returns the statistics formatted as a single table row, or the string
   * "No data present." when the receiver is empty.
   */
  public String printStatsAsTableRow() {
    if (this.size() == 0)
      return "No data present.";
    double statRay[] = this.stats();
    return "   " + (int) statRay[0] +
        "       " + (float) statRay[1] +
        "     " + (float) statRay[2] +
        "   " + (float) statRay[3] +
        "   " + (float) statRay[4] +
        "   " + (float) statRay[5];
  }

  /**
   * Returns the column labels matching printStatsAsTableRow(), or the string
   * "No data present." when the receiver is empty.
   * (Fixed: no longer computes the full statistics just to print a header.)
   */
  public String printStatsTableHeader() {
    if (this.size() == 0)
      return "No data present.";
    return "Set sz  " +
        "Mean        " +
        "Std dev      " +
        "Sum          " +
        "Max        " +
        "Min     ";
  }

  /**
   * Computes summary statistics over the receiver's contents.
   * The result array layout is:
   *
   *   [0] data set size
   *   [1] mean
   *   [2] population standard deviation
   *   [3] sum of the data items
   *   [4] the max value
   *   [5] the min value
   *
   * The receiver must not be empty: doubleAt(0) is read unconditionally,
   * matching the historical behavior.
   */
  public double[] stats() {
    int num = this.size();
    double sum = 0.0;
    double hi = this.doubleAt(0);
    double lo = hi;
    for (int i = 0; i < num; i++) {
      double curr = this.doubleAt(i);
      sum += curr;
      if (curr < lo) lo = curr;
      if (curr > hi) hi = curr;
    }
    double mean = sum / num;
    // Second pass: mean of squared deviations -> population variance.
    double squareSum = 0.0;
    for (int i = 0; i < num; i++) {
      double dev = this.doubleAt(i) - mean;
      squareSum += dev * dev;
    }
    double stddev = Math.sqrt(squareSum / num);
    return new double[] { num, mean, stddev, sum, hi, lo };
  }

  /**
   * Returns the sum of the receiver's contents (0.0 when empty).
   */
  public double sum() {
    double sum = 0.0;
    for (int i = 0; i < this.size(); i++) {
      sum += this.doubleAt(i);
    }
    return sum;
  }

  /**
   * Smoke test: exercises the accessors and the statistics routines.
   */
  public static void main(String[] args) {
    NumVector v = new NumVector();
    v.add(34L);
    v.add(45);
    v.add(34.93);
    System.out.println(v.intAt(0));
    System.out.println(v.longAt(0));
    System.out.println(v.elementAt(0));
    System.out.println(v.longAt(1));
    System.out.println(v.doubleAt(2));
    System.out.println(v.doubleAt(1));
    for (int i = 0; i < 100; i++) {
      v.add(i);
    }
    long begin = System.currentTimeMillis();
    for (int i = 0; i < v.size(); i++) {
      v.intAt(i);
    }
    long end = System.currentTimeMillis();
    System.out.println("Access time for " + v.size() + " elements " +
        (end - begin));
    System.out.println("Mean of values: " + v.mean());
    System.out.println(v.printStats());
    NumVector newVec = new NumVector();
    newVec.addAll(v);
    System.out.println("After addAll, size is: " + newVec.size());
    System.out.println(newVec.printStatsTableHeader() + "\n");
    System.out.println(newVec.printStatsAsTableRow() + "\n");
  }
}
| gemxd/gemfirexd-oss | tests/core/src/main/java/hydra/NumVector.java | Java | apache-2.0 | 9,494 |
/**
* OLAT - Online Learning and Training<br>
* http://www.olat.org
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); <br>
* you may not use this file except in compliance with the License.<br>
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing,<br>
* software distributed under the License is distributed on an "AS IS" BASIS, <br>
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <br>
* See the License for the specific language governing permissions and <br>
* limitations under the License.
* <p>
* Copyright (c) 2008 frentix GmbH, Switzerland<br>
* <p>
*/
package org.olat.course.nodes.info;
import java.text.DateFormat;
import java.util.List;
import java.util.Locale;
import org.apache.commons.lang.StringEscapeUtils;
import org.olat.commons.info.manager.InfoMessageFrontendManager;
import org.olat.commons.info.model.InfoMessage;
import org.olat.core.gui.UserRequest;
import org.olat.core.gui.components.Component;
import org.olat.core.gui.components.table.BaseTableDataModelWithoutFilter;
import org.olat.core.gui.components.table.ColumnDescriptor;
import org.olat.core.gui.components.table.CustomCellRenderer;
import org.olat.core.gui.components.table.CustomRenderColumnDescriptor;
import org.olat.core.gui.components.table.TableController;
import org.olat.core.gui.components.table.TableDataModel;
import org.olat.core.gui.components.table.TableGuiConfiguration;
import org.olat.core.gui.control.Event;
import org.olat.core.gui.control.WindowControl;
import org.olat.core.gui.control.controller.BasicController;
import org.olat.core.gui.render.Renderer;
import org.olat.core.gui.render.StringOutput;
import org.olat.core.helpers.Settings;
import org.olat.core.id.OLATResourceable;
import org.olat.core.id.context.BusinessControlFactory;
import org.olat.core.id.context.ContextEntry;
import org.olat.core.util.Formatter;
import org.olat.core.util.StringHelper;
import org.olat.core.util.resource.OresHelper;
import org.olat.course.CourseModule;
import org.olat.course.nodes.InfoCourseNode;
import org.olat.course.run.userview.UserCourseEnvironment;
/**
* Description:<br>
* Peekview for info messages
* <P>
* Initial Date: 3 aug. 2010 <br>
*
* @author srosse, stephane.rosse@frentix.com, http://www.frentix.com
*/
public class InfoPeekViewController extends BasicController {
private final OLATResourceable ores;
private final InfoCourseNode courseNode;
private TableController tableController;
    /**
     * Builds the peekview for the given info course node: resolves the course
     * resource, creates the message table and sets it as the initial panel.
     *
     * @param ureq user request
     * @param wControl window control
     * @param userCourseEnv environment of the course the node belongs to
     * @param courseNode the info course node to preview
     */
    public InfoPeekViewController(final UserRequest ureq, final WindowControl wControl, final UserCourseEnvironment userCourseEnv, final InfoCourseNode courseNode) {
        super(ureq, wControl);
        this.courseNode = courseNode;
        final Long resId = userCourseEnv.getCourseEnvironment().getCourseResourceableId();
        ores = OresHelper.createOLATResourceableInstance(CourseModule.class, resId);
        init(ureq);
        putInitialPanel(tableController.getInitialComponent());
    }
    /**
     * Configures a minimal, non-interactive table (no header, paging, sorting
     * or download) and fills it with at most the five most recent info
     * messages of this course node.
     */
    private void init(final UserRequest ureq) {
        final TableGuiConfiguration tableConfig = new TableGuiConfiguration();
        tableConfig.setTableEmptyMessage(translate("peekview.noInfos"));
        tableConfig.setDisplayTableHeader(false);
        tableConfig.setCustomCssClass("b_portlet_table");
        tableConfig.setDisplayRowCount(false);
        tableConfig.setPageingEnabled(false);
        tableConfig.setDownloadOffered(false);
        tableConfig.setSortingEnabled(false);
        removeAsListenerAndDispose(tableController);
        tableController = new TableController(tableConfig, ureq, getWindowControl(), getTranslator());
        tableController.addColumnDescriptor(new CustomRenderColumnDescriptor("peekview.title", 0, null, ureq.getLocale(), ColumnDescriptor.ALIGNMENT_LEFT,
                new InfoNodeRenderer()));
        // The node ident scopes messages to this particular course element.
        final String resSubPath = this.courseNode.getIdent();
        final List<InfoMessage> infos = InfoMessageFrontendManager.getInstance().loadInfoMessageByResource(ores, resSubPath, null, null, null, 0, 5);
        final InfosTableModel model = new InfosTableModel(infos);
        tableController.setTableDataModel(model);
        listenTo(tableController);
    }
    // Nothing to clean up: child controllers are disposed via listenTo().
    @Override
    protected void doDispose() {
        //
    }
    // The peekview is read-only; component events are intentionally ignored.
    @Override
    protected void event(final UserRequest ureq, final Component source, final Event event) {
        //
    }
    /**
     * Single-column table model exposing each InfoMessage object itself;
     * rendering is delegated entirely to {@code InfoNodeRenderer}.
     */
    private class InfosTableModel extends BaseTableDataModelWithoutFilter implements TableDataModel {
        private final List<InfoMessage> infos;

        public InfosTableModel(final List<InfoMessage> infos) {
            this.infos = infos;
        }

        @Override
        public int getColumnCount() {
            return 1;
        }

        @Override
        public int getRowCount() {
            return infos.size();
        }

        @Override
        public Object getValueAt(final int row, final int col) {
            final InfoMessage info = infos.get(row);
            switch (col) {
                case 0:
                    return info;
                default:
                    return null;
            }
        }
    }
public String getUrl(final String businessPath) {
final BusinessControlFactory bCF = BusinessControlFactory.getInstance();
final List<ContextEntry> ceList = bCF.createCEListFromString(businessPath);
final StringBuilder retVal = new StringBuilder();
retVal.append(Settings.getServerContextPathURI()).append("/url/");
for (final ContextEntry contextEntry : ceList) {
String ceStr = contextEntry.toString();
ceStr = ceStr.replace(':', '/');
ceStr = ceStr.replaceFirst("\\]", "/");
ceStr = ceStr.replaceFirst("\\[", "");
retVal.append(ceStr);
}
return retVal.substring(0, retVal.length() - 1);
}
    /**
     * Renders one info message as "date: title", with the (truncated) message
     * body as a tooltip and, when a business path exists, a "more" link.
     */
    public class InfoNodeRenderer implements CustomCellRenderer {
        // Lazily created, locale-dependent date formatter.
        // NOTE(review): DateFormat is not thread-safe; assumes single-threaded
        // rendering per controller -- confirm against the table framework.
        private DateFormat formatter;

        public InfoNodeRenderer() {
            //
        }

        @Override
        public void render(final StringOutput sb, final Renderer renderer, final Object val, final Locale locale, final int alignment, final String action) {
            if (val instanceof InfoMessage) {
                final InfoMessage item = (InfoMessage) val;
                // date
                if (formatter == null) {
                    formatter = DateFormat.getDateInstance(DateFormat.MEDIUM, locale);
                }
                sb.append(formatter.format(item.getCreationDate())).append(": ");
                // title, with the message body (max 255 chars) as ExtJS tooltip
                final boolean tooltip = StringHelper.containsNonWhitespace(item.getMessage());
                if (tooltip) {
                    final String message = Formatter.escWithBR(Formatter.truncate(item.getMessage(), 255)).toString();
                    sb.append("<span ext:qtip=\"").append(StringEscapeUtils.escapeHtml(message)).append("\">");
                } else {
                    sb.append("<span>");
                }
                final String title = Formatter.truncate(item.getTitle(), 64);
                sb.append(title).append("</span> ");
                // link
                if (StringHelper.containsNonWhitespace(item.getBusinessPath())) {
                    final String url = getUrl(item.getBusinessPath());
                    sb.append("<a href=\"").append(url).append("\" class=\"o_peekview_infomsg_link\">").append(translate("peekview.more")).append("</a>");
                }
            } else {
                sb.append("-");
            }
        }
    }
} | RLDevOps/Demo | src/main/java/org/olat/course/nodes/info/InfoPeekViewController.java | Java | apache-2.0 | 6,892 |
package org.csanchez.jenkins.plugins.kubernetes;
import hudson.Extension;
import hudson.model.AbstractDescribableImpl;
import hudson.model.Descriptor;

import io.fabric8.kubernetes.api.model.ContainerPort;

import org.jenkinsci.Symbol;
import org.kohsuke.stapler.DataBoundConstructor;
import org.kohsuke.stapler.DataBoundSetter;

import java.io.Serializable;
import java.util.Objects;
/**
 * Describable mapping of a named container port to an optional host port,
 * convertible to a Kubernetes {@link ContainerPort}.
 */
public class PortMapping extends AbstractDescribableImpl<PortMapping> implements Serializable {

    private String name;
    private Integer containerPort;
    private Integer hostPort;

    @DataBoundConstructor
    public PortMapping(String name, Integer containerPort) {
        this.name = name;
        this.containerPort = containerPort;
    }

    public PortMapping(String name, Integer containerPort, Integer hostPort) {
        this.name = name;
        this.containerPort = containerPort;
        this.hostPort = hostPort;
    }

    @DataBoundSetter
    public void setName(String name) {
        this.name = name;
    }

    public String getName() {
        return name;
    }

    @DataBoundSetter
    public void setContainerPort(Integer containerPort) {
        this.containerPort = containerPort;
    }

    public Integer getContainerPort() {
        return containerPort;
    }

    @DataBoundSetter
    public void setHostPort(Integer hostPort) {
        this.hostPort = hostPort;
    }

    public Integer getHostPort() {
        return hostPort;
    }

    /**
     * Converts this mapping into a Kubernetes API {@link ContainerPort};
     * the host port is set only when one was configured.
     */
    public ContainerPort toPort() {
        ContainerPort p = new ContainerPort();
        p.setName(name);
        p.setContainerPort(containerPort);
        if (hostPort != null) {
            p.setHostPort(hostPort);
        }
        return p;
    }

    @Override
    public String toString() {
        return String.format("%s,%d", name, containerPort);
    }

    @Override
    public int hashCode() {
        // Consistent with equals(): same three fields.
        return Objects.hash(name, containerPort, hostPort);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        PortMapping other = (PortMapping) obj;
        return Objects.equals(name, other.name)
                && Objects.equals(containerPort, other.containerPort)
                && Objects.equals(hostPort, other.hostPort);
    }

    @Extension
    @Symbol("portMapping")
    public static class DescriptorImpl extends Descriptor<PortMapping> {
        @Override
        public String getDisplayName() {
            return "Container Exposed Ports";
        }
    }
}
| azweb76/kubernetes-plugin | src/main/java/org/csanchez/jenkins/plugins/kubernetes/PortMapping.java | Java | apache-2.0 | 3,224 |
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/profiler/allocation-tracker.h"
#include "src/frames-inl.h"
#include "src/global-handles.h"
#include "src/objects-inl.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
namespace v8 {
namespace internal {
// Creates a node for one stack frame (function) in the allocation trace tree.
// The node id comes from the owning tree, so ids are unique within one tree.
AllocationTraceNode::AllocationTraceNode(
    AllocationTraceTree* tree, unsigned function_info_index)
    : tree_(tree),
      function_info_index_(function_info_index),
      total_size_(0),
      allocation_count_(0),
      id_(tree->next_node_id()) {
}
// Recursively destroys the subtree rooted at this node.
AllocationTraceNode::~AllocationTraceNode() {
  for (AllocationTraceNode* node : children_) delete node;
}
// Returns the direct child recorded for |function_info_index|, or nullptr
// when this node has no such child.
AllocationTraceNode* AllocationTraceNode::FindChild(
    unsigned function_info_index) {
  for (size_t i = 0; i < children_.size(); ++i) {
    AllocationTraceNode* child = children_[i];
    if (child->function_info_index() == function_info_index) return child;
  }
  return nullptr;
}
// Returns the child for |function_info_index|, creating and registering a new
// one on first use.
AllocationTraceNode* AllocationTraceNode::FindOrAddChild(
    unsigned function_info_index) {
  AllocationTraceNode* existing = FindChild(function_info_index);
  if (existing != nullptr) return existing;
  AllocationTraceNode* created =
      new AllocationTraceNode(tree_, function_info_index);
  children_.push_back(created);
  return created;
}
// Accounts one allocation of |size| bytes against this node.
void AllocationTraceNode::AddAllocation(unsigned size) {
  ++allocation_count_;
  total_size_ += size;
}
// Prints this node ("total_size count name #id") and then its children,
// indenting two extra spaces per tree level.
void AllocationTraceNode::Print(int indent, AllocationTracker* tracker) {
  base::OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' ');
  if (tracker != nullptr) {
    AllocationTracker::FunctionInfo* info =
        tracker->function_info_list()[function_info_index_];
    base::OS::Print("%s #%u", info->name, id_);
  } else {
    // Without a tracker the function name cannot be resolved; print the index.
    base::OS::Print("%u #%u", function_info_index_, id_);
  }
  base::OS::Print("\n");
  indent += 2;
  for (AllocationTraceNode* node : children_) {
    node->Print(indent, tracker);
  }
}
// Node ids start at 1; the root node consumes the first id.
AllocationTraceTree::AllocationTraceTree()
    : next_node_id_(1),
      root_(this, 0) {
}
// root_ is a value member; its destructor frees the whole tree.
AllocationTraceTree::~AllocationTraceTree() {
}
// Folds one stack trace into the tree. |path| holds function-info indices
// ordered innermost-first, so it is walked back-to-front starting at the
// root; the node for the innermost frame is returned.
AllocationTraceNode* AllocationTraceTree::AddPathFromEnd(
    const Vector<unsigned>& path) {
  AllocationTraceNode* node = root();
  unsigned* entries = path.start();
  for (int i = path.length() - 1; i >= 0; --i) {
    node = node->FindOrAddChild(entries[i]);
  }
  return node;
}
// Debug dump of the whole allocation trace tree to stdout.
void AllocationTraceTree::Print(AllocationTracker* tracker) {
  base::OS::Print("[AllocationTraceTree:]\n");
  base::OS::Print("Total size | Allocation count | Function id | id\n");
  root()->Print(0, tracker);
}
// Default-initializes function metadata; empty names and -1 line/column mean
// "not resolved yet" (see UnresolvedLocation::Resolve).
AllocationTracker::FunctionInfo::FunctionInfo()
    : name(""),
      function_id(0),
      script_name(""),
      script_id(0),
      line(-1),
      column(-1) {
}
// Associates [start, start + size) with |trace_node_id|. Entries are keyed by
// the *end* address of each range; any previously recorded overlap is removed
// first.
void AddressToTraceMap::AddRange(Address start, int size,
                                 unsigned trace_node_id) {
  Address end = start + size;
  RemoveRange(start, end);
  RangeStack new_range(start, trace_node_id);
  ranges_.insert(RangeMap::value_type(end, new_range));
}
// Returns the trace node id owning |addr|, or 0 when no recorded range covers
// it. Since entries are keyed by end address, the first key strictly above
// |addr| is the only candidate range.
unsigned AddressToTraceMap::GetTraceNodeId(Address addr) {
  RangeMap::const_iterator candidate = ranges_.upper_bound(addr);
  if (candidate != ranges_.end() && candidate->second.start <= addr) {
    return candidate->second.trace_node_id;
  }
  return 0;
}
// Transfers range ownership when the GC moves an object; unknown source
// addresses are ignored.
void AddressToTraceMap::MoveObject(Address from, Address to, int size) {
  unsigned trace_node_id = GetTraceNodeId(from);
  if (trace_node_id == 0) return;
  RemoveRange(from, from + size);
  AddRange(to, size, trace_node_id);
}
// Drops all recorded address ranges.
void AddressToTraceMap::Clear() {
  ranges_.clear();
}
// Debug dump: one "[start - end] => trace_node_id" line per recorded range.
void AddressToTraceMap::Print() {
  PrintF("[AddressToTraceMap (%" PRIuS "): \n", ranges_.size());
  for (RangeMap::iterator it = ranges_.begin(); it != ranges_.end(); ++it) {
    PrintF("[%p - %p] => %u\n", static_cast<void*>(it->second.start),
           static_cast<void*>(it->first), it->second.trace_node_id);
  }
  PrintF("]\n");
}
// Removes every recorded range overlapping [start, end). Partially
// overlapping ranges are truncated rather than dropped: the piece below
// |start| is re-inserted and the piece above |end| keeps its map entry with
// an adjusted start address.
void AddressToTraceMap::RemoveRange(Address start, Address end) {
  RangeMap::iterator it = ranges_.upper_bound(start);
  if (it == ranges_.end()) return;
  RangeStack prev_range(0, 0);
  RangeMap::iterator to_remove_begin = it;
  if (it->second.start < start) {
    // Remember the left piece so it can be re-inserted, keyed by |start|.
    prev_range = it->second;
  }
  do {
    if (it->first > end) {
      if (it->second.start < end) {
        // Truncate the right piece: it now begins at |end|.
        it->second.start = end;
      }
      break;
    }
    ++it;
  } while (it != ranges_.end());
  ranges_.erase(to_remove_begin, it);
  if (prev_range.start != 0) {
    ranges_.insert(RangeMap::value_type(start, prev_range));
  }
}
// The tracker owns its FunctionInfo list; index 0 is a synthetic "(root)"
// entry, so indices of real functions start at 1.
AllocationTracker::AllocationTracker(HeapObjectsMap* ids, StringsStorage* names)
    : ids_(ids),
      names_(names),
      id_to_function_info_index_(),
      info_index_for_other_state_(0) {
  FunctionInfo* info = new FunctionInfo();
  info->name = "(root)";
  function_info_list_.push_back(info);
}
// Frees any locations that were never resolved plus all function metadata.
AllocationTracker::~AllocationTracker() {
  for (UnresolvedLocation* location : unresolved_locations_) delete location;
  for (FunctionInfo* info : function_info_list_) delete info;
}
// Resolves all pending script positions into line/column numbers. This is
// deferred until serialization because resolving may allocate on the heap
// (see the note in AddFunctionInfo).
void AllocationTracker::PrepareForSerialization() {
  for (UnresolvedLocation* location : unresolved_locations_) {
    location->Resolve();
    delete location;
  }
  unresolved_locations_.clear();
  unresolved_locations_.shrink_to_fit();
}
// Records one heap allocation of |size| bytes at |addr|: captures the current
// JS stack (bounded by kMaxAllocationTraceLength), folds it into the trace
// tree and remembers which trace node owns the address range.
void AllocationTracker::AllocationEvent(Address addr, int size) {
  DisallowHeapAllocation no_allocation;
  Heap* heap = ids_->heap();
  // Mark the new block as FreeSpace to make sure the heap is iterable
  // while we are capturing stack trace.
  heap->CreateFillerObjectAt(addr, size, ClearRecordedSlots::kNo);
  Isolate* isolate = heap->isolate();
  int length = 0;
  JavaScriptFrameIterator it(isolate);
  while (!it.done() && length < kMaxAllocationTraceLength) {
    JavaScriptFrame* frame = it.frame();
    SharedFunctionInfo* shared = frame->function()->shared();
    SnapshotObjectId id = ids_->FindOrAddEntry(
        shared->address(), shared->Size(), false);
    allocation_trace_buffer_[length++] = AddFunctionInfo(shared, id);
    it.Advance();
  }
  if (length == 0) {
    // No JS frames on the stack: attribute the allocation to the VM state
    // (e.g. "(V8 API)") when possible.
    unsigned index = functionInfoIndexForVMState(isolate->current_vm_state());
    if (index != 0) {
      allocation_trace_buffer_[length++] = index;
    }
  }
  AllocationTraceNode* top_node = trace_tree_.AddPathFromEnd(
      Vector<unsigned>(allocation_trace_buffer_, length));
  top_node->AddAllocation(size);
  address_to_trace_.AddRange(addr, size, top_node->id());
}
// Hash function for using a SnapshotObjectId as a base::HashMap key.
static uint32_t SnapshotObjectIdHash(SnapshotObjectId id) {
  return ComputeIntegerHash(static_cast<uint32_t>(id));
}
// Returns the FunctionInfo index for |shared| (keyed by its snapshot id |id|),
// creating and registering a new record the first time the function is seen.
unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
                                            SnapshotObjectId id) {
  base::HashMap::Entry* entry = id_to_function_info_index_.LookupOrInsert(
      reinterpret_cast<void*>(id), SnapshotObjectIdHash(id));
  if (entry->value == nullptr) {
    FunctionInfo* info = new FunctionInfo();
    info->name = names_->GetFunctionName(shared->DebugName());
    info->function_id = id;
    if (shared->script()->IsScript()) {
      Script* script = Script::cast(shared->script());
      if (script->name()->IsName()) {
        Name* name = Name::cast(script->name());
        info->script_name = names_->GetName(name);
      }
      info->script_id = script->id();
      // Converting start offset into line and column may cause heap
      // allocations so we postpone them until snapshot serialization.
      unresolved_locations_.push_back(
          new UnresolvedLocation(script, shared->StartPosition(), info));
    }
    entry->value = reinterpret_cast<void*>(function_info_list_.size());
    function_info_list_.push_back(info);
  }
  return static_cast<unsigned>(reinterpret_cast<intptr_t>((entry->value)));
}
// Lazily creates the synthetic "(V8 API)" FunctionInfo used when an
// allocation happens with no JS frames on the stack. Only the OTHER VM state
// is mapped; all other states yield 0 ("no info").
unsigned AllocationTracker::functionInfoIndexForVMState(StateTag state) {
  if (state != OTHER) return 0;
  if (info_index_for_other_state_ == 0) {
    FunctionInfo* info = new FunctionInfo();
    info->name = "(V8 API)";
    info_index_for_other_state_ =
        static_cast<unsigned>(function_info_list_.size());
    function_info_list_.push_back(info);
  }
  return info_index_for_other_state_;
}
// Holds a weak global handle on |script| so the script can still be GCed;
// HandleWeakScript clears the handle when that happens.
AllocationTracker::UnresolvedLocation::UnresolvedLocation(
    Script* script, int start, FunctionInfo* info)
    : start_position_(start),
      info_(info) {
  script_ = script->GetIsolate()->global_handles()->Create(script);
  GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()), this,
                          &HandleWeakScript, v8::WeakCallbackType::kParameter);
}
// The handle may already have been destroyed by the weak callback, hence the
// null check.
AllocationTracker::UnresolvedLocation::~UnresolvedLocation() {
  if (!script_.is_null()) {
    GlobalHandles::Destroy(reinterpret_cast<Object**>(script_.location()));
  }
}
// Computes line/column for the stored start position. A no-op when the
// script has been collected in the meantime.
void AllocationTracker::UnresolvedLocation::Resolve() {
  if (script_.is_null()) return;
  HandleScope scope(script_->GetIsolate());
  info_->line = Script::GetLineNumber(script_, start_position_);
  info_->column = Script::GetColumnNumber(script_, start_position_);
}
// Weak-handle callback: the script died, so release the handle and mark this
// location as unresolvable.
void AllocationTracker::UnresolvedLocation::HandleWeakScript(
    const v8::WeakCallbackInfo<void>& data) {
  UnresolvedLocation* loc =
      reinterpret_cast<UnresolvedLocation*>(data.GetParameter());
  GlobalHandles::Destroy(reinterpret_cast<Object**>(loc->script_.location()));
  loc->script_ = Handle<Script>::null();
}
} // namespace internal
} // namespace v8
| weolar/miniblink49 | v8_6_7/src/profiler/allocation-tracker.cc | C++ | apache-2.0 | 9,354 |
package config
import (
. "gopkg.in/check.v1"
"gopkg.in/src-d/go-git.v4/plumbing"
)
// BranchSuite groups the branch-configuration tests; registering an instance
// with gocheck's Suite() makes them run under `go test`.
type BranchSuite struct{}

var _ = Suite(&BranchSuite{})
// TestValidateName checks that Validate accepts a branch with a name and
// rejects one without.
func (b *BranchSuite) TestValidateName(c *C) {
	withName := Branch{
		Name:   "master",
		Remote: "some_remote",
		Merge:  "refs/heads/master",
	}

	withoutName := Branch{
		Remote: "some_remote",
		Merge:  "refs/heads/master",
	}

	c.Assert(withName.Validate(), IsNil)
	c.Assert(withoutName.Validate(), NotNil)
}
// TestValidateMerge checks that Validate accepts a proper refspec in Merge
// and rejects an arbitrary string.
func (b *BranchSuite) TestValidateMerge(c *C) {
	validMerge := Branch{
		Name:   "master",
		Remote: "some_remote",
		Merge:  "refs/heads/master",
	}

	invalidMerge := Branch{
		Name:   "master",
		Remote: "some_remote",
		Merge:  "blah",
	}

	c.Assert(validMerge.Validate(), IsNil)
	c.Assert(invalidMerge.Validate(), NotNil)
}
// TestMarshall verifies that a branch with tracking information is written
// under a [branch "..."] section with remote, merge and rebase keys.
func (b *BranchSuite) TestMarshall(c *C) {
	expected := []byte(`[core]
	bare = false
[branch "branch-tracking-on-clone"]
	remote = fork
	merge = refs/heads/branch-tracking-on-clone
	rebase = interactive
`)

	cfg := NewConfig()
	cfg.Branches["branch-tracking-on-clone"] = &Branch{
		Name:   "branch-tracking-on-clone",
		Remote: "fork",
		Merge:  plumbing.ReferenceName("refs/heads/branch-tracking-on-clone"),
		Rebase: "interactive",
	}

	actual, err := cfg.Marshal()
	c.Assert(err, IsNil)
	c.Assert(string(actual), Equals, string(expected))
}
// TestUnmarshall verifies that a [branch "..."] section is parsed back into a
// Branch with remote, merge refspec and rebase mode populated.
func (b *BranchSuite) TestUnmarshall(c *C) {
	input := []byte(`[core]
	bare = false
[branch "branch-tracking-on-clone"]
	remote = fork
	merge = refs/heads/branch-tracking-on-clone
	rebase = interactive
`)

	cfg := NewConfig()
	err := cfg.Unmarshal(input)
	c.Assert(err, IsNil)

	branch := cfg.Branches["branch-tracking-on-clone"]
	c.Assert(branch.Name, Equals, "branch-tracking-on-clone")
	c.Assert(branch.Remote, Equals, "fork")
	c.Assert(branch.Merge, Equals, plumbing.ReferenceName("refs/heads/branch-tracking-on-clone"))
	c.Assert(branch.Rebase, Equals, "interactive")
}
| Miciah/origin | vendor/gopkg.in/src-d/go-git.v4/config/branch_test.go | GO | apache-2.0 | 1,899 |
//
// Copyright 2012 Alin Dobra and Christopher Jermaine
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef _FILESCANNER_H_
#define _FILESCANNER_H_
#include "ID.h"
#include "EventProcessor.h"
// include the base class definition
#include "EventProcessor.h"
// include the implementation definition
#include "FileScannerImp.h"
/** Class to provide an interface to FileScannerImp class.
    See FileScannerImp.h for a description of the functions
    and behavior of the class
*/
class FileScanner : public EventProcessor {
public:
  // constructor (creates the implementation object and stores it in the
  // base-class evProc pointer)
  FileScanner(const char * _metadataFile, const char* _scannerName, EventProcessor& _concurencyCorntroller, EventProcessor& _scheduler){
    evProc = new FileScannerImp(_metadataFile, _scannerName, _concurencyCorntroller, _scheduler);
  }

  // default constructor: no implementation attached
  FileScanner(void){
    evProc = NULL;
  }

  // Forwards to FileScannerImp::GetID(). NOTE(review): the dynamic_cast on a
  // reference throws std::bad_cast if evProc is not a FileScannerImp, and
  // dereferencing evProc from the default-constructed state is invalid.
  TableScanID GetID(void){
    FileScannerImp& obj = dynamic_cast<FileScannerImp&>(*evProc);
    return obj.GetID();
  }

  // the virtual destructor
  virtual ~FileScanner(){}
};
#endif // _FILESCANNER_H_
| RayZ-O/grokit | src/DiskIO/headers/FileScanner.h | C | apache-2.0 | 1,648 |
define(function(require, exports, module) {
  var Notify = require('common/bootstrap-notify');
  require("jquery.bootstrap-datetimepicker");
  var validator = require('bootstrap.validator');

  // Wires up the start/end date pickers on the live-lesson search form and
  // constrains their selectable ranges according to the chosen lesson status.
  exports.run = function() {
    var now = new Date();

    // Keep the two pickers mutually consistent: picking a start date caps the
    // earliest selectable end date, and vice versa.
    $("#startDate").datetimepicker({
      autoclose: true
    }).on('changeDate', function() {
      $("#endDate").datetimepicker('setStartDate', $("#startDate").val().substring(0, 16));
    });

    $("#startDate").datetimepicker('setEndDate', $("#endDate").val().substring(0, 16));

    $("#endDate").datetimepicker({
      autoclose: true
    }).on('changeDate', function() {
      $("#startDate").datetimepicker('setEndDate', $("#endDate").val().substring(0, 16));
    });

    $("#endDate").datetimepicker('setStartDate', $("#startDate").val().substring(0, 16));

    // Ended lessons: both dates must lie in the past.
    if ($("#status").val() == 'end') {
      $("#endDate").datetimepicker('setEndDate', now);
      $("#startDate").datetimepicker('setEndDate', now);
    }

    // Upcoming lessons: both dates must lie in the future.
    if ($("#status").val() == 'coming') {
      $("#startDate").datetimepicker('setStartDate', now);
      $("#endDate").datetimepicker('setStartDate', now); // search by start time only; the end time is not considered
    }

    // In-progress lessons: started in the past, ending in the future.
    if ($("#status").val() == 'underway') {
      $("#startDate").datetimepicker('setEndDate', now);
      $("#endDate").datetimepicker('setStartDate', now);
    }

    // .datetimepicker('setEndDate', x) can be read as "must be < x"
    // .datetimepicker('setStartDate', x) can be read as "must be > x"

    // Clicking a row refreshes its "max online" cell from the server.
    $('#course-table').on('click', 'tbody tr span ', function() {
      $.get($(this).parent().data('url'), function(data) {
        $('#course-table tbody tr td')[4].innerHTML = data.maxOnlineNum;
      });
    });
  };
});
/*
Copyright 2016 Mozilla
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
*/
import { put, apply } from 'redux-saga/effects';
import { infallible, takeLatestMultiple } from '../../shared/util/saga-util';
import userAgentHttpClient from '../../../shared/user-agent-http-client';
import * as EffectTypes from '../constants/effect-types';
import * as MainActions from '../actions/main-actions';
// Root saga: registers latest-wins handlers for the history and stars fetch
// effects; `infallible` keeps a failing fetch from killing the saga.
export default function*() {
  yield takeLatestMultiple({ infallible, logger: console },
    [EffectTypes.FETCH_HISTORY, fetchHistory],
    [EffectTypes.FETCH_STARS, fetchStars],
  );
}
// Fetches visited pages — a full-text query when `query` is set, otherwise
// the plain visit list — and dispatches the results to the store.
export function* fetchHistory({ query, limit }) {
  let visitedPages;
  if (query) {
    visitedPages = yield apply(userAgentHttpClient, userAgentHttpClient.query, [{
      limit,
      text: query,
      snippetSize: 'large',
    }]);
  } else {
    visitedPages = yield apply(userAgentHttpClient, userAgentHttpClient.visited, [{
      limit,
    }]);
  }
  yield put(MainActions.showHistory(visitedPages.results));
}
// Fetches up to `limit` starred pages and dispatches them to the store.
export function* fetchStars({ limit }) {
  const response = yield apply(userAgentHttpClient, userAgentHttpClient.stars, [{ limit }]);
  yield put(MainActions.showStars(response.results));
}
| jsantell/tofino | app/ui/content/sagas/index.js | JavaScript | apache-2.0 | 1,632 |
# Copyright (c) 2003-2013 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# $Id$
#
# Description:
# EAP packets
#
# Author:
# Aureliano Calvo
from impacket.helper import ProtocolPacket, Byte, Word, Long, ThreeBytesBigEndian
DOT1X_AUTHENTICATION = 0x888E
class EAPExpanded(ProtocolPacket):
    """EAP expanded data according to RFC 3748, section 5.7"""

    # Vendor SMI code of the Wi-Fi Alliance.
    WFA_SMI = 0x00372a
    # Vendor-type value for Wi-Fi Simple Configuration (WPS).
    SIMPLE_CONFIG = 0x00000001

    header_size = 7
    tail_size = 0

    # Three-byte big-endian vendor SMI code at offset 0.
    vendor_id = ThreeBytesBigEndian(0)
    # Big-endian vendor-type field at offset 3.
    vendor_type = Long(3, ">")
class EAPR(ProtocolPacket):
    """It represents a request or a response in EAP (codes 1 and 2)"""

    # EAP method types (RFC 3748, section 5).
    IDENTITY = 0x01
    EXPANDED = 0xfe

    header_size = 1
    tail_size = 0

    # EAP method type carried in the first payload byte.
    type = Byte(0)
class EAP(ProtocolPacket):
    """EAP packet header (RFC 3748, section 4)."""

    # EAP code values.
    REQUEST = 0x01
    RESPONSE = 0x02
    SUCCESS = 0x03
    FAILURE = 0x04

    header_size = 4
    tail_size = 0

    # Code (request/response/success/failure) at offset 0.
    code = Byte(0)
    # Identifier used to match responses with requests.
    identifier = Byte(1)
    # Big-endian length field at offset 2.
    length = Word(2, ">")
class EAPOL(ProtocolPacket):
    """EAPOL (802.1X) frame header."""

    # EAPOL packet types.
    EAP_PACKET = 0x00
    EAPOL_START = 0x01
    EAPOL_LOGOFF = 0x02
    EAPOL_KEY = 0x03
    EAPOL_ENCAPSULATED_ASF_ALERT = 0x04

    DOT1X_VERSION = 0x01

    header_size = 4
    tail_size = 0

    # Protocol version at offset 0 (see DOT1X_VERSION).
    version = Byte(0)
    # One of the EAPOL packet-type constants above.
    packet_type = Byte(1)
    # Big-endian body length at offset 2.
    body_length = Word(2, ">")
| hecchi777/S3-SlaacSecuritySolution | impacket-0.9.11/impacket/eap.py | Python | apache-2.0 | 1,437 |
package io.anyway.sherlock.datasource.support.strategy;
import javax.sql.DataSource;
import io.anyway.sherlock.datasource.PartitionDataSource;
import io.anyway.sherlock.datasource.support.WeightDataSourceProxy;
/**
 * Weight-based datasource selection strategy, registered as {@code weight-m}.
 * Unlike the plain weight strategy it feeds the master datasource's configured
 * weight into the weighted pick.
 */
public class WeightStrategyWithMasterSupport extends WeightStrategySupport {

    /**
     * Picks a datasource by weight, passing the master's weight into the draw.
     * NOTE(review): exact selection semantics live in
     * WeightStrategySupport#getDataSourceByWeight — confirm there.
     */
    @Override
    public DataSource getSlaveDataSource(PartitionDataSource pds) {
        return getDataSourceByWeight(pds,((WeightDataSourceProxy)pds.getMasterDataSource()).getWeight());
    }

    /** Strategy registry name. */
    @Override
    public String getStrategyName(){
        return "weight-m";
    }
}
| balanacebeam/PDDL | sherlock-core/src/main/java/io/anyway/sherlock/datasource/support/strategy/WeightStrategyWithMasterSupport.java | Java | apache-2.0 | 545 |
#
# Copyright 2017 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::dlink::dgs3100::snmp::mode::components::psu;
use strict;
use warnings;
# rlEnvMonSupplyState integer value -> human readable status.
my %map_states = (
    1 => 'normal',
    2 => 'warning',
    3 => 'critical',
    4 => 'shutdown',
    5 => 'notPresent',
    6 => 'notFunctioning',
);

# In MIB 'env_mib.mib'
# Columns of the power-supply status table that we read.
my $mapping = {
    rlEnvMonSupplyStatusDescr => { oid => '.1.3.6.1.4.1.171.10.94.89.89.83.1.2.1.2' },
    rlEnvMonSupplyState => { oid => '.1.3.6.1.4.1.171.10.94.89.89.83.1.2.1.3', map => \%map_states },
};
# Table entry OID covering both columns above.
my $oid_rlEnvMonSupplyStatusEntry = '.1.3.6.1.4.1.171.10.94.89.89.83.1.2.1';
# Queues the SNMP request for the power-supply status table; results are
# available in $self->{results} when check() runs.
sub load {
    my ($self) = @_;

    push @{$self->{request}}, { oid => $oid_rlEnvMonSupplyStatusEntry };
}
# Walks the power-supply table, reports each supply's state and raises the
# plugin severity for any non-OK supply. Honors the user's section/instance
# filters and absent-problem settings.
sub check {
    my ($self) = @_;

    $self->{output}->output_add(long_msg => "Checking power supplies");
    $self->{components}->{psu} = {name => 'psus', total => 0, skip => 0};
    return if ($self->check_filter(section => 'psu'));

    foreach my $oid ($self->{snmp}->oid_lex_sort(keys %{$self->{results}->{$oid_rlEnvMonSupplyStatusEntry}})) {
        # Iterate once per row by keying on the description column only.
        next if ($oid !~ /^$mapping->{rlEnvMonSupplyStatusDescr}->{oid}\.(.*)$/);
        my $instance = $1;
        my $result = $self->{snmp}->map_instance(mapping => $mapping, results => $self->{results}->{$oid_rlEnvMonSupplyStatusEntry}, instance => $instance);

        next if ($self->check_filter(section => 'psu', instance => $result->{rlEnvMonSupplyStatusDescr}));
        # 'notPresent' supplies are skipped unless configured as a problem.
        next if ($result->{rlEnvMonSupplyState} eq 'notPresent' &&
                 $self->absent_problem(section => 'psu', instance => $result->{rlEnvMonSupplyStatusDescr}));

        $self->{components}->{psu}->{total}++;
        $self->{output}->output_add(long_msg => sprintf("Power supply '%s' status is %s.",
                                    $result->{rlEnvMonSupplyStatusDescr}, $result->{rlEnvMonSupplyState}
                                    ));
        my $exit = $self->get_severity(section => 'psu', value => $result->{rlEnvMonSupplyState});
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit,
                                        short_msg => sprintf("Power supply '%s' status is %s",
                                                             $result->{rlEnvMonSupplyStatusDescr}, $result->{rlEnvMonSupplyState}));
        }
    }
}
1; | Shini31/centreon-plugins | network/dlink/dgs3100/snmp/mode/components/psu.pm | Perl | apache-2.0 | 3,112 |
/*
* Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.wso2.maven.p2.exceptions;
/**
 * Thrown when a requested Carbon artifact cannot be located.
 *
 * @since 2.0.0
 */
public class CarbonArtifactNotFoundException extends Exception {

    private static final long serialVersionUID = 1L;

    /**
     * Constructs the CarbonArtifactNotFoundException.
     *
     * @param message message of the exception.
     */
    public CarbonArtifactNotFoundException(String message) {
        super(message);
    }

    /**
     * Constructs the CarbonArtifactNotFoundException with an underlying cause,
     * preserving the original stack trace for diagnostics.
     *
     * @param message message of the exception.
     * @param cause   the exception that triggered this one.
     */
    public CarbonArtifactNotFoundException(String message, Throwable cause) {
        super(message, cause);
    }
}
| wso2/carbon-maven-plugins | carbon-feature-plugin/src/main/java/org/wso2/maven/p2/exceptions/CarbonArtifactNotFoundException.java | Java | apache-2.0 | 1,081 |
/**
Copyright 2015 The Chromium Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
**/
require("./trace_code_entry.js");
'use strict';
global.tr.exportTo('tr.e.importer', function() {
  // This code is a tracification of:
  // devtools/front_end/timeline/TimelineJSProfile.js

  // Maps code addresses to TraceCodeEntry objects. The 64-bit address space
  // is split into banks of 2^52 addresses so that in-bank offsets fit
  // exactly into a double's 53-bit mantissa.
  function TraceCodeMap() {
    this.banks_ = new Map();
  }

  TraceCodeMap.prototype = {
    // Registers a code range of |size| bytes at |addressHex| (a "0x..."
    // string) for the function |name| defined in script |scriptId|.
    addEntry: function(addressHex, size, name, scriptId) {
      var entry = new tr.e.importer.TraceCodeEntry(
          this.getAddress_(addressHex), size, name, scriptId);

      this.addEntry_(addressHex, entry);
    },

    // Relocates the entry at |oldAddressHex| to |newAddressHex|, updating its
    // size. No-op when the old address is unknown.
    moveEntry: function(oldAddressHex, newAddressHex, size) {
      var entry = this.getBank_(oldAddressHex)
          .removeEntry(this.getAddress_(oldAddressHex));
      if (!entry)
        return;

      entry.address = this.getAddress_(newAddressHex);
      entry.size = size;
      this.addEntry_(newAddressHex, entry);
    },

    // Returns the entry whose range covers |addressHex|, or undefined.
    lookupEntry: function(addressHex) {
      return this.getBank_(addressHex)
          .lookupEntry(this.getAddress_(addressHex));
    },

    addEntry_: function(addressHex, entry) {
      // FIXME: Handle bank spanning addresses ...
      this.getBank_(addressHex).addEntry(entry);
    },

    // Returns the in-bank offset encoded by |addressHex|.
    getAddress_: function(addressHex) {
      // 13 hex digits == 52 bits, double mantissa fits 53 bits.
      var bankSizeHexDigits = 13;
      addressHex = addressHex.slice(2); // cut 0x prefix.
      return parseInt(addressHex.slice(-bankSizeHexDigits), 16);
    },

    // Returns (creating on demand) the bank holding |addressHex|.
    getBank_: function(addressHex) {
      addressHex = addressHex.slice(2); // cut 0x prefix.

      // 13 hex digits == 52 bits, double mantissa fits 53 bits.
      var bankSizeHexDigits = 13;
      var maxHexDigits = 16;
      var bankName = addressHex.slice(-maxHexDigits, -bankSizeHexDigits);

      var bank = this.banks_.get(bankName);
      if (!bank) {
        bank = new TraceCodeBank();
        this.banks_.set(bankName, bank);
      }
      return bank;
    }
  };

  // One bank: code entries kept sorted by ascending in-bank address.
  function TraceCodeBank() {
    this.entries_ = [];
  }

  TraceCodeBank.prototype = {
    // Removes and returns the entry that starts exactly at |address|, or
    // undefined when there is none.
    removeEntry: function(address) {
      // findLowIndexInSortedArray returns 1 for empty. Just handle the
      // empty list and bail early.
      if (this.entries_.length === 0)
        return undefined;

      var index = tr.b.findLowIndexInSortedArray(
          this.entries_, function(entry) { return entry.address; }, address);
      var entry = this.entries_[index];
      if (!entry || entry.address !== address)
        return undefined;

      this.entries_.splice(index, 1);
      return entry;
    },

    // Returns the entry whose [address, address + size) range covers
    // |address|, or undefined.
    lookupEntry: function(address) {
      var index = tr.b.findHighIndexInSortedArray(
          this.entries_, function(e) { return address - e.address; }) - 1;
      var entry = this.entries_[index];
      return entry &&
          address < entry.address + entry.size ? entry : undefined;
    },

    // Inserts |newEntry|, evicting any existing entries its range overlaps.
    addEntry: function(newEntry) {
      // An empty bank needs no overlap scan: insert and bail out. (This used
      // to fall through and redundantly re-splice the freshly pushed entry.)
      if (this.entries_.length === 0) {
        this.entries_.push(newEntry);
        return;
      }

      var endAddress = newEntry.address + newEntry.size;
      var lastIndex = tr.b.findLowIndexInSortedArray(
          this.entries_, function(entry) { return entry.address; }, endAddress);
      // Walk left over every entry overlapping [newEntry.address, endAddress).
      var index;
      for (index = lastIndex - 1; index >= 0; --index) {
        var entry = this.entries_[index];
        var entryEndAddress = entry.address + entry.size;
        if (entryEndAddress <= newEntry.address)
          break;
      }
      ++index;
      this.entries_.splice(index, lastIndex - index, newEntry);
    }
  };

  return {
    TraceCodeMap: TraceCodeMap
  };
});
#!/bin/bash
#
# Copyright (c) 2013-%%copyright.year%% Commonwealth Computer Research, Inc.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0 which
# accompanies this distribution and is available at
# http://www.opensource.org/licenses/apache2.0.php.
#
# This script will attempt to install the client dependencies for hadoop (for GeoMesa HBase)
# into a given directory. Usually this is used to install the deps into either the
# geomesa tools lib dir or the WEB-INF/lib dir of geoserver.
# Client versions to install (tokens substituted at build time).
hadoop_version="%%hadoop.version.recommended%%"
zookeeper_version="%%zookeeper.version.recommended%%"
# this version required for hadoop 2.8, earlier hadoop versions use 3.1.0-incubating
htrace_core_version="4.1.0-incubating"

# These are needed for Hadoop and to work
# These will depend on the specific hadoop versions
guava_version="%%hbase.guava.version%%"
com_log_version="1.1.3"
netty3_version="3.6.2.Final"
netty4_version="%%netty.version%%"

# Load common functions and setup
if [ -z "${%%gmtools.dist.name%%_HOME}" ]; then
  export %%gmtools.dist.name%%_HOME="$(cd "`dirname "$0"`"/..; pwd)"
fi
. $%%gmtools.dist.name%%_HOME/bin/common-functions.sh

# Destination directory; defaults to the tools lib dir when no arg is given.
install_dir="${1:-${%%gmtools.dist.name%%_HOME}/lib}"
# Resource download location (overridable via GEOMESA_MAVEN_URL).
base_url="${GEOMESA_MAVEN_URL:-https://search.maven.org/remotecontent?filepath=}"

# Core Hadoop/ZooKeeper client jars plus their transitive runtime deps.
declare -a urls=(
  "${base_url}org/apache/zookeeper/zookeeper/${zookeeper_version}/zookeeper-${zookeeper_version}.jar"
  "${base_url}commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar"
  "${base_url}org/apache/hadoop/hadoop-auth/${hadoop_version}/hadoop-auth-${hadoop_version}.jar"
  "${base_url}org/apache/hadoop/hadoop-client/${hadoop_version}/hadoop-client-${hadoop_version}.jar"
  "${base_url}org/apache/hadoop/hadoop-common/${hadoop_version}/hadoop-common-${hadoop_version}.jar"
  "${base_url}org/apache/hadoop/hadoop-hdfs/${hadoop_version}/hadoop-hdfs-${hadoop_version}.jar"
  "${base_url}org/apache/hadoop/hadoop-hdfs-client/${hadoop_version}/hadoop-hdfs-client-${hadoop_version}.jar"
  "${base_url}org/apache/hadoop/hadoop-mapreduce-client-core/${hadoop_version}/hadoop-mapreduce-client-core-${hadoop_version}.jar"
  "${base_url}commons-logging/commons-logging/${com_log_version}/commons-logging-${com_log_version}.jar"
  "${base_url}commons-cli/commons-cli/1.2/commons-cli-1.2.jar"
  "${base_url}commons-io/commons-io/2.5/commons-io-2.5.jar"
  "${base_url}javax/servlet/servlet-api/2.4/servlet-api-2.4.jar"
  "${base_url}io/netty/netty-all/${netty4_version}/netty-all-${netty4_version}.jar"
  "${base_url}io/netty/netty/${netty3_version}/netty-${netty3_version}.jar"
  "${base_url}com/yammer/metrics/metrics-core/2.2.0/metrics-core-2.2.0.jar"
)
# Extract major/minor/patch components of the zookeeper version.
zk_maj_ver="$(expr match "$zookeeper_version" '\([0-9][0-9]*\)\.')"
zk_min_ver="$(expr match "$zookeeper_version" '[0-9][0-9]*\.\([0-9][0-9]*\)')"
zk_bug_ver="$(expr match "$zookeeper_version" '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\)')"

# zookeeper-jute is required for zookeeper >= 3.5.5. Compare components
# lexicographically: the previous check required every component to be >= its
# threshold, which wrongly excluded versions such as 3.6.0.
if [[ "$zk_maj_ver" -gt 3 ]] \
  || { [[ "$zk_maj_ver" -eq 3 && "$zk_min_ver" -gt 5 ]]; } \
  || { [[ "$zk_maj_ver" -eq 3 && "$zk_min_ver" -eq 5 && "$zk_bug_ver" -ge 5 ]]; }; then
  urls+=("${base_url}org/apache/zookeeper/zookeeper-jute/$zookeeper_version/zookeeper-jute-$zookeeper_version.jar")
fi

# The htrace artifact was renamed (htrace-core -> htrace-core4) in major
# version 4; compare the first digit to pick the right name.
if [[ "${htrace_core_version%%.*}" -lt 4 ]]; then
  urls+=("${base_url}org/apache/htrace/htrace-core/${htrace_core_version}/htrace-core-${htrace_core_version}.jar")
else
  urls+=("${base_url}org/apache/htrace/htrace-core4/${htrace_core_version}/htrace-core4-${htrace_core_version}.jar")
fi
# if there's already a guava jar (e.g. geoserver) don't install guava to avoid conflicts
if [ -z "$(find -L $install_dir -maxdepth 1 -name 'guava-*' -print -quit)" ]; then
  urls+=("${base_url}com/google/guava/guava/${guava_version}/guava-${guava_version}.jar")
fi

# Fetch everything into the target directory (helper from common-functions.sh).
downloadUrls "$install_dir" urls[@]
| aheyne/geomesa | geomesa-hbase/geomesa-hbase-tools/bin/install-hadoop.sh | Shell | apache-2.0 | 4,001 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/sagemaker/model/DeleteModelRequest.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::SageMaker::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
// Default-constructs the request with no model name set.
DeleteModelRequest::DeleteModelRequest() :
    m_modelNameHasBeenSet(false)
{
}
// Serializes the request body as JSON; only fields that were explicitly set
// are emitted.
Aws::String DeleteModelRequest::SerializePayload() const
{
  JsonValue payload;

  if(m_modelNameHasBeenSet)
  {
   payload.WithString("ModelName", m_modelName);
  }

  return payload.View().WriteReadable();
}
// Adds the X-Amz-Target header that routes the call to SageMaker.DeleteModel.
Aws::Http::HeaderValueCollection DeleteModelRequest::GetRequestSpecificHeaders() const
{
  Aws::Http::HeaderValueCollection headers;
  headers.insert(Aws::Http::HeaderValuePair("X-Amz-Target", "SageMaker.DeleteModel"));
  return headers;
}
| awslabs/aws-sdk-cpp | aws-cpp-sdk-sagemaker/source/model/DeleteModelRequest.cpp | C++ | apache-2.0 | 884 |
package org.apache.maven.model.transform;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Optional;
import java.util.function.Function;
import org.codehaus.plexus.util.xml.pull.XmlPullParser;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class ParentXMLFilterTest
extends AbstractXMLFilterTests
{
private Function<XmlPullParser, ParentXMLFilter> filterCreator;
    /** Clears any per-test filter factory installed by a previous test. */
    @BeforeEach
    void reset() {
        filterCreator = null;
    }
@Override
protected ParentXMLFilter getFilter( XmlPullParser parser )
{
Function<XmlPullParser, ParentXMLFilter> filterCreator =
(this.filterCreator != null ? this.filterCreator : this::createFilter);
return filterCreator.apply(parser);
}
    /** Default factory: resolves every relative path to GROUPID:ARTIFACTID:1.0.0. */
    protected ParentXMLFilter createFilter( XmlPullParser parser ) {
        return createFilter( parser,
                             x -> Optional.of(new RelativeProject("GROUPID", "ARTIFACTID", "1.0.0")),
                             Paths.get( "pom.xml").toAbsolutePath() );
    }
protected ParentXMLFilter createFilter( XmlPullParser parser, Function<Path, Optional<RelativeProject>> pathMapper, Path projectPath ) {
ParentXMLFilter filter = new ParentXMLFilter( parser, pathMapper, projectPath );
return filter;
}
    /** An empty parent element must pass through unchanged. */
    @Test
    public void testMinimum()
        throws Exception
    {
        String input = "<project><parent /></project>";
        String expected = input;
        String actual = transform( input );
        assertEquals( expected, actual );
    }
    /** A fully specified parent (GAV, no relativePath) needs no enrichment. */
    @Test
    public void testNoRelativePath()
        throws Exception
    {
        String input = "<project><parent>"
            + "<groupId>GROUPID</groupId>"
            + "<artifactId>ARTIFACTID</artifactId>"
            + "<version>VERSION</version>"
            + "</parent></project>";
        String expected = input;
        String actual = transform( input );
        assertEquals( expected, actual );
    }
@Test
public void testDefaultRelativePath()
throws Exception
{
String input = "<project>\n"
+ " <parent>\n"
+ " <groupId>GROUPID</groupId>\n"
+ " <artifactId>ARTIFACTID</artifactId>\n"
+ " </parent>\n"
+ "</project>";
String expected = "<project>" + System.lineSeparator()
+ " <parent>" + System.lineSeparator()
+ " <groupId>GROUPID</groupId>" + System.lineSeparator()
+ " <artifactId>ARTIFACTID</artifactId>" + System.lineSeparator()
+ " <version>1.0.0</version>" + System.lineSeparator()
+ " </parent>" + System.lineSeparator()
+ "</project>";
String actual = transform( input );
assertEquals( expected, actual );
}
/**
* An empty relative path means it must downloaded from a repository.
* That implies that the version cannot be solved (if missing, Maven should complain)
*
* @throws Exception
*/
@Test
public void testEmptyRelativePathNoVersion()
throws Exception
{
String input = "<project><parent>"
+ "<groupId>GROUPID</groupId>"
+ "<artifactId>ARTIFACTID</artifactId>"
+ "<relativePath></relativePath>"
+ "</parent></project>";
String expected = "<project><parent>"
+ "<groupId>GROUPID</groupId>"
+ "<artifactId>ARTIFACTID</artifactId>"
+ "<relativePath />" // SAX optimization, however "" != null ...
+ "</parent></project>";
String actual = transform( input );
assertEquals( expected, actual );
}
@Test
public void testNoVersion()
throws Exception
{
String input = "<project><parent>"
+ "<groupId>GROUPID</groupId>"
+ "<artifactId>ARTIFACTID</artifactId>"
+ "<relativePath>RELATIVEPATH</relativePath>"
+ "</parent></project>";
String expected = "<project><parent>"
+ "<groupId>GROUPID</groupId>"
+ "<artifactId>ARTIFACTID</artifactId>"
+ "<relativePath>RELATIVEPATH</relativePath>"
+ "<version>1.0.0</version>"
+ "</parent></project>";
String actual = transform( input );
assertEquals( expected, actual );
}
@Test
public void testInvalidRelativePath()
throws Exception
{
filterCreator = parser -> createFilter(parser, x -> Optional.ofNullable( null ), Paths.get( "pom.xml").toAbsolutePath() );
String input = "<project><parent>"
+ "<groupId>GROUPID</groupId>"
+ "<artifactId>ARTIFACTID</artifactId>"
+ "<relativePath>RELATIVEPATH</relativePath>"
+ "</parent></project>";
String expected = input;
String actual = transform( input );
assertEquals( expected, actual );
}
@Test
public void testRelativePathAndVersion()
throws Exception
{
String input = "<project><parent>"
+ "<groupId>GROUPID</groupId>"
+ "<artifactId>ARTIFACTID</artifactId>"
+ "<relativePath>RELATIVEPATH</relativePath>"
+ "<version>1.0.0</version>"
+ "</parent></project>";
String expected = "<project><parent>"
+ "<groupId>GROUPID</groupId>"
+ "<artifactId>ARTIFACTID</artifactId>"
+ "<relativePath>RELATIVEPATH</relativePath>"
+ "<version>1.0.0</version>"
+ "</parent></project>";
String actual = transform( input );
assertEquals( expected, actual );
}
@Test
public void testWithWeirdNamespace()
throws Exception
{
String input = "<relativePath:project xmlns:relativePath=\"relativePath\">"
+ "<relativePath:parent>"
+ "<relativePath:groupId>GROUPID</relativePath:groupId>"
+ "<relativePath:artifactId>ARTIFACTID</relativePath:artifactId>"
+ "<relativePath:relativePath>RELATIVEPATH</relativePath:relativePath>"
+ "</relativePath:parent></relativePath:project>";
String expected = "<relativePath:project xmlns:relativePath=\"relativePath\">"
+ "<relativePath:parent>"
+ "<relativePath:groupId>GROUPID</relativePath:groupId>"
+ "<relativePath:artifactId>ARTIFACTID</relativePath:artifactId>"
+ "<relativePath:relativePath>RELATIVEPATH</relativePath:relativePath>"
+ "<relativePath:version>1.0.0</relativePath:version>"
+ "</relativePath:parent>"
+ "</relativePath:project>";
String actual = transform( input );
assertEquals( expected, actual );
}
@Test
public void comment()
throws Exception
{
String input = "<project><!--before--><parent>"
+ "<groupId>GROUPID</groupId>"
+ "<artifactId>ARTIFACTID</artifactId>"
+ "<!--version here-->"
+ "</parent>"
+ "</project>";
String expected = "<project><!--before--><parent>"
+ "<groupId>GROUPID</groupId>"
+ "<artifactId>ARTIFACTID</artifactId>"
+ "<!--version here-->"
+ "<version>1.0.0</version>"
+ "</parent>"
+ "</project>";
String actual = transform( input );
assertEquals( expected, actual );
}
@Test
public void testIndent()
throws Exception
{
String input = "<project>\n"
+ " <parent>\n"
+ " <groupId>GROUPID</groupId>\n"
+ " <artifactId>ARTIFACTID</artifactId>\n"
+ " <!--version here-->\n"
+ " </parent>\n"
+ "</project>";
String expected = "<project>" + System.lineSeparator()
+ " <parent>" + System.lineSeparator()
+ " <groupId>GROUPID</groupId>" + System.lineSeparator()
+ " <artifactId>ARTIFACTID</artifactId>" + System.lineSeparator()
+ " <!--version here-->" + System.lineSeparator()
+ " <version>1.0.0</version>" + System.lineSeparator()
+ " </parent>" + System.lineSeparator()
+ "</project>";
String actual = transform( input );
assertEquals( expected, actual );
}
}
| apache/maven | maven-model-transform/src/test/java/org/apache/maven/model/transform/ParentXMLFilterTest.java | Java | apache-2.0 | 9,688 |
/**
*
* Copyright 2003-2007 Jive Software.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jivesoftware.smackx.workgroup.agent;
import java.io.IOException;
import org.jivesoftware.smack.SmackException.NotConnectedException;
import org.jivesoftware.smack.XMPPConnection;
import org.jivesoftware.smack.packet.IQ;
import org.jivesoftware.smack.packet.SimpleIQ;
import org.jivesoftware.smack.provider.IQProvider;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
/**
 * IQ stanza sent by the workgroup service confirming an offer: it carries the
 * user JID and session id of the accepted offer, and lets the agent notify
 * the service of the chat room that was created for the session.
 */
public class OfferConfirmation extends SimpleIQ {
    private String userJID;
    private long sessionID;

    public OfferConfirmation() {
        super("offer-confirmation", "http://jabber.org/protocol/workgroup");
    }

    /** @return the JID of the user the offer was made for */
    public String getUserJID() {
        return userJID;
    }

    public void setUserJID(String userJID) {
        this.userJID = userJID;
    }

    /** @return the workgroup session id of the confirmed offer */
    public long getSessionID() {
        return sessionID;
    }

    public void setSessionID(long sessionID) {
        this.sessionID = sessionID;
    }

    /**
     * Notifies the workgroup service of the room created for this offer.
     *
     * @param con             connection used to send the notification
     * @param workgroup       JID of the workgroup service
     * @param createdRoomName name of the room that was created
     * @throws NotConnectedException if the connection is not connected
     */
    public void notifyService(XMPPConnection con, String workgroup, String createdRoomName) throws NotConnectedException {
        NotifyServicePacket packet = new NotifyServicePacket(workgroup, createdRoomName);
        con.sendStanza(packet);
    }

    /** Parses an {@code offer-confirmation} IQ from the incoming XML stream. */
    public static class Provider extends IQProvider<OfferConfirmation> {

        @Override
        public OfferConfirmation parse(XmlPullParser parser, int initialDepth)
                        throws XmlPullParserException, IOException {
            final OfferConfirmation confirmation = new OfferConfirmation();
            boolean done = false;
            while (!done) {
                parser.next();
                String elementName = parser.getName();
                if (parser.getEventType() == XmlPullParser.START_TAG && "user-jid".equals(elementName)) {
                    // nextText() returns a plain string; no NumberFormatException
                    // is possible here (the original, pointless catch was removed).
                    confirmation.setUserJID(parser.nextText());
                }
                else if (parser.getEventType() == XmlPullParser.START_TAG && "session-id".equals(elementName)) {
                    try {
                        confirmation.setSessionID(Long.parseLong(parser.nextText()));
                    }
                    catch (NumberFormatException ignored) {
                        // Best effort, matching previous behavior: a malformed
                        // session id leaves the default value (0).
                    }
                }
                else if (parser.getEventType() == XmlPullParser.END_TAG && "offer-confirmation".equals(elementName)) {
                    done = true;
                }
            }
            return confirmation;
        }
    }

    /**
     * Result IQ notifying the workgroup service of the created room name.
     * Declared {@code static}: it does not use the enclosing instance, so this
     * avoids carrying a hidden reference to the outer {@code OfferConfirmation}.
     */
    private static class NotifyServicePacket extends IQ {
        String roomName;

        NotifyServicePacket(String workgroup, String roomName) {
            super("offer-confirmation", "http://jabber.org/protocol/workgroup");
            this.setTo(workgroup);
            this.setType(IQ.Type.result);
            this.roomName = roomName;
        }

        @Override
        protected IQChildElementXmlStringBuilder getIQChildElementBuilder(IQChildElementXmlStringBuilder xml) {
            xml.attribute("roomname", roomName);
            xml.setEmptyElement();
            return xml;
        }
    }
}
| Soo000/SooChat | src/org/jivesoftware/smackx/workgroup/agent/OfferConfirmation.java | Java | apache-2.0 | 3,847 |
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubeproxyconfig
import (
"fmt"
"sort"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ClientConnectionConfiguration contains details for constructing a client
// that talks to the apiserver (kubeconfig, content negotiation, rate limits).
type ClientConnectionConfiguration struct {
	// kubeConfigFile is the path to a kubeconfig file.
	KubeConfigFile string
	// acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the
	// default value of 'application/json'. This field will control all connections to the server used by a particular
	// client.
	AcceptContentTypes string
	// contentType is the content type used when sending data to the server from this client.
	ContentType string
	// qps controls the number of queries per second allowed for this connection.
	QPS float32
	// burst allows extra queries to accumulate when a client is exceeding its rate
	// (i.e. the maximum burst size on top of qps).
	Burst int
}
// KubeProxyIPTablesConfiguration contains iptables-related configuration
// details for the Kubernetes proxy server. Durations use the metav1.Duration
// string form (e.g. '5s', '1m', '2h22m').
type KubeProxyIPTablesConfiguration struct {
	// masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using
	// the pure iptables proxy mode. Values must be within the range [0, 31].
	MasqueradeBit *int32
	// masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.
	MasqueradeAll bool
	// syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m',
	// '2h22m'). Must be greater than 0.
	SyncPeriod metav1.Duration
	// minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m',
	// '2h22m').
	MinSyncPeriod metav1.Duration
}
// KubeProxyIPVSConfiguration contains ipvs-related configuration
// details for the Kubernetes proxy server.
type KubeProxyIPVSConfiguration struct {
	// syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m',
	// '2h22m'). Must be greater than 0.
	SyncPeriod metav1.Duration
	// minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m',
	// '2h22m').
	MinSyncPeriod metav1.Duration
	// scheduler is the IPVS scheduling method to use; see IPVSSchedulerMethod
	// for the recognized values (e.g. "rr", "wrr", "lc").
	Scheduler string
}
// KubeProxyConntrackConfiguration contains conntrack settings for
// the Kubernetes proxy server.
type KubeProxyConntrackConfiguration struct {
	// max is the maximum number of NAT connections to track (0 to
	// leave as-is). This takes precedence over maxPerCore and min.
	Max int32
	// maxPerCore is the maximum number of NAT connections to track
	// per CPU core (0 to leave the limit as-is and ignore min).
	MaxPerCore int32
	// min is the minimum value of connect-tracking records to allocate,
	// regardless of maxPerCore (set maxPerCore=0 to leave the limit as-is).
	Min int32
	// tcpEstablishedTimeout is how long an idle TCP connection will be kept open
	// (e.g. '2s'). Must be greater than 0.
	TCPEstablishedTimeout metav1.Duration
	// tcpCloseWaitTimeout is how long an idle conntrack entry
	// in CLOSE_WAIT state will remain in the conntrack
	// table. (e.g. '60s'). Must be greater than 0 to set.
	TCPCloseWaitTimeout metav1.Duration
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// KubeProxyConfiguration contains everything necessary to configure the
// Kubernetes proxy server.
type KubeProxyConfiguration struct {
	metav1.TypeMeta

	// featureGates is a comma-separated list of key=value pairs that control
	// which alpha/beta features are enabled.
	//
	// TODO this really should be a map but that requires refactoring all
	// components to use config files because local-up-cluster.sh only supports
	// the --feature-gates flag right now, which is comma-separated key=value
	// pairs.
	FeatureGates string
	// bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0
	// for all interfaces)
	BindAddress string
	// healthzBindAddress is the IP address and port for the health check server to serve on,
	// defaulting to 0.0.0.0:10256
	HealthzBindAddress string
	// metricsBindAddress is the IP address and port for the metrics server to serve on,
	// defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces)
	MetricsBindAddress string
	// enableProfiling enables profiling via web interface on /debug/pprof handler.
	// Profiling handlers will be handled by metrics server.
	EnableProfiling bool
	// clusterCIDR is the CIDR range of the pods in the cluster. It is used to
	// bridge traffic coming from outside of the cluster. If not provided,
	// no off-cluster bridging will be performed.
	ClusterCIDR string
	// hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.
	HostnameOverride string
	// clientConnection specifies the kubeconfig file and client connection settings for the proxy
	// server to use when communicating with the apiserver.
	ClientConnection ClientConnectionConfiguration
	// iptables contains iptables-related configuration options.
	IPTables KubeProxyIPTablesConfiguration
	// ipvs contains ipvs-related configuration options.
	IPVS KubeProxyIPVSConfiguration
	// oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within
	// the range [-1000, 1000]
	OOMScoreAdj *int32
	// mode specifies which proxy mode to use ('userspace', 'iptables' or 'ipvs';
	// see ProxyMode).
	Mode ProxyMode
	// portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
	// in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
	PortRange string
	// resourceContainer is the absolute name of the resource-only container to create and run
	// the Kube-proxy in (Default: /kube-proxy).
	ResourceContainer string
	// udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
	// Must be greater than 0. Only applicable for proxyMode=userspace.
	UDPIdleTimeout metav1.Duration
	// conntrack contains conntrack-related configuration options.
	Conntrack KubeProxyConntrackConfiguration
	// configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater
	// than 0.
	ConfigSyncPeriod metav1.Duration
}
// ProxyMode selects how kube-proxy forwards traffic.
//
// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables'
// (newer, faster). If blank, use the best-available proxy (currently iptables, but may
// change in future versions). If the iptables proxy is selected, regardless of how, but
// the system's kernel or iptables versions are insufficient, this always falls back to the
// userspace proxy.
type ProxyMode string

const (
	// ProxyModeUserspace proxies traffic in user space (older, stable).
	ProxyModeUserspace ProxyMode = "userspace"
	// ProxyModeIPTables proxies via iptables rules (newer, faster).
	ProxyModeIPTables ProxyMode = "iptables"
	// ProxyModeIPVS proxies via the kernel IP Virtual Server.
	ProxyModeIPVS ProxyMode = "ipvs"
)
// IPVSSchedulerMethod is the algorithm for allocating TCP connections and
// UDP datagrams to real servers. Scheduling algorithms are implemented as
// kernel modules. Ten are shipped with the Linux Virtual Server.
type IPVSSchedulerMethod string

const (
	// RoundRobin distributes jobs equally amongst the available real servers.
	RoundRobin IPVSSchedulerMethod = "rr"
	// WeightedRoundRobin assigns jobs to real servers proportionally to their weight.
	// Servers with higher weights receive new jobs first and get more jobs than servers with lower weights.
	// Servers with equal weights get an equal distribution of new jobs.
	WeightedRoundRobin IPVSSchedulerMethod = "wrr"
	// LeastConnection assigns more jobs to real servers with fewer active jobs.
	LeastConnection IPVSSchedulerMethod = "lc"
	// WeightedLeastConnection assigns more jobs to servers with fewer jobs and
	// relative to the real servers' weight (Ci/Wi).
	WeightedLeastConnection IPVSSchedulerMethod = "wlc"
	// LocalityBasedLeastConnection assigns jobs destined for the same IP address to the same server if
	// the server is not overloaded and available; otherwise assign jobs to servers with fewer jobs,
	// and keep it for future assignment.
	LocalityBasedLeastConnection IPVSSchedulerMethod = "lblc"
	// LocalityBasedLeastConnectionWithReplication assigns jobs destined for the same IP address to the
	// least-connection node in the server set for the IP address. If all the nodes in the server set are overloaded,
	// it picks up a node with fewer jobs in the cluster and adds it to the server set for the target.
	// If the server set has not been modified for the specified time, the most loaded node is removed from the server set,
	// in order to avoid high degree of replication.
	LocalityBasedLeastConnectionWithReplication IPVSSchedulerMethod = "lblcr"
	// SourceHashing assigns jobs to servers through looking up a statically assigned hash table
	// by their source IP addresses.
	SourceHashing IPVSSchedulerMethod = "sh"
	// DestinationHashing assigns jobs to servers through looking up a statically assigned hash table
	// by their destination IP addresses.
	DestinationHashing IPVSSchedulerMethod = "dh"
	// ShortestExpectedDelay assigns an incoming job to the server with the shortest expected delay.
	// The expected delay that the job will experience is (Ci + 1) / Ui if sent to the ith server, in which
	// Ci is the number of jobs on the ith server and Ui is the fixed service rate (weight) of the ith server.
	ShortestExpectedDelay IPVSSchedulerMethod = "sed"
	// NeverQueue assigns an incoming job to an idle server if there is one, instead of waiting for a fast one;
	// if all the servers are busy, it adopts the ShortestExpectedDelay policy to assign the job.
	NeverQueue IPVSSchedulerMethod = "nq"
)
// Set assigns the mode from its string form (flag.Value-style setter).
// No validation is performed here; any string is accepted.
func (m *ProxyMode) Set(s string) error {
	*m = ProxyMode(s)
	return nil
}
// String returns the mode's string form, or the empty string for a nil receiver.
func (m *ProxyMode) String() string {
	if m == nil {
		return ""
	}
	return string(*m)
}
// Type returns the name of this value type, as used by flag.Value-style APIs.
func (m *ProxyMode) Type() string {
	return "ProxyMode"
}
// ConfigurationMap is a string-to-string map populated from a comma-separated
// "key=value" flag value (see Set) and rendered back deterministically (see String).
type ConfigurationMap map[string]string
// String renders the map as a comma-separated list of key=value pairs, sorted
// lexicographically by the rendered pair, so the output is deterministic.
func (m *ConfigurationMap) String() string {
	entries := make([]string, 0, len(*m))
	for key, value := range *m {
		entries = append(entries, fmt.Sprintf("%s=%s", key, value))
	}
	sort.Strings(entries)
	return strings.Join(entries, ",")
}
// Set parses a comma-separated list of key=value entries into the map,
// trimming whitespace around keys and values. Entries without '=' are stored
// with an empty value; empty entries are skipped. Always returns nil.
func (m *ConfigurationMap) Set(value string) error {
	for _, entry := range strings.Split(value, ",") {
		if entry == "" {
			continue
		}
		kv := strings.SplitN(entry, "=", 2)
		key := strings.TrimSpace(kv[0])
		if len(kv) == 2 {
			(*m)[key] = strings.TrimSpace(kv[1])
		} else {
			(*m)[key] = ""
		}
	}
	return nil
}
// Type returns the name of this value type, as used by flag.Value-style APIs.
func (*ConfigurationMap) Type() string {
	return "mapStringString"
}
| dchen1107/kubernetes-1 | pkg/proxy/apis/kubeproxyconfig/types.go | GO | apache-2.0 | 10,875 |
#region License
//
// Copyright (c) 2018, Fluent Migrator Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#endregion
using FluentMigrator.Runner.Helpers;
#region License
//
// Copyright (c) 2007-2018, Sean Chambers <schambers80@gmail.com>
// Copyright (c) 2010, Nathan Brown
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#endregion
using System;
using System.Collections.Generic;
using System.Data;
using System.Data.Common;
using System.IO;
using FluentMigrator.Expressions;
using FluentMigrator.Runner.BatchParser;
using FluentMigrator.Runner.BatchParser.Sources;
using FluentMigrator.Runner.BatchParser.SpecialTokenSearchers;
using FluentMigrator.Runner.Generators.SqlServer;
using FluentMigrator.Runner.Initialization;
using JetBrains.Annotations;
using Microsoft.Data.SqlClient;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
namespace FluentMigrator.Runner.Processors.SqlServer
{
/// <summary>
/// Migration processor for SQL Server 2000: answers metadata queries via
/// INFORMATION_SCHEMA / sysindexes and executes migration SQL, splitting
/// scripts into batches on GO separators.
/// </summary>
public class SqlServer2000Processor : GenericProcessorBase
{
    [CanBeNull]
    private readonly IServiceProvider _serviceProvider;

    [Obsolete]
    public SqlServer2000Processor(IDbConnection connection, IMigrationGenerator generator, IAnnouncer announcer, IMigrationProcessorOptions options, IDbFactory factory)
        : base(connection, factory, generator, announcer, options)
    {
    }

    public SqlServer2000Processor(
        [NotNull] ILogger<SqlServer2000Processor> logger,
        [NotNull] SqlServer2000Generator generator,
        [NotNull] IOptionsSnapshot<ProcessorOptions> options,
        [NotNull] IConnectionStringAccessor connectionStringAccessor,
        [NotNull] IServiceProvider serviceProvider)
        : this(SqlClientFactory.Instance, logger, generator, options, connectionStringAccessor, serviceProvider)
    {
    }

    protected SqlServer2000Processor(
        DbProviderFactory factory,
        [NotNull] ILogger logger,
        [NotNull] SqlServer2000Generator generator,
        [NotNull] IOptionsSnapshot<ProcessorOptions> options,
        [NotNull] IConnectionStringAccessor connectionStringAccessor,
        [NotNull] IServiceProvider serviceProvider)
        : base(() => factory, generator, logger, options.Value, connectionStringAccessor)
    {
        _serviceProvider = serviceProvider;
    }

    /// <inheritdoc />
    public override string DatabaseType => "SqlServer2000";

    /// <inheritdoc />
    public override IList<string> DatabaseTypeAliases { get; } = new List<string>() { "SqlServer" };

    public override void BeginTransaction()
    {
        base.BeginTransaction();
        Logger.LogSql("BEGIN TRANSACTION");
    }

    public override void CommitTransaction()
    {
        base.CommitTransaction();
        Logger.LogSql("COMMIT TRANSACTION");
    }

    public override void RollbackTransaction()
    {
        // Nothing to roll back when no transaction was ever begun.
        if (Transaction == null)
        {
            return;
        }

        base.RollbackTransaction();
        Logger.LogSql("ROLLBACK TRANSACTION");
    }

    /// <summary>Schemas are not distinguished on this dialect; any schema is reported as existing.</summary>
    public override bool SchemaExists(string schemaName)
    {
        return true;
    }

    public override bool TableExists(string schemaName, string tableName)
    {
        try
        {
            return Exists("SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = '{0}'", FormatHelper.FormatSqlEscape(tableName));
        }
        catch (Exception e)
        {
            Logger.LogError(e, "There was an exception checking if table {Table} in {Schema} exists", tableName, schemaName);
        }

        // Treat a failed metadata query as "table does not exist".
        return false;
    }

    public override bool ColumnExists(string schemaName, string tableName, string columnName)
    {
        return Exists("SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = '{0}' AND COLUMN_NAME = '{1}'",
            FormatHelper.FormatSqlEscape(tableName),
            FormatHelper.FormatSqlEscape(columnName));
    }

    public override bool ConstraintExists(string schemaName, string tableName, string constraintName)
    {
        return Exists("SELECT * FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_CATALOG = DB_NAME() AND TABLE_NAME = '{0}' AND CONSTRAINT_NAME = '{1}'",
            FormatHelper.FormatSqlEscape(tableName), FormatHelper.FormatSqlEscape(constraintName));
    }

    public override bool IndexExists(string schemaName, string tableName, string indexName)
    {
        return Exists("SELECT NULL FROM sysindexes WHERE name = '{0}'", FormatHelper.FormatSqlEscape(indexName));
    }

    /// <summary>Sequences are not supported on SQL Server 2000.</summary>
    public override bool SequenceExists(string schemaName, string sequenceName)
    {
        return false;
    }

    public override bool DefaultValueExists(string schemaName, string tableName, string columnName, object defaultValue)
    {
        return false;
    }

    public override DataSet ReadTableData(string schemaName, string tableName)
    {
        return Read("SELECT * FROM [{0}]", tableName);
    }

    public override DataSet Read(string template, params object[] args)
    {
        EnsureConnectionIsOpen();

        using (var command = CreateCommand(string.Format(template, args)))
        using (var reader = command.ExecuteReader())
        {
            return reader.ReadDataSet();
        }
    }

    public override bool Exists(string template, params object[] args)
    {
        EnsureConnectionIsOpen();

        using (var command = CreateCommand(string.Format(template, args)))
        using (var reader = command.ExecuteReader())
        {
            return reader.Read();
        }
    }

    public override void Execute(string template, params object[] args)
    {
        Process(string.Format(template, args));
    }

    protected override void Process(string sql)
    {
        Logger.LogSql(sql);

        if (Options.PreviewOnly || string.IsNullOrEmpty(sql))
        {
            return;
        }

        EnsureConnectionIsOpen();

        // Scripts containing GO separators must be split and run batch by batch.
        if (ContainsGo(sql))
        {
            ExecuteBatchNonQuery(sql);
        }
        else
        {
            ExecuteNonQuery(sql);
        }
    }

    /// <summary>Returns true when the script contains a GO batch separator.</summary>
    private bool ContainsGo(string sql)
    {
        var containsGo = false;
        var parser = _serviceProvider?.GetService<SqlServerBatchParser>() ?? new SqlServerBatchParser();
        parser.SpecialToken += (sender, args) => containsGo = true;
        using (var source = new TextReaderSource(new StringReader(sql), true))
        {
            parser.Process(source);
        }

        return containsGo;
    }

    private void ExecuteNonQuery(string sql)
    {
        using (var command = CreateCommand(sql))
        {
            try
            {
                command.ExecuteNonQuery();
            }
            catch (Exception ex)
            {
                // Attach the offending SQL to the rethrown exception.
                // (A dead, unused StringWriter allocation was removed here.)
                ReThrowWithSql(ex, sql);
            }
        }
    }

    private void ExecuteBatchNonQuery(string sql)
    {
        sql += "\nGO"; // make sure last batch is executed.
        var sqlBatch = string.Empty;

        try
        {
            var parser = _serviceProvider?.GetService<SqlServerBatchParser>() ?? new SqlServerBatchParser();
            parser.SqlText += (sender, args) => sqlBatch = args.SqlText.Trim();
            parser.SpecialToken += (sender, args) =>
            {
                if (string.IsNullOrEmpty(sqlBatch))
                {
                    return;
                }

                if (args.Opaque is GoSearcher.GoSearcherParameters goParams)
                {
                    using (var command = CreateCommand(sqlBatch))
                    {
                        // "GO n" repeats the preceding batch n times.
                        for (var i = 0; i != goParams.Count; ++i)
                        {
                            command.ExecuteNonQuery();
                        }
                    }
                }

                sqlBatch = null;
            };

            using (var source = new TextReaderSource(new StringReader(sql), true))
            {
                parser.Process(source, stripComments: Options.StripComments);
            }
        }
        catch (Exception ex)
        {
            // Report the failing batch when known, otherwise the whole script.
            // (A dead, unused StringWriter allocation was removed here.)
            ReThrowWithSql(ex, string.IsNullOrEmpty(sqlBatch) ? sql : sqlBatch);
        }
    }

    public override void Process(PerformDBOperationExpression expression)
    {
        EnsureConnectionIsOpen();

        expression.Operation?.Invoke(Connection, Transaction);
    }
}
}
| fluentmigrator/fluentmigrator | src/FluentMigrator.Runner.SqlServer/Processors/SqlServer/SqlServer2000Processor.cs | C# | apache-2.0 | 10,264 |
/**
* for(var p in Script.scripts) {
*
* var script = Script.scripts[p];
* var handle = script.handle;
* var base = script.base;
* var limit = base + script.extent;
*
* print(script+"\n");
*
* for(var i = base; i < limit; i++) {
* var pc = jsd.GetClosestPC(handle,i)
* var hascode = String(pc).length && i == jsd.GetClosestLine(handle,pc);
* print("line "+i+" "+ (hascode ? "has code" : "has NO code"));
* }
* print("...............................\n");
* }
*/
// Returns a comma-separated list of the enumerable property names of the
// currently executing remote function (arguments.callee) -- a rough "locals"
// listing for the debug target.
//
// Implementation: build the source of a helper function, install it in the
// remote context under a (hopefully) unique global name via reval(), invoke
// it remotely on arguments.callee via _reval(), then delete the helper again
// so the remote global scope is left unpolluted.
function rlocals()
{
    var retval = "";
    // NOTE(review): assumes this global name is unused in the remote context.
    var name = "___UNIQUE_NAME__";
    // Helper body as a string; the quotes are double-escaped because this text
    // is embedded into another quoted string handed to reval() below.
    var fun = ""+
    "var text = \\\"\\\";"+
    "for(var p in ob)"+
    "{"+
    " if(text != \\\"\\\")"+
    " text += \\\",\\\";"+
    " text += p;"+
    "}"+
    "return text;";
    // Install the remote helper: <name> = new Function("ob", "<fun>")
    reval(name+" = new Function(\"ob\",\""+fun+"\")");
    // show(name);
    // Call the helper on the function currently being debugged.
    retval = _reval([name+"("+"arguments.callee"+")"]);
    // Remove the helper from the remote global scope.
    reval("delete "+name);
    return retval;
}
// Evaluates the given string in the debugger's own (local) context and
// returns the result. Deliberately uses direct eval; kept as a named
// function so it can be invoked from the command line as e("expr").
function e(a)
{
    return eval(a);
}
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jbpm.test.container.listeners;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.kie.api.event.process.DefaultProcessEventListener;
import org.kie.api.event.process.ProcessCompletedEvent;
import org.kie.api.event.process.ProcessNodeLeftEvent;
import org.kie.api.event.process.ProcessNodeTriggeredEvent;
import org.kie.api.event.process.ProcessStartedEvent;
import org.kie.api.event.process.ProcessVariableChangedEvent;
import org.kie.api.runtime.process.ProcessInstance;
/**
 * Process event listener that records which processes were started, completed
 * or aborted, which nodes were triggered/left and which variables changed, so
 * that tests can assert on the recorded history afterwards.
 *
 * <p>NOTE(review): the backing lists are plain {@link ArrayList}s, so this
 * listener assumes single-threaded event delivery — confirm before using it
 * with asynchronous sessions.</p>
 */
public class TrackingProcessEventListener extends DefaultProcessEventListener {

    // Recorded history, in event order.
    private final List<String> processesStarted = new ArrayList<String>();
    private final List<String> processesCompleted = new ArrayList<String>();
    private final List<String> processesAborted = new ArrayList<String>();

    private final List<String> nodesTriggered = new ArrayList<String>();
    private final List<String> nodesLeft = new ArrayList<String>();
    private final List<String> variablesChanged = new ArrayList<String>();

    @Override
    public void beforeNodeTriggered(ProcessNodeTriggeredEvent event) {
        nodesTriggered.add(event.getNodeInstance().getNodeName());
    }

    @Override
    public void beforeNodeLeft(ProcessNodeLeftEvent event) {
        nodesLeft.add(event.getNodeInstance().getNodeName());
    }

    @Override
    public void beforeProcessStarted(ProcessStartedEvent event) {
        processesStarted.add(event.getProcessInstance().getProcessId());
    }

    @Override
    public void beforeProcessCompleted(ProcessCompletedEvent event) {
        // Aborted instances also fire this callback; split them by final state.
        if (event.getProcessInstance().getState() == ProcessInstance.STATE_ABORTED) {
            processesAborted.add(event.getProcessInstance().getProcessId());
        } else {
            processesCompleted.add(event.getProcessInstance().getProcessId());
        }
    }

    @Override
    public void beforeVariableChanged(ProcessVariableChangedEvent event) {
        variablesChanged.add(event.getVariableId());
    }

    /** @return unmodifiable view (not a snapshot) of the triggered node names */
    public List<String> getNodesTriggered() {
        return Collections.unmodifiableList(nodesTriggered);
    }

    /** @return unmodifiable view (not a snapshot) of the left node names */
    public List<String> getNodesLeft() {
        return Collections.unmodifiableList(nodesLeft);
    }

    /** @return unmodifiable view (not a snapshot) of the started process ids */
    public List<String> getProcessesStarted() {
        return Collections.unmodifiableList(processesStarted);
    }

    /** @return unmodifiable view (not a snapshot) of the completed process ids */
    public List<String> getProcessesCompleted() {
        return Collections.unmodifiableList(processesCompleted);
    }

    /** @return unmodifiable view (not a snapshot) of the aborted process ids */
    public List<String> getProcessesAborted() {
        return Collections.unmodifiableList(processesAborted);
    }

    /** @return unmodifiable view (not a snapshot) of the changed variable ids */
    public List<String> getVariablesChanged() {
        return Collections.unmodifiableList(variablesChanged);
    }

    public boolean wasNodeTriggered(String nodeName) {
        return nodesTriggered.contains(nodeName);
    }

    public boolean wasNodeLeft(String nodeName) {
        return nodesLeft.contains(nodeName);
    }

    public boolean wasProcessStarted(String processName) {
        return processesStarted.contains(processName);
    }

    public boolean wasProcessCompleted(String processName) {
        return processesCompleted.contains(processName);
    }

    public boolean wasProcessAborted(String processName) {
        return processesAborted.contains(processName);
    }

    public boolean wasVariableChanged(String variableId) {
        return variablesChanged.contains(variableId);
    }

    /** Clears all recorded history, e.g. between test phases. */
    public void clear() {
        nodesTriggered.clear();
        nodesLeft.clear();
        processesStarted.clear();
        processesCompleted.clear();
        processesAborted.clear();
        variablesChanged.clear();
    }
}
| etirelli/jbpm | jbpm-container-test/jbpm-in-container-test/jbpm-container-test-suite/src/main/java/org/jbpm/test/container/listeners/TrackingProcessEventListener.java | Java | apache-2.0 | 4,273 |
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.utils.db;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import com.cloud.utils.exception.CloudRuntimeException;
public class DriverLoader {
    private static final Logger LOGGER = Logger.getLogger(DriverLoader.class.getName());

    /** JDBC URL prefixes whose drivers have already been loaded successfully. */
    private static final List<String> LOADED_DRIVERS;
    /** Maps a JDBC URL prefix (e.g. "jdbc:mysql") to its driver class name. */
    private static final Map<String, String> DRIVERS;

    static {
        DRIVERS = new HashMap<String, String>();
        DRIVERS.put("jdbc:mysql", "com.mysql.cj.jdbc.Driver");
        DRIVERS.put("jdbc:postgresql", "org.postgresql.Driver");
        DRIVERS.put("jdbc:h2", "org.h2.Driver");
        LOADED_DRIVERS = new ArrayList<String>();
    }

    /**
     * Loads and registers the JDBC driver identified by the given URL prefix,
     * if it is supported and not already loaded. Subsequent calls for the same
     * prefix are no-ops.
     *
     * NOTE(review): this method mutates the shared LOADED_DRIVERS list without
     * synchronization — confirm callers only invoke it single-threaded at startup.
     *
     * @param dbDriver JDBC URL prefix identifying the driver, e.g. "jdbc:mysql"
     * @throws CloudRuntimeException if the prefix is unsupported or the driver
     *         class cannot be loaded/instantiated
     */
    public static void loadDriver(String dbDriver) {
        String driverClass = DRIVERS.get(dbDriver);
        if (driverClass == null) {
            LOGGER.error("DB driver type " + dbDriver + " is not supported!");
            throw new CloudRuntimeException("DB driver type " + dbDriver + " is not supported!");
        }
        if (LOADED_DRIVERS.contains(dbDriver)) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("DB driver " + driverClass + " was already loaded.");
            }
            return;
        }
        try {
            // Class.newInstance() is deprecated since Java 9 and propagates any
            // checked exception thrown by the constructor undeclared; go through
            // the no-arg constructor explicitly instead.
            Class.forName(driverClass).getDeclaredConstructor().newInstance();
            LOADED_DRIVERS.add(dbDriver);
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Successfully loaded DB driver " + driverClass);
            }
        } catch (ReflectiveOperationException e) {
            // Covers ClassNotFound, NoSuchMethod, Instantiation, IllegalAccess
            // and InvocationTarget exceptions from the reflective calls above.
            LOGGER.error("Failed to load DB driver " + driverClass);
            throw new CloudRuntimeException("Failed to load DB driver " + driverClass, e);
        }
    }
}
| GabrielBrascher/cloudstack | framework/db/src/main/java/com/cloud/utils/db/DriverLoader.java | Java | apache-2.0 | 2,605 |
/*
* Copyright 2000-2017 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.codeInspection.dataFlow;
import com.intellij.codeInspection.dataFlow.value.DfaRelationValue.RelationType;
import com.intellij.lang.injection.InjectedLanguageManager;
import com.intellij.psi.*;
import com.intellij.util.containers.ContainerUtil;
import com.siyeh.ig.psiutils.ExpressionUtils;
import com.siyeh.ig.psiutils.MethodUtils;
import com.siyeh.ig.psiutils.TypeUtils;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.regex.Pattern;
import static com.intellij.codeInspection.dataFlow.MethodContract.ValueConstraint.*;
import static com.intellij.codeInspection.dataFlow.StandardMethodContract.createConstraintArray;
/**
 * Hardcoded dataflow {@link MethodContract}s for well-known JDK, Guava,
 * commons-lang, Spring and test-framework methods that carry no
 * {@code @Contract} annotation in source (e.g. {@code Objects.requireNonNull},
 * {@code String.charAt} bounds checks, collection emptiness checks,
 * assertion methods). Matching is by qualified class name and method name.
 *
 * @author peter
 */
public class HardcodedContracts {
  private static final Pattern FIRST_OR_LAST = Pattern.compile("first|last");
  private static final Pattern CONTAINS_KEY_VALUE = Pattern.compile("containsKey|containsValue");
  // All these methods take array as 1st parameter, from index as 2nd and to index as 3rd
  // thus ARRAY_RANGE_CONTRACTS are applicable to them
  private static final Pattern ARRAY_RANGED_METHODS =
    Pattern.compile("binarySearch|fill|parallelPrefix|parallelSort|sort|spliterator|stream");
  // Shared contracts for Arrays.* range methods: both indices non-negative,
  // neither beyond the array length, and fromIndex <= toIndex.
  private static final List<MethodContract> ARRAY_RANGE_CONTRACTS = ContainerUtil.immutableList(
    nonnegativeArgumentContract(1),
    nonnegativeArgumentContract(2),
    MethodContract.singleConditionContract(ContractValue.argument(1), RelationType.GT,
                                           ContractValue.argument(0).specialField(SpecialField.ARRAY_LENGTH), THROW_EXCEPTION),
    MethodContract.singleConditionContract(ContractValue.argument(2), RelationType.GT,
                                           ContractValue.argument(0).specialField(SpecialField.ARRAY_LENGTH), THROW_EXCEPTION),
    MethodContract.singleConditionContract(ContractValue.argument(1), RelationType.GT,
                                           ContractValue.argument(2), THROW_EXCEPTION)
  );

  /**
   * Returns the hardcoded contracts for the given method, or an empty list when
   * none are known. The branch order below is significant: the first matching
   * class/method pattern wins.
   *
   * @param method the resolved method being called
   * @param call   the call site, when available; only needed for assertThat-style
   *               matchers whose arguments must be inspected
   */
  public static List<MethodContract> getHardcodedContracts(@NotNull PsiMethod method, @Nullable PsiMethodCallExpression call) {
    PsiClass owner = method.getContainingClass();
    // No contracts for unresolvable owners or code living in injected fragments.
    if (owner == null ||
        InjectedLanguageManager.getInstance(owner.getProject()).isInjectedFragment(owner.getContainingFile())) {
      return Collections.emptyList();
    }
    final int paramCount = method.getParameterList().getParametersCount();
    String className = owner.getQualifiedName();
    if (className == null) return Collections.emptyList();
    String methodName = method.getName();
    if ("java.lang.System".equals(className)) {
      if ("exit".equals(methodName)) {
        // System.exit never returns normally.
        return Collections.singletonList(new StandardMethodContract(createConstraintArray(paramCount), THROW_EXCEPTION));
      }
    }
    else if ("com.google.common.base.Preconditions".equals(className)) {
      if ("checkNotNull".equals(methodName) && paramCount > 0) {
        return failIfNull(0, paramCount);
      }
      if (("checkArgument".equals(methodName) || "checkState".equals(methodName)) && paramCount > 0) {
        // First argument false => exception.
        MethodContract.ValueConstraint[] constraints = createConstraintArray(paramCount);
        constraints[0] = FALSE_VALUE;
        return Collections.singletonList(new StandardMethodContract(constraints, THROW_EXCEPTION));
      }
    }
    else if ("java.util.Objects".equals(className)) {
      if ("requireNonNull".equals(methodName) && paramCount > 0) {
        return failIfNull(0, paramCount);
      }
    }
    else if (CommonClassNames.JAVA_LANG_STRING.equals(className)) {
      if (("charAt".equals(methodName) || "codePointAt".equals(methodName)) && paramCount == 1) {
        // Index must be within [0, length).
        return Arrays.asList(nonnegativeArgumentContract(0),
                             specialFieldRangeContract(0, RelationType.LT, SpecialField.STRING_LENGTH));
      }
      else if (("substring".equals(methodName) || "subSequence".equals(methodName)) && paramCount <= 2) {
        // begin in [0, length]; for the two-arg form also end in [0, length]
        // and begin <= end.
        List<MethodContract> contracts = new ArrayList<>(5);
        contracts.add(nonnegativeArgumentContract(0));
        contracts.add(specialFieldRangeContract(0, RelationType.LE, SpecialField.STRING_LENGTH));
        if (paramCount == 2) {
          contracts.add(nonnegativeArgumentContract(1));
          contracts.add(specialFieldRangeContract(1, RelationType.LE, SpecialField.STRING_LENGTH));
          contracts.add(MethodContract
                          .singleConditionContract(ContractValue.argument(0), RelationType.LE.getNegated(), ContractValue.argument(1),
                                                   THROW_EXCEPTION));
        }
        return contracts;
      }
      else if ("isEmpty".equals(methodName) && paramCount == 0) {
        return SpecialField.STRING_LENGTH.getEmptyContracts();
      }
    }
    else if (MethodUtils.methodMatches(method, CommonClassNames.JAVA_UTIL_COLLECTION, PsiType.BOOLEAN, "isEmpty")) {
      return SpecialField.COLLECTION_SIZE.getEmptyContracts();
    }
    else if (MethodUtils.methodMatches(method, CommonClassNames.JAVA_UTIL_COLLECTION, PsiType.BOOLEAN, "contains", (PsiType)null)) {
      // An empty collection contains nothing.
      return Collections.singletonList(MethodContract.singleConditionContract(
        ContractValue.qualifier().specialField(SpecialField.COLLECTION_SIZE), RelationType.EQ, ContractValue.zero(), FALSE_VALUE));
    }
    else if (MethodUtils.methodMatches(method, CommonClassNames.JAVA_UTIL_SET, PsiType.BOOLEAN, "equals", (PsiType)null) ||
             MethodUtils.methodMatches(method, CommonClassNames.JAVA_UTIL_LIST, PsiType.BOOLEAN, "equals", (PsiType)null)) {
      // Differently-sized collections are never equal.
      return Collections.singletonList(MethodContract.singleConditionContract(
        ContractValue.qualifier().specialField(SpecialField.COLLECTION_SIZE), RelationType.NE,
        ContractValue.argument(0).specialField(SpecialField.COLLECTION_SIZE), FALSE_VALUE));
    }
    else if (MethodUtils.methodMatches(method, CommonClassNames.JAVA_UTIL_LIST, null, "get", PsiType.INT)) {
      return Arrays.asList(nonnegativeArgumentContract(0),
                           specialFieldRangeContract(0, RelationType.LT, SpecialField.COLLECTION_SIZE));
    }
    else if (MethodUtils.methodMatches(method, "java.util.SortedSet", null, FIRST_OR_LAST)) {
      // first()/last() on an empty sorted set throws.
      return Collections.singletonList(MethodContract.singleConditionContract(
        ContractValue.qualifier().specialField(SpecialField.COLLECTION_SIZE), RelationType.EQ,
        ContractValue.zero(), THROW_EXCEPTION));
    }
    else if (MethodUtils.methodMatches(method, CommonClassNames.JAVA_UTIL_MAP, PsiType.BOOLEAN, "isEmpty")) {
      return SpecialField.MAP_SIZE.getEmptyContracts();
    }
    else if (MethodUtils.methodMatches(method, CommonClassNames.JAVA_UTIL_MAP, PsiType.BOOLEAN, CONTAINS_KEY_VALUE, (PsiType)null)) {
      return Collections.singletonList(MethodContract.singleConditionContract(
        ContractValue.qualifier().specialField(SpecialField.MAP_SIZE), RelationType.EQ, ContractValue.zero(), FALSE_VALUE));
    }
    else if (MethodUtils.methodMatches(method, CommonClassNames.JAVA_UTIL_MAP, PsiType.BOOLEAN, "equals", (PsiType)null)) {
      return Collections.singletonList(MethodContract.singleConditionContract(
        ContractValue.qualifier().specialField(SpecialField.MAP_SIZE), RelationType.NE,
        ContractValue.argument(0).specialField(SpecialField.MAP_SIZE), FALSE_VALUE));
    }
    else if (MethodUtils.methodMatches(method, CommonClassNames.JAVA_UTIL_ARRAYS, null, ARRAY_RANGED_METHODS, (PsiType[])null) &&
             paramCount >= 3) {
      return ARRAY_RANGE_CONTRACTS;
    }
    else if ("org.apache.commons.lang.Validate".equals(className) ||
             "org.apache.commons.lang3.Validate".equals(className) ||
             "org.springframework.util.Assert".equals(className)) {
      if (("isTrue".equals(methodName) || "state".equals(methodName)) && paramCount > 0) {
        MethodContract.ValueConstraint[] constraints = createConstraintArray(paramCount);
        constraints[0] = FALSE_VALUE;
        return Collections.singletonList(new StandardMethodContract(constraints, THROW_EXCEPTION));
      }
      if ("notNull".equals(methodName) && paramCount > 0) {
        MethodContract.ValueConstraint[] constraints = createConstraintArray(paramCount);
        constraints[0] = NULL_VALUE;
        return Collections.singletonList(new StandardMethodContract(constraints, THROW_EXCEPTION));
      }
    }
    else if (isJunit(className) || isTestng(className) ||
             className.startsWith("com.google.common.truth.") ||
             className.startsWith("org.assertj.core.api.")) {
      return handleTestFrameworks(paramCount, className, methodName, call);
    }
    else if (TypeUtils.isOptional(owner)) {
      if (DfaOptionalSupport.isOptionalGetMethodName(methodName) || "orElseThrow".equals(methodName)) {
        // get()/orElseThrow(): throws on absent, otherwise the value is not null.
        return Arrays.asList(optionalAbsentContract(THROW_EXCEPTION), MethodContract.trivialContract(NOT_NULL_VALUE));
      }
      else if ("isPresent".equals(methodName)) {
        return Arrays.asList(optionalAbsentContract(FALSE_VALUE), MethodContract.trivialContract(TRUE_VALUE));
      }
    }
    return Collections.emptyList();
  }

  /** Contract producing {@code returnValue} when the qualifier Optional is absent. */
  static MethodContract optionalAbsentContract(MethodContract.ValueConstraint returnValue) {
    return MethodContract
      .singleConditionContract(ContractValue.qualifier(), RelationType.IS, ContractValue.optionalValue(false), returnValue);
  }

  /** Contract: argument {@code argNumber} &lt; 0 =&gt; exception. */
  static MethodContract nonnegativeArgumentContract(int argNumber) {
    return MethodContract
      .singleConditionContract(ContractValue.argument(argNumber), RelationType.LT, ContractValue.zero(), THROW_EXCEPTION);
  }

  /**
   * Contract: exception unless argument {@code index} relates to the qualifier's
   * {@code specialField} (length/size) by {@code type}.
   */
  static MethodContract specialFieldRangeContract(int index, RelationType type, SpecialField specialField) {
    return MethodContract.singleConditionContract(ContractValue.argument(index), type.getNegated(),
                                                  ContractValue.qualifier().specialField(specialField), THROW_EXCEPTION);
  }

  private static boolean isJunit(String className) {
    return className.startsWith("junit.framework.") || className.startsWith("org.junit.");
  }

  private static boolean isJunit5(String className) {
    return className.startsWith("org.junit.jupiter.");
  }

  private static boolean isTestng(String className) {
    return className.startsWith("org.testng.");
  }

  /**
   * Recognizes hamcrest-style matchers asserting non-nullness:
   * {@code notNullValue()}, {@code not(equalTo(null))}, and {@code is(...)}
   * wrapping either of those.
   */
  private static boolean isNotNullMatcher(PsiExpression expr) {
    if (expr instanceof PsiMethodCallExpression) {
      String calledName = ((PsiMethodCallExpression)expr).getMethodExpression().getReferenceName();
      if ("notNullValue".equals(calledName)) {
        return true;
      }
      if ("not".equals(calledName)) {
        PsiExpression[] notArgs = ((PsiMethodCallExpression)expr).getArgumentList().getExpressions();
        if (notArgs.length == 1 &&
            notArgs[0] instanceof PsiMethodCallExpression &&
            "equalTo".equals(((PsiMethodCallExpression)notArgs[0]).getMethodExpression().getReferenceName())) {
          PsiExpression[] equalArgs = ((PsiMethodCallExpression)notArgs[0]).getArgumentList().getExpressions();
          if (equalArgs.length == 1 && ExpressionUtils.isNullLiteral(equalArgs[0])) {
            return true;
          }
        }
      }
      if ("is".equals(calledName)) {
        // is(...) is transparent: recurse into its single argument.
        PsiExpression[] args = ((PsiMethodCallExpression)expr).getArgumentList().getExpressions();
        if (args.length == 1) return isNotNullMatcher(args[0]);
      }
    }
    return false;
  }

  /**
   * Contracts for JUnit/TestNG/Truth/AssertJ assertion methods. Note that JUnit 4
   * puts the asserted value last (message first), while TestNG and JUnit 5 put it
   * first — hence the checkedParam computation below.
   */
  private static List<MethodContract> handleTestFrameworks(int paramCount, String className, String methodName,
                                                           @Nullable PsiMethodCallExpression call) {
    if (("assertThat".equals(methodName) || "assumeThat".equals(methodName) || "that".equals(methodName)) && call != null) {
      return handleAssertThat(paramCount, call);
    }
    if (!isJunit(className) && !isTestng(className)) {
      return Collections.emptyList();
    }
    boolean testng = isTestng(className);
    if ("fail".equals(methodName)) {
      return Collections.singletonList(new StandardMethodContract(createConstraintArray(paramCount), THROW_EXCEPTION));
    }
    if (paramCount == 0) return Collections.emptyList();
    int checkedParam = testng || isJunit5(className) ? 0 : paramCount - 1;
    MethodContract.ValueConstraint[] constraints = createConstraintArray(paramCount);
    if ("assertTrue".equals(methodName) || "assumeTrue".equals(methodName)) {
      constraints[checkedParam] = FALSE_VALUE;
      return Collections.singletonList(new StandardMethodContract(constraints, THROW_EXCEPTION));
    }
    if ("assertFalse".equals(methodName) || "assumeFalse".equals(methodName)) {
      constraints[checkedParam] = TRUE_VALUE;
      return Collections.singletonList(new StandardMethodContract(constraints, THROW_EXCEPTION));
    }
    if ("assertNull".equals(methodName)) {
      constraints[checkedParam] = NOT_NULL_VALUE;
      return Collections.singletonList(new StandardMethodContract(constraints, THROW_EXCEPTION));
    }
    if ("assertNotNull".equals(methodName) || "assumeNotNull".equals(methodName)) {
      return failIfNull(checkedParam, paramCount);
    }
    return Collections.emptyList();
  }

  /**
   * assertThat(value, matcher...): when some matcher asserts non-nullness, the
   * matched value cannot be null afterwards. The AssertJ single-argument form
   * is handled via the chained isNotNull() call.
   */
  @NotNull
  private static List<MethodContract> handleAssertThat(int paramCount, @NotNull PsiMethodCallExpression call) {
    PsiExpression[] args = call.getArgumentList().getExpressions();
    if (args.length == paramCount) {
      for (int i = 1; i < args.length; i++) {
        if (isNotNullMatcher(args[i])) {
          // The matcher applies to the preceding argument.
          return failIfNull(i - 1, paramCount);
        }
      }
      if (args.length == 1 && hasNotNullChainCall(call)) {
        return failIfNull(0, 1);
      }
    }
    return Collections.emptyList();
  }

  /** True when the call is part of a chain containing .isNotNull() (AssertJ style). */
  private static boolean hasNotNullChainCall(PsiMethodCallExpression call) {
    Iterable<PsiElement> exprParents = SyntaxTraverser.psiApi().parents(call).
      takeWhile(e -> !(e instanceof PsiStatement) && !(e instanceof PsiMember));
    return ContainerUtil.exists(exprParents, HardcodedContracts::isNotNullCall);
  }

  private static boolean isNotNullCall(PsiElement ref) {
    return ref instanceof PsiReferenceExpression &&
           "isNotNull".equals(((PsiReferenceExpression)ref).getReferenceName()) &&
           ref.getParent() instanceof PsiMethodCallExpression;
  }

  /** Single contract: argument {@code argIndex} null =&gt; exception. */
  @NotNull
  private static List<MethodContract> failIfNull(int argIndex, int argCount) {
    MethodContract.ValueConstraint[] constraints = createConstraintArray(argCount);
    constraints[argIndex] = NULL_VALUE;
    return Collections.singletonList(new StandardMethodContract(constraints, THROW_EXCEPTION));
  }

  /**
   * Whether a method with hardcoded contracts can also be considered pure
   * (no side effects). Exceptions: requireNonNull with a message Supplier
   * (may invoke arbitrary code), all of System, and most Arrays methods
   * (they mutate the passed array).
   */
  public static boolean isHardcodedPure(PsiMethod method) {
    PsiClass aClass = method.getContainingClass();
    if (aClass == null) return false;
    String className = aClass.getQualifiedName();
    if (className == null) return false;
    String name = method.getName();
    if ("java.util.Objects".equals(className) && "requireNonNull".equals(name)) {
      PsiParameter[] parameters = method.getParameterList().getParameters();
      if (parameters.length == 2 && parameters[1].getType().getCanonicalText().contains("Supplier")) {
        return false;
      }
    }
    if ("java.lang.System".equals(className)) {
      return false;
    }
    if (CommonClassNames.JAVA_UTIL_ARRAYS.equals(className)) {
      return name.equals("binarySearch") || name.equals("spliterator") || name.equals("stream");
    }
    return true;
  }

  /**
   * True when the element (a method, or a parameter of such a method) has
   * hardcoded contracts attached by this class.
   */
  public static boolean hasHardcodedContracts(@Nullable PsiElement element) {
    if (element instanceof PsiMethod) {
      return !getHardcodedContracts((PsiMethod)element, null).isEmpty();
    }
    if (element instanceof PsiParameter) {
      PsiElement parent = element.getParent();
      return parent != null && hasHardcodedContracts(parent.getParent());
    }
    return false;
  }
}
| asedunov/intellij-community | java/java-analysis-impl/src/com/intellij/codeInspection/dataFlow/HardcodedContracts.java | Java | apache-2.0 | 16,581 |
/*
Copyright 2008-2013 ITACA-TSB, http://www.tsb.upv.es/
Instituto Tecnologico de Aplicaciones de Comunicacion
Avanzadas - Grupo Tecnologias para la Salud y el
Bienestar (TSB)
See the NOTICE file distributed with this work for additional
information regarding copyright ownership
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.bubblecloud.zigbee.network.packet.simple;
import org.bubblecloud.zigbee.network.packet.ZToolCMD;
import org.bubblecloud.zigbee.network.packet.ZToolPacket;
import org.bubblecloud.zigbee.util.DoubleByte;
/**
* This command puts the device into the Allow Binding Mode for a given period of time.
* This allows a peer device to establish a binding with this device (in the Allow Binding
* Mode) by issuing the zb_BindDevice with a destination address of NULL.
* @author <a href="mailto:alfiva@aaa.upv.es">Alvaro Fides Valero</a>
* @version $LastChangedRevision: 799 $ ($LastChangedDate: 2013-08-06 19:00:05 +0300 (Tue, 06 Aug 2013) $)
*/
public class ZB_ALLOW_BIND extends ZToolPacket /*implements IREQUEST,ISIMPLEAPI*/ {
    /**
     * Number of seconds (max. 64) the device remains in Allow Bind mode.
     * A value of 0 turns Allow Bind mode off immediately; 0xFF keeps the
     * device in the mode indefinitely.
     */
    public int Timeout;

    /** Creates an empty, unpopulated ZB_ALLOW_BIND packet. */
    public ZB_ALLOW_BIND() {
    }

    /**
     * Creates a ZB_ALLOW_BIND packet for the given timeout and builds the
     * underlying ZTool frame.
     *
     * @param num1 timeout in seconds (0 = off immediately, 0xFF = indefinite)
     */
    public ZB_ALLOW_BIND(int num1) {
        this.Timeout = num1;
        super.buildPacket(new DoubleByte(ZToolCMD.ZB_ALLOW_BIND), new int[] {num1});
    }
}
| cdjackson/zigbee4java | zigbee-dongle-cc2531/src/main/java/org/bubblecloud/zigbee/network/packet/simple/ZB_ALLOW_BIND.java | Java | apache-2.0 | 2,297 |
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/codepipeline/model/StageTransitionType.h>
#include <aws/core/utils/HashingUtils.h>
#include <aws/core/Globals.h>
#include <aws/core/utils/EnumParseOverflowContainer.h>
using namespace Aws::Utils;
namespace Aws
{
namespace CodePipeline
{
namespace Model
{
namespace StageTransitionTypeMapper
{
        // Pre-computed hashes of the two known enum member names.
        static const int Inbound_HASH = HashingUtils::HashString("Inbound");
        static const int Outbound_HASH = HashingUtils::HashString("Outbound");


        StageTransitionType GetStageTransitionTypeForName(const Aws::String& name)
        {
          const int hash = HashingUtils::HashString(name.c_str());
          if (hash == Inbound_HASH)
          {
            return StageTransitionType::Inbound;
          }
          if (hash == Outbound_HASH)
          {
            return StageTransitionType::Outbound;
          }

          // Unknown name: remember the raw hash so it can be mapped back to the
          // original string later, and smuggle it through the enum type.
          EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
          if (overflowContainer)
          {
            overflowContainer->StoreOverflow(hash, name);
            return static_cast<StageTransitionType>(hash);
          }

          return StageTransitionType::NOT_SET;
        }

        Aws::String GetNameForStageTransitionType(StageTransitionType enumValue)
        {
          if (enumValue == StageTransitionType::Inbound)
          {
            return "Inbound";
          }
          if (enumValue == StageTransitionType::Outbound)
          {
            return "Outbound";
          }

          // Not a known member: it may be an overflow value stored above.
          EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
          if (overflowContainer)
          {
            return overflowContainer->RetrieveOverflow(static_cast<int>(enumValue));
          }

          return "";
        }

} // namespace StageTransitionTypeMapper
} // namespace Model
} // namespace CodePipeline
} // namespace Aws
| JoyIfBam5/aws-sdk-cpp | aws-cpp-sdk-codepipeline/source/model/StageTransitionType.cpp | C++ | apache-2.0 | 2,503 |
package org.apache.archiva.configuration.model.functors;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.archiva.configuration.model.ProxyConnectorConfiguration;
import org.apache.commons.collections4.Predicate;
import org.apache.commons.lang3.StringUtils;
/**
* ProxyConnectorPredicate
*
*
*/
public class ProxyConnectorSelectionPredicate
    implements Predicate<ProxyConnectorConfiguration>
{
    private final String sourceId;

    private final String targetId;

    /**
     * @param sourceId id of the source repository the connector must reference
     * @param targetId id of the target repository the connector must reference
     */
    public ProxyConnectorSelectionPredicate( String sourceId, String targetId )
    {
        this.sourceId = sourceId;
        this.targetId = targetId;
    }

    /**
     * @return true when the connector is non-null and both its source and target
     *         repository ids equal the configured ids (null-safe comparison).
     */
    @Override
    public boolean evaluate( ProxyConnectorConfiguration connector )
    {
        // Previous version carried a dead "satisfies" flag and a redundant
        // local alias; a null connector simply never matches.
        return connector != null
            && StringUtils.equals( sourceId, connector.getSourceRepoId() )
            && StringUtils.equals( targetId, connector.getTargetRepoId() );
    }
}
| apache/archiva | archiva-modules/archiva-base/archiva-configuration/archiva-configuration-model/src/main/java/org/apache/archiva/configuration/model/functors/ProxyConnectorSelectionPredicate.java | Java | apache-2.0 | 1,916 |
/*************************GO-LICENSE-START*********************************
* Copyright 2014 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*************************GO-LICENSE-END***********************************/
package com.thoughtworks.go.server.materials.postcommit.svn;
import com.thoughtworks.go.config.materials.svn.SvnMaterial;
import com.thoughtworks.go.domain.materials.Material;
import com.thoughtworks.go.domain.materials.svn.SvnCommand;
import com.thoughtworks.go.server.materials.postcommit.PostCommitHookImplementer;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
public class SvnPostCommitHookImplementer implements PostCommitHookImplementer {
static final String UUID = "uuid";
@Override
public Set<Material> prune(Set<Material> materials, Map params) {
final HashSet<Material> prunedMaterials = new HashSet<>();
if (params.containsKey(UUID)) {
final String targetUUID = (String) params.get(UUID);
final HashMap<String, String> urlToRemoteUUIDMap = createUrlToRemoteUUIDMap(materials);
for (Material material : materials) {
if (material instanceof SvnMaterial && isQualified(targetUUID, (SvnMaterial) material, urlToRemoteUUIDMap)) {
prunedMaterials.add(material);
}
}
}
return prunedMaterials;
}
boolean isQualified(String incomingUUID, SvnMaterial material, HashMap urlToUUIDMap) {
if (urlToUUIDMap.containsKey(material.urlForCommandLine())) {
final String remoteUUID = (String) urlToUUIDMap.get(material.urlForCommandLine());
return incomingUUID.equals(remoteUUID);
}
return false;
}
HashMap<String, String> createUrlToRemoteUUIDMap(Set<Material> materials) {
final HashSet<SvnMaterial> setOfSvnMaterials = new HashSet<>();
for (Material material : materials) {
if (material instanceof SvnMaterial) {
setOfSvnMaterials.add((SvnMaterial) material);
}
}
return getEmptySvnCommand().createUrlToRemoteUUIDMap(setOfSvnMaterials);
}
SvnCommand getEmptySvnCommand() {
return new SvnCommand(null, ".");
}
}
| varshavaradarajan/gocd | server/src/main/java/com/thoughtworks/go/server/materials/postcommit/svn/SvnPostCommitHookImplementer.java | Java | apache-2.0 | 2,811 |
/*
* Copyright 2003-2008 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.codehaus.groovy.transform;
/**
* Define a package-scoped interface.
* Fix for GROOVY-3380
*
* @author Guillaume Laforge
*/
interface NonPublicInterface {
String CONSTANT = "constant";
}
| Selventa/model-builder | tools/groovy/src/src/test/org/codehaus/groovy/transform/NonPublicInterface.java | Java | apache-2.0 | 828 |
/*
* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* -Redistribution of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* -Redistribution in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of Oracle nor the names of contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* This software is provided "AS IS," without a warranty of any kind. ALL
* EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING
* ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
* OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN")
* AND ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE
* AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS
* DERIVATIVES. IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST
* REVENUE, PROFIT OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL,
* INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY
* OF LIABILITY, ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE,
* EVEN IF SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
*
* You acknowledge that this software is not designed, licensed or intended
* for use in the design, construction, operation or maintenance of any
* nuclear facility.
*/
package jnlp.sample.servlet;
import java.io.*;
import java.util.*;
import jnlp.sample.jardiff.*;
import javax.servlet.*;
import javax.servlet.http.*;
import jnlp.sample.util.VersionString;
import java.net.URL;
/*
* A class that generates and caches information about JarDiff files
*
*/
public class JarDiffHandler {
    // Default size of download buffer
    private static final int BUF_SIZE = 32 * 1024;
    // Default JARDiff mime type, used when the servlet container has no mapping
    private static final String JARDIFF_MIMETYPE = "application/x-java-archive-diff";
    /** List of all generated JARDiffs; maps JarDiffKey -> JarDiffEntry. */
    private HashMap _jarDiffEntries = null;
    /** Reference to ServletContext and logger object */
    private static Logger _log = null;
    private ServletContext _servletContext = null;
    // Mime type JARDiff responses are served with (container mapping or default).
    private String _jarDiffMimeType = null;
/* Contains information about a particular JARDiff entry */
private static class JarDiffKey implements Comparable{
private String _name; // Name of file
private String _fromVersionId; // From version
private String _toVersionId; // To version
private boolean _minimal; // True if this is a minimal jardiff
/** Constructor used to generate a query object */
public JarDiffKey(String name, String fromVersionId, String toVersionId, boolean minimal) {
_name = name;
_fromVersionId = fromVersionId;
_toVersionId = toVersionId;
_minimal = minimal;
}
// Query methods
public String getName() { return _name; }
public String getFromVersionId() { return _fromVersionId; }
public String getToVersionId() { return _toVersionId; }
public boolean isMinimal() { return _minimal; }
// Collection framework interface methods
public int compareTo(Object o) {
// All non JarDiff entries are less
if (!(o instanceof JarDiffKey)) return -1;
JarDiffKey other = (JarDiffKey)o;
int n = _name.compareTo(other.getName());
if (n != 0) return n;
n = _fromVersionId.compareTo(other.getFromVersionId());
if (n != 0) return n;
if (_minimal != other.isMinimal()) return -1;
return _toVersionId.compareTo(other.getToVersionId());
}
public boolean equals(Object o) {
return compareTo(o) == 0;
}
public int hashCode() {
return _name.hashCode() +
_fromVersionId.hashCode() +
_toVersionId.hashCode();
}
}
static private class JarDiffEntry {
private File _jardiffFile; // Location of JARDiff file
public JarDiffEntry(File jarDiffFile) {
_jardiffFile = jarDiffFile;
}
public File getJarDiffFile() { return _jardiffFile; }
}
/** Initialize JarDiff handler */
public JarDiffHandler(ServletContext servletContext, Logger log) {
_jarDiffEntries = new HashMap();
_servletContext = servletContext;
_log = log;
_jarDiffMimeType = _servletContext.getMimeType("xyz.jardiff");
if (_jarDiffMimeType == null) _jarDiffMimeType = JARDIFF_MIMETYPE;
}
    /**
     * Returns a cached or newly generated JARDiff response for the given
     * request, or null when the request has no current version or no usable
     * JARDiff could be generated. Generation results (including failures,
     * stored as entries with a null file) are cached per key.
     */
    public synchronized DownloadResponse getJarDiffEntry(ResourceCatalog catalog, DownloadRequest dreq, JnlpResource res) {
        if (dreq.getCurrentVersionId() == null) return null;
        // check whether the request is from javaws 1.0/1.0.1
        // do not generate minimal jardiff if it is from 1.0/1.0.1
        boolean doJarDiffWorkAround = isJavawsVersion(dreq, "1.0*");
        // First do a lookup to find a match
        JarDiffKey key = new JarDiffKey(res.getName(),
                                        dreq.getCurrentVersionId(),
                                        res.getReturnVersionId(),
                                        !doJarDiffWorkAround);
        JarDiffEntry entry = (JarDiffEntry)_jarDiffEntries.get(key);
        // If entry is not found, then the query has not been made yet:
        // generate the JARDiff now and cache the outcome (even failure).
        if (entry == null) {
            if (_log.isInformationalLevel()) {
                _log.addInformational("servlet.log.info.jardiff.gen",
                                      res.getName(),
                                      dreq.getCurrentVersionId(),
                                      res.getReturnVersionId());
            }
            File f = generateJarDiff(catalog, dreq, res, doJarDiffWorkAround);
            if (f == null) {
                _log.addWarning("servlet.log.warning.jardiff.failed",
                                res.getName(),
                                dreq.getCurrentVersionId(),
                                res.getReturnVersionId());
            }
            // Store entry in table (a null file marks a failed generation,
            // so it is not retried on the next request)
            entry = new JarDiffEntry(f);
            _jarDiffEntries.put(key, entry);
        }
        // Check for no JarDiff to return
        if (entry.getJarDiffFile() == null) {
            return null;
        } else {
            return DownloadResponse.getFileDownloadResponse(entry.getJarDiffFile(),
                                                            _jarDiffMimeType,
                                                            entry.getJarDiffFile().lastModified(),
                                                            res.getReturnVersionId());
        }
    }
public static boolean isJavawsVersion(DownloadRequest dreq, String version) {
String javawsAgent = "javaws";
String jwsVer = dreq.getHttpRequest().getHeader("User-Agent");
// check the request is coming from javaws
if (!jwsVer.startsWith("javaws-")) {
// this is the new style User-Agent string
// User-Agent: JNLP/1.0.1 javaws/1.4.2 (b28) J2SE/1.4.2
StringTokenizer st = new StringTokenizer(jwsVer);
while (st.hasMoreTokens()) {
String verString = st.nextToken();
int index = verString.indexOf(javawsAgent);
if (index != -1) {
verString = verString.substring(index + javawsAgent.length() + 1);
return VersionString.contains(version, verString);
}
}
return false;
}
// extract the version id from the download request
int startIndex = jwsVer.indexOf("-");
if (startIndex == -1) {
return false;
}
int endIndex = jwsVer.indexOf("/");
if (endIndex == -1 || endIndex < startIndex) {
return false;
}
String verId = jwsVer.substring(startIndex + 1, endIndex);
// check whether the versionString contains the versionId
return VersionString.contains(version, verId);
}
/** Download resource to the given file */
private boolean download(URL target, File file) {
_log.addDebug("JarDiffHandler: Doing download");
boolean ret = true;
boolean delete = false;
// use bufferedstream for better performance
BufferedInputStream in = null;
BufferedOutputStream out = null;
try {
in = new BufferedInputStream(target.openStream());
out = new BufferedOutputStream(new FileOutputStream(file));
int read = 0;
int totalRead = 0;
byte[] buf = new byte[BUF_SIZE];
while ((read = in.read(buf)) != -1) {
out.write(buf, 0, read);
totalRead += read;
}
_log.addDebug("total read: " + totalRead);
_log.addDebug("Wrote URL " + target.toString() + " to file " + file);
} catch(IOException ioe) {
_log.addDebug("Got exception while downloading resource: " + ioe);
ret = false;
if (file != null) delete = true;
} finally {
try {
in.close();
in = null;
} catch (IOException ioe) {
_log.addDebug("Got exception while downloading resource: " + ioe);
}
try {
out.close();
out = null;
} catch (IOException ioe) {
_log.addDebug("Got exception while downloading resource: " + ioe);
}
if (delete) {
file.delete();
}
}
return ret;
}
// fix for 4720897
// if the jar file resides in a war file, download it to a temp dir
// so it can be used to generate jardiff
private String getRealPath(String path) throws IOException{
URL fileURL = _servletContext.getResource(path);
File tempDir = (File)_servletContext.getAttribute("javax.servlet.context.tempdir");
// download file into temp dir
if (fileURL != null) {
File newFile = File.createTempFile("temp", ".jar", tempDir);
if (download(fileURL, newFile)) {
String filePath = newFile.getPath();
return filePath;
}
}
return null;
}
    /** Generates a JarDiff patch between the client's current version and
     *  the requested version of the resource.
     *
     *  Resources with no real filesystem path (packed inside the WAR) are
     *  first copied to the container temp dir and removed again afterwards.
     *  The generated diff is discarded (null returned) when it is not
     *  smaller than the plain target JAR, or than its packed (.pack.gz)
     *  form when one exists.
     *
     *  @return temp file holding the diff, or null on failure or when a
     *          diff would not save bandwidth
     */
    private File generateJarDiff(ResourceCatalog catalog, DownloadRequest dreq, JnlpResource res, boolean doJarDiffWorkAround) {
        boolean del_old = false;
        boolean del_new = false;
        // Look up file for request version
        DownloadRequest fromDreq = dreq.getFromDownloadRequest();
        try {
            JnlpResource fromRes = catalog.lookupResource(fromDreq);
            /* Get file locations */
            String newFilePath = _servletContext.getRealPath(res.getPath());
            String oldFilePath = _servletContext.getRealPath(fromRes.getPath());
            // fix for 4720897: getRealPath() returns null for resources
            // inside a packed WAR - fall back to a temp-dir copy and
            // remember to delete it in the finally block below.
            if (newFilePath == null) {
                newFilePath = getRealPath(res.getPath());
                if (newFilePath != null) del_new = true;
            }
            if (oldFilePath == null) {
                oldFilePath = getRealPath(fromRes.getPath());
                if (oldFilePath != null) del_old = true;
            }
            if (newFilePath == null || oldFilePath == null) {
                return null;
            }
            // Create temp. file to store JarDiff file in
            File tempDir = (File)_servletContext.getAttribute("javax.servlet.context.tempdir");
            // fix for 4653036: JarDiffHandler() should use javax.servlet.context.tempdir to store the jardiff
            File outputFile = File.createTempFile("jnlp", ".jardiff", tempDir);
            _log.addDebug("Generating Jardiff between " + oldFilePath + " and " +
                          newFilePath + " Store in " + outputFile);
            // Generate JarDiff
            OutputStream os = new FileOutputStream(outputFile);
            JarDiff.createPatch(oldFilePath, newFilePath, os, !doJarDiffWorkAround);
            os.close();
            try {
                // Check that Jardiff is smaller, or return null
                if (outputFile.length() >= (new File(newFilePath).length())) {
                    _log.addDebug("JarDiff discarded - since it is bigger");
                    return null;
                }
                // Check that Jardiff is smaller than the packed version of
                // the new file, if the file exists at all
                File newFilePacked = new File(newFilePath + ".pack.gz");
                if (newFilePacked.exists()) {
                    _log.addDebug("generated jardiff size: " + outputFile.length());
                    _log.addDebug("packed requesting file size: " + newFilePacked.length());
                    if (outputFile.length() >= newFilePacked.length()) {
                        _log.addDebug("JarDiff discarded - packed version of requesting file is smaller");
                        return null;
                    }
                }
                _log.addDebug("JarDiff generation succeeded");
                return outputFile;
            } finally {
                // delete the temporarily downloaded file (WAR-packed case)
                if (del_new) {
                    new File(newFilePath).delete();
                }
                if (del_old) {
                    new File(oldFilePath).delete();
                }
            }
        } catch(IOException ioe) {
            _log.addDebug("Failed to genereate jardiff", ioe);
            return null;
        } catch(ErrorResponseException ere) {
            _log.addDebug("Failed to genereate jardiff", ere);
            return null;
        }
    }
}
| WhiteBearSolutions/WBSAirback | packages/wbsairback-java/wbsairback-java-7.0.1/usr/share/wbsairback/java/sample/jnlp/servlet/src/classes/jnlp/sample/servlet/JarDiffHandler.java | Java | apache-2.0 | 14,422 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.7.0) on Mon Jan 13 19:53:35 EST 2014 -->
<title>BasisSplineRegressionEngine</title>
<meta name="date" content="2014-01-13">
<link rel="stylesheet" type="text/css" href="../../../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="BasisSplineRegressionEngine";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="class-use/BasisSplineRegressionEngine.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../index-files/index-1.html">Index</a></li>
<li><a href="../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev Class</li>
<li><a href="../../../../org/drip/regression/spline/BasisSplineRegressor.html" title="class in org.drip.regression.spline"><span class="strong">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../index.html?org/drip/regression/spline/BasisSplineRegressionEngine.html" target="_top">Frames</a></li>
<li><a href="BasisSplineRegressionEngine.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li><a href="#fields_inherited_from_class_org.drip.regression.core.RegressionEngine">Field</a> | </li>
<li><a href="#constructor_summary">Constr</a> | </li>
<li><a href="#method_summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li><a href="#constructor_detail">Constr</a> | </li>
<li><a href="#method_detail">Method</a></li>
</ul>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<!-- ======== START OF CLASS DATA ======== -->
<div class="header">
<div class="subTitle">org.drip.regression.spline</div>
<h2 title="Class BasisSplineRegressionEngine" class="title">Class BasisSplineRegressionEngine</h2>
</div>
<div class="contentContainer">
<ul class="inheritance">
<li>java.lang.Object</li>
<li>
<ul class="inheritance">
<li><a href="../../../../org/drip/regression/core/RegressionEngine.html" title="class in org.drip.regression.core">org.drip.regression.core.RegressionEngine</a></li>
<li>
<ul class="inheritance">
<li>org.drip.regression.spline.BasisSplineRegressionEngine</li>
</ul>
</li>
</ul>
</li>
</ul>
<div class="description">
<ul class="blockList">
<li class="blockList">
<hr>
<br>
<pre>public class <span class="strong">BasisSplineRegressionEngine</span>
extends <a href="../../../../org/drip/regression/core/RegressionEngine.html" title="class in org.drip.regression.core">RegressionEngine</a></pre>
<div class="block">BasisSplineRegressionEngine implements the RegressionEngine class for the basis spline functionality.</div>
</li>
</ul>
</div>
<div class="summary">
<ul class="blockList">
<li class="blockList">
<!-- =========== FIELD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="field_summary">
<!-- -->
</a>
<h3>Field Summary</h3>
<ul class="blockList">
<li class="blockList"><a name="fields_inherited_from_class_org.drip.regression.core.RegressionEngine">
<!-- -->
</a>
<h3>Fields inherited from class org.drip.regression.core.<a href="../../../../org/drip/regression/core/RegressionEngine.html" title="class in org.drip.regression.core">RegressionEngine</a></h3>
<code><a href="../../../../org/drip/regression/core/RegressionEngine.html#REGRESSION_DETAIL_MODULE_AGGREGATED">REGRESSION_DETAIL_MODULE_AGGREGATED</a>, <a href="../../../../org/drip/regression/core/RegressionEngine.html#REGRESSION_DETAIL_MODULE_UNIT_AGGREGATED">REGRESSION_DETAIL_MODULE_UNIT_AGGREGATED</a>, <a href="../../../../org/drip/regression/core/RegressionEngine.html#REGRESSION_DETAIL_MODULE_UNIT_DECOMPOSED">REGRESSION_DETAIL_MODULE_UNIT_DECOMPOSED</a>, <a href="../../../../org/drip/regression/core/RegressionEngine.html#REGRESSION_DETAIL_STATS">REGRESSION_DETAIL_STATS</a></code></li>
</ul>
</li>
</ul>
<!-- ======== CONSTRUCTOR SUMMARY ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor_summary">
<!-- -->
</a>
<h3>Constructor Summary</h3>
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Constructor Summary table, listing constructors, and an explanation">
<caption><span>Constructors</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colOne" scope="col">Constructor and Description</th>
</tr>
<tr class="altColor">
<td class="colOne"><code><strong><a href="../../../../org/drip/regression/spline/BasisSplineRegressionEngine.html#BasisSplineRegressionEngine(int, int)">BasisSplineRegressionEngine</a></strong>(int iNumRuns,
int iRegressionDetail)</code> </td>
</tr>
</table>
</li>
</ul>
<!-- ========== METHOD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="method_summary">
<!-- -->
</a>
<h3>Method Summary</h3>
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation">
<caption><span>Methods</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tr class="altColor">
<td class="colFirst"><code>static void</code></td>
<td class="colLast"><code><strong><a href="../../../../org/drip/regression/spline/BasisSplineRegressionEngine.html#main(java.lang.String[])">main</a></strong>(java.lang.String[] astrArgs)</code> </td>
</tr>
</table>
<ul class="blockList">
<li class="blockList"><a name="methods_inherited_from_class_org.drip.regression.core.RegressionEngine">
<!-- -->
</a>
<h3>Methods inherited from class org.drip.regression.core.<a href="../../../../org/drip/regression/core/RegressionEngine.html" title="class in org.drip.regression.core">RegressionEngine</a></h3>
<code><a href="../../../../org/drip/regression/core/RegressionEngine.html#initRegressionEnv()">initRegressionEnv</a></code></li>
</ul>
<ul class="blockList">
<li class="blockList"><a name="methods_inherited_from_class_java.lang.Object">
<!-- -->
</a>
<h3>Methods inherited from class java.lang.Object</h3>
<code>equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait</code></li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
<div class="details">
<ul class="blockList">
<li class="blockList">
<!-- ========= CONSTRUCTOR DETAIL ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor_detail">
<!-- -->
</a>
<h3>Constructor Detail</h3>
<a name="BasisSplineRegressionEngine(int, int)">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>BasisSplineRegressionEngine</h4>
<pre>public BasisSplineRegressionEngine(int iNumRuns,
int iRegressionDetail)
throws java.lang.Exception</pre>
<dl><dt><span class="strong">Throws:</span></dt>
<dd><code>java.lang.Exception</code></dd></dl>
</li>
</ul>
</li>
</ul>
<!-- ============ METHOD DETAIL ========== -->
<ul class="blockList">
<li class="blockList"><a name="method_detail">
<!-- -->
</a>
<h3>Method Detail</h3>
<a name="main(java.lang.String[])">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>main</h4>
<pre>public static void main(java.lang.String[] astrArgs)
throws java.lang.Exception</pre>
<dl><dt><span class="strong">Throws:</span></dt>
<dd><code>java.lang.Exception</code></dd></dl>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
</div>
<!-- ========= END OF CLASS DATA ========= -->
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="class-use/BasisSplineRegressionEngine.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../index-files/index-1.html">Index</a></li>
<li><a href="../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev Class</li>
<li><a href="../../../../org/drip/regression/spline/BasisSplineRegressor.html" title="class in org.drip.regression.spline"><span class="strong">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../index.html?org/drip/regression/spline/BasisSplineRegressionEngine.html" target="_top">Frames</a></li>
<li><a href="BasisSplineRegressionEngine.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li><a href="#fields_inherited_from_class_org.drip.regression.core.RegressionEngine">Field</a> | </li>
<li><a href="#constructor_summary">Constr</a> | </li>
<li><a href="#method_summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li><a href="#constructor_detail">Constr</a> | </li>
<li><a href="#method_detail">Method</a></li>
</ul>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
| tectronics/splinelibrary | 2.3/docs/Javadoc/org/drip/regression/spline/BasisSplineRegressionEngine.html | HTML | apache-2.0 | 11,290 |
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import random
import time
from oslo.config import cfg
from oslo.db import exception as db_exc
from oslo import messaging
from oslo.utils import timeutils
import sqlalchemy as sa
from sqlalchemy import func
from sqlalchemy import or_
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload
from sqlalchemy import sql
from neutron.common import constants
from neutron.common import utils as n_utils
from neutron import context as n_ctx
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import l3_attrs_db
from neutron.db import model_base
from neutron.extensions import l3agentscheduler
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
# Config options controlling how routers are assigned to L3 agents:
# scheduler driver, automatic scheduling, and automatic failover away
# from dead agents.
L3_AGENTS_SCHEDULER_OPTS = [
    cfg.StrOpt('router_scheduler_driver',
               default='neutron.scheduler.l3_agent_scheduler.ChanceScheduler',
               help=_('Driver to use for scheduling '
                      'router to a default L3 agent')),
    cfg.BoolOpt('router_auto_schedule', default=True,
                help=_('Allow auto scheduling of routers to L3 agent.')),
    cfg.BoolOpt('allow_automatic_l3agent_failover', default=False,
                help=_('Automatically reschedule routers from offline L3 '
                       'agents to online L3 agents.')),
]
cfg.CONF.register_opts(L3_AGENTS_SCHEDULER_OPTS)
class RouterL3AgentBinding(model_base.BASEV2):
    """Represents binding between neutron routers and L3 agents."""
    # Composite primary key: one row per (router, agent) pair.  Rows are
    # removed automatically when either side is deleted (ON DELETE CASCADE).
    router_id = sa.Column(sa.String(36),
                          sa.ForeignKey("routers.id", ondelete='CASCADE'),
                          primary_key=True)
    l3_agent = orm.relation(agents_db.Agent)
    l3_agent_id = sa.Column(sa.String(36),
                            sa.ForeignKey("agents.id", ondelete='CASCADE'),
                            primary_key=True)
class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
agentschedulers_db.AgentSchedulerDbMixin):
"""Mixin class to add l3 agent scheduler extension to plugins
using the l3 agent for routing.
"""
router_scheduler = None
def start_periodic_agent_status_check(self):
if not cfg.CONF.allow_automatic_l3agent_failover:
LOG.info(_LI("Skipping period L3 agent status check because "
"automatic router rescheduling is disabled."))
return
self.periodic_agent_loop = loopingcall.FixedIntervalLoopingCall(
self.reschedule_routers_from_down_agents)
interval = max(cfg.CONF.agent_down_time / 2, 1)
# add random initial delay to allow agents to check in after the
# neutron server first starts. random to offset multiple servers
self.periodic_agent_loop.start(interval=interval,
initial_delay=random.randint(interval, interval * 2))
    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up.

        Runs periodically (see start_periodic_agent_status_check).  An
        agent counts as dead when its last heartbeat is older than twice
        the configured agent_down_time.
        """
        # give agents extra time to handle transient failures
        agent_dead_limit = cfg.CONF.agent_down_time * 2
        # check for an abrupt clock change since last check. if a change is
        # detected, sleep for a while to let the agents check in.
        tdelta = timeutils.utcnow() - getattr(self, '_clock_jump_canary',
                                              timeutils.utcnow())
        if timeutils.total_seconds(tdelta) > cfg.CONF.agent_down_time:
            LOG.warn(_LW("Time since last L3 agent reschedule check has "
                         "exceeded the interval between checks. Waiting "
                         "before check to allow agents to send a heartbeat "
                         "in case there was a clock adjustment."))
            time.sleep(agent_dead_limit)
        self._clock_jump_canary = timeutils.utcnow()
        context = n_ctx.get_admin_context()
        cutoff = timeutils.utcnow() - datetime.timedelta(
            seconds=agent_dead_limit)
        # Bindings whose agent is admin-up but has not sent a heartbeat
        # since the cutoff.  HA routers are excluded (ha == false or NULL
        # only): they fail over on their own and must not be rescheduled
        # here.
        down_bindings = (
            context.session.query(RouterL3AgentBinding).
            join(agents_db.Agent).
            filter(agents_db.Agent.heartbeat_timestamp < cutoff,
                   agents_db.Agent.admin_state_up).
            outerjoin(l3_attrs_db.RouterExtraAttributes,
                      l3_attrs_db.RouterExtraAttributes.router_id ==
                      RouterL3AgentBinding.router_id).
            filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                          l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
        try:
            for binding in down_bindings:
                LOG.warn(_LW(
                    "Rescheduling router %(router)s from agent %(agent)s "
                    "because the agent did not report to the server in "
                    "the last %(dead_time)s seconds."),
                    {'router': binding.router_id,
                     'agent': binding.l3_agent_id,
                     'dead_time': agent_dead_limit})
                try:
                    self.reschedule_router(context, binding.router_id)
                except (l3agentscheduler.RouterReschedulingFailed,
                        messaging.RemoteError):
                    # Catch individual router rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule router %s"),
                                  binding.router_id)
        except db_exc.DBError:
            # Catch DB errors here so a transient DB connectivity issue
            # doesn't stop the loopingcall.
            LOG.exception(_LE("Exception encountered during router "
                              "rescheduling."))
def validate_agent_router_combination(self, context, agent, router):
"""Validate if the router can be correctly assigned to the agent.
:raises: RouterL3AgentMismatch if attempting to assign DVR router
to legacy agent, or centralized router to compute's L3 agents.
:raises: InvalidL3Agent if attempting to assign router to an
unsuitable agent (disabled, type != L3, incompatible configuration)
:raises: DVRL3CannotAssignToDvrAgent if attempting to assign DVR
router from one DVR Agent to another.
"""
is_distributed = router.get('distributed')
agent_conf = self.get_configuration_dict(agent)
agent_mode = agent_conf.get('agent_mode', 'legacy')
router_type = ('distributed' if is_distributed else 'centralized')
is_agent_router_types_incompatible = (
agent_mode == 'dvr' and not is_distributed
or agent_mode == 'legacy' and is_distributed
)
if is_agent_router_types_incompatible:
raise l3agentscheduler.RouterL3AgentMismatch(
router_type=router_type, router_id=router['id'],
agent_mode=agent_mode, agent_id=agent['id'])
if agent_mode == 'dvr' and is_distributed:
raise l3agentscheduler.DVRL3CannotAssignToDvrAgent(
router_type=router_type, router_id=router['id'],
agent_id=agent['id'])
is_wrong_type_or_unsuitable_agent = (
agent['agent_type'] != constants.AGENT_TYPE_L3 or
not agent['admin_state_up'] or
not self.get_l3_agent_candidates(context, router, [agent])
)
if is_wrong_type_or_unsuitable_agent:
raise l3agentscheduler.InvalidL3Agent(id=agent['id'])
def check_agent_router_scheduling_needed(self, context, agent, router):
"""Check if the router scheduling is needed.
:raises: RouterHostedByL3Agent if router is already assigned
to a different agent.
:returns: True if scheduling is needed, otherwise False
"""
router_id = router['id']
agent_id = agent['id']
query = context.session.query(RouterL3AgentBinding)
bindings = query.filter_by(router_id=router_id).all()
if not bindings:
return True
for binding in bindings:
if binding.l3_agent_id == agent_id:
# router already bound to the agent we need
return False
if router.get('distributed'):
return False
# non-dvr case: centralized router is already bound to some agent
raise l3agentscheduler.RouterHostedByL3Agent(
router_id=router_id,
agent_id=bindings[0].l3_agent_id)
def create_router_to_agent_binding(self, context, agent, router):
"""Create router to agent binding."""
router_id = router['id']
agent_id = agent['id']
if self.router_scheduler:
try:
self.router_scheduler.bind_router(context, router_id, agent)
except db_exc.DBError:
raise l3agentscheduler.RouterSchedulingFailed(
router_id=router_id, agent_id=agent_id)
    def add_router_to_l3_agent(self, context, agent_id, router_id):
        """Add a l3 agent to host a router.

        Validates the agent/router combination, creates the DB binding
        when one is needed, and notifies the agent.  The helpers called
        here raise when the combination is invalid or the router is
        already hosted by a different agent.
        """
        with context.session.begin(subtransactions=True):
            router = self.get_router(context, router_id)
            agent = self._get_agent(context, agent_id)
            self.validate_agent_router_combination(context, agent, router)
            if self.check_agent_router_scheduling_needed(
                    context, agent, router):
                self.create_router_to_agent_binding(context, agent, router)
            else:
                # Already bound to this very agent: no binding to create
                # and no notification to send.
                return
        # Notify after the transaction block so the agent observes the
        # committed binding.
        l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
        if l3_notifier:
            l3_notifier.router_added_to_agent(
                context, [router_id], agent.host)
def remove_router_from_l3_agent(self, context, agent_id, router_id):
"""Remove the router from l3 agent.
After removal, the router will be non-hosted until there is update
which leads to re-schedule or be added to another agent manually.
"""
agent = self._get_agent(context, agent_id)
self._unbind_router(context, router_id, agent_id)
l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
if l3_notifier:
l3_notifier.router_removed_from_agent(
context, router_id, agent.host)
def _unbind_router(self, context, router_id, agent_id):
with context.session.begin(subtransactions=True):
query = context.session.query(RouterL3AgentBinding)
query = query.filter(
RouterL3AgentBinding.router_id == router_id,
RouterL3AgentBinding.l3_agent_id == agent_id)
try:
binding = query.one()
except exc.NoResultFound:
raise l3agentscheduler.RouterNotHostedByL3Agent(
router_id=router_id, agent_id=agent_id)
context.session.delete(binding)
    def reschedule_router(self, context, router_id, candidates=None):
        """Reschedule router to a new l3 agent

        Remove the router from the agent(s) currently hosting it and
        schedule it again

        :raises: RouterReschedulingFailed when no new agent can be found.
        """
        cur_agents = self.list_l3_agents_hosting_router(
            context, router_id)['agents']
        # Unbind and rebind inside one transaction: if scheduling fails and
        # we raise, the unbinding rolls back with it.
        with context.session.begin(subtransactions=True):
            for agent in cur_agents:
                self._unbind_router(context, router_id, agent['id'])
            new_agent = self.schedule_router(context, router_id,
                                             candidates=candidates)
            if not new_agent:
                raise l3agentscheduler.RouterReschedulingFailed(
                    router_id=router_id)
        # Notifications go out only after the transaction block above.
        l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
        if l3_notifier:
            for agent in cur_agents:
                l3_notifier.router_removed_from_agent(
                    context, router_id, agent['host'])
            l3_notifier.router_added_to_agent(
                context, [router_id], new_agent.host)
def list_routers_on_l3_agent(self, context, agent_id):
query = context.session.query(RouterL3AgentBinding.router_id)
query = query.filter(RouterL3AgentBinding.l3_agent_id == agent_id)
router_ids = [item[0] for item in query]
if router_ids:
return {'routers':
self.get_routers(context, filters={'id': router_ids})}
else:
return {'routers': []}
    def list_active_sync_routers_on_active_l3_agent(
            self, context, host, router_ids):
        """Return sync data for active routers hosted by the agent on host.

        Returns [] when the agent is administratively down.  When
        router_ids is given, only those routers (if actually bound to the
        agent) are considered.
        """
        agent = self._get_agent_by_type_and_host(
            context, constants.AGENT_TYPE_L3, host)
        if not agent.admin_state_up:
            return []
        query = context.session.query(RouterL3AgentBinding.router_id)
        query = query.filter(
            RouterL3AgentBinding.l3_agent_id == agent.id)
        if router_ids:
            query = query.filter(
                RouterL3AgentBinding.router_id.in_(router_ids))
        router_ids = [item[0] for item in query]
        if router_ids:
            # When the HA extension is supported, use the per-host HA
            # variant of the sync-data call; otherwise the generic one.
            if n_utils.is_extension_supported(self,
                                              constants.L3_HA_MODE_EXT_ALIAS):
                return self.get_ha_sync_data_for_host(context, host,
                                                      router_ids=router_ids,
                                                      active=True)
            else:
                return self.get_sync_data(context, router_ids=router_ids,
                                          active=True)
        else:
            return []
    def get_l3_agents_hosting_routers(self, context, router_ids,
                                      admin_state_up=None,
                                      active=None):
        """Return the L3 agents hosting any of the given routers.

        :param admin_state_up: when not None, filter on the agent's admin
            state.
        :param active: when not None, agents with stale heartbeats are
            dropped (the boolean value itself is not inspected).
        """
        if not router_ids:
            return []
        query = context.session.query(RouterL3AgentBinding)
        if len(router_ids) > 1:
            query = query.options(joinedload('l3_agent')).filter(
                RouterL3AgentBinding.router_id.in_(router_ids))
        else:
            query = query.options(joinedload('l3_agent')).filter(
                RouterL3AgentBinding.router_id == router_ids[0])
        if admin_state_up is not None:
            # NOTE(review): this filter references agents_db.Agent without
            # an explicit join from the binding table - verify the emitted
            # SQL correlates the two tables as intended.
            query = (query.filter(agents_db.Agent.admin_state_up ==
                                  admin_state_up))
        l3_agents = [binding.l3_agent for binding in query]
        if active is not None:
            # Python-side liveness filter based on the last heartbeat.
            l3_agents = [l3_agent for l3_agent in
                         l3_agents if not
                         agents_db.AgentDbMixin.is_agent_down(
                             l3_agent['heartbeat_timestamp'])]
        return l3_agents
def _get_l3_bindings_hosting_routers(self, context, router_ids):
if not router_ids:
return []
query = context.session.query(RouterL3AgentBinding)
if len(router_ids) > 1:
query = query.options(joinedload('l3_agent')).filter(
RouterL3AgentBinding.router_id.in_(router_ids))
else:
query = query.options(joinedload('l3_agent')).filter(
RouterL3AgentBinding.router_id == router_ids[0])
return query.all()
def list_l3_agents_hosting_router(self, context, router_id):
with context.session.begin(subtransactions=True):
bindings = self._get_l3_bindings_hosting_routers(
context, [router_id])
results = []
for binding in bindings:
l3_agent_dict = self._make_agent_dict(binding.l3_agent)
results.append(l3_agent_dict)
if results:
return {'agents': results}
else:
return {'agents': []}
    def get_l3_agents(self, context, active=None, filters=None):
        """Return eligible L3 agents, optionally filtered.

        :param active: when not None, filter on admin_state_up == active.
        :param filters: dict of agent column -> list of values; the special
            key 'agent_modes' matches against the agent_mode embedded in
            the agent's serialized 'configurations' blob.
        """
        query = context.session.query(agents_db.Agent)
        query = query.filter(
            agents_db.Agent.agent_type == constants.AGENT_TYPE_L3)
        if active is not None:
            query = (query.filter(agents_db.Agent.admin_state_up == active))
        if filters:
            for key, value in filters.iteritems():
                column = getattr(agents_db.Agent, key, None)
                if column:
                    query = query.filter(column.in_(value))
            agent_modes = filters.get('agent_modes', [])
            if agent_modes:
                # agent_mode lives inside the serialized 'configurations'
                # column, so match the '"agent_mode": "<mode>"' fragment
                # textually.  This is fragile: it depends on the exact
                # formatting of the stored blob.
                agent_mode_key = '\"agent_mode\": \"'
                configuration_filter = (
                    [agents_db.Agent.configurations.contains('%s%s\"' %
                     (agent_mode_key, agent_mode))
                     for agent_mode in agent_modes])
                query = query.filter(or_(*configuration_filter))
        # Apply the liveness/eligibility policy on the Python side.
        return [l3_agent
                for l3_agent in query
                if agentschedulers_db.AgentSchedulerDbMixin.is_eligible_agent(
                    active, l3_agent)]
def check_ports_exist_on_l3agent(self, context, l3_agent, router_id):
"""
This function checks for existence of dvr serviceable
ports on the host, running the input l3agent.
"""
subnet_ids = self.get_subnet_ids_on_router(context, router_id)
core_plugin = manager.NeutronManager.get_plugin()
filter = {'fixed_ips': {'subnet_id': subnet_ids}}
ports = core_plugin.get_ports(context, filters=filter)
for port in ports:
if (n_utils.is_dvr_serviced(port['device_owner']) and
l3_agent['host'] == port['binding:host_id']):
return True
return False
    def get_snat_candidates(self, sync_router, l3_agents):
        """Get the valid snat enabled l3 agents for the distributed router.

        Only admin-up agents in 'dvr_snat' mode qualify, subject to the
        same namespace and external-network constraints as in
        get_l3_agent_candidates.  Returns [] for non-distributed routers.
        """
        candidates = []
        is_router_distributed = sync_router.get('distributed', False)
        if not is_router_distributed:
            return candidates
        for l3_agent in l3_agents:
            if not l3_agent.admin_state_up:
                continue
            agent_conf = self.get_configuration_dict(l3_agent)
            agent_mode = agent_conf.get('agent_mode', 'legacy')
            # Centralized SNAT is only handled by dvr_snat agents.
            if agent_mode != 'dvr_snat':
                continue
            router_id = agent_conf.get('router_id', None)
            use_namespaces = agent_conf.get('use_namespaces', True)
            # An agent pinned to a single router (no namespaces) can only
            # take that particular router.
            if not use_namespaces and router_id != sync_router['id']:
                continue
            handle_internal_only_routers = agent_conf.get(
                'handle_internal_only_routers', True)
            gateway_external_network_id = agent_conf.get(
                'gateway_external_network_id', None)
            ex_net_id = (sync_router['external_gateway_info'] or {}).get(
                'network_id')
            # Skip agents that refuse gateway-less routers, and agents tied
            # to a different external network than the router's gateway.
            if ((not ex_net_id and not handle_internal_only_routers) or
                (ex_net_id and gateway_external_network_id and
                 ex_net_id != gateway_external_network_id)):
                continue
            candidates.append(l3_agent)
        return candidates
def get_l3_agent_candidates(self, context, sync_router, l3_agents):
"""Get the valid l3 agents for the router from a list of l3_agents."""
candidates = []
for l3_agent in l3_agents:
if not l3_agent.admin_state_up:
continue
agent_conf = self.get_configuration_dict(l3_agent)
router_id = agent_conf.get('router_id', None)
use_namespaces = agent_conf.get('use_namespaces', True)
handle_internal_only_routers = agent_conf.get(
'handle_internal_only_routers', True)
gateway_external_network_id = agent_conf.get(
'gateway_external_network_id', None)
agent_mode = agent_conf.get('agent_mode', 'legacy')
if not use_namespaces and router_id != sync_router['id']:
continue
ex_net_id = (sync_router['external_gateway_info'] or {}).get(
'network_id')
if ((not ex_net_id and not handle_internal_only_routers) or
(ex_net_id and gateway_external_network_id and
ex_net_id != gateway_external_network_id)):
continue
is_router_distributed = sync_router.get('distributed', False)
if agent_mode in ('legacy', 'dvr_snat') and (
not is_router_distributed):
candidates.append(l3_agent)
elif is_router_distributed and agent_mode.startswith('dvr') and (
self.check_ports_exist_on_l3agent(
context, l3_agent, sync_router['id'])):
candidates.append(l3_agent)
return candidates
def auto_schedule_routers(self, context, host, router_ids):
if self.router_scheduler:
return self.router_scheduler.auto_schedule_routers(
self, context, host, router_ids)
def schedule_router(self, context, router, candidates=None):
if self.router_scheduler:
return self.router_scheduler.schedule(
self, context, router, candidates=candidates)
def schedule_routers(self, context, routers):
"""Schedule the routers to l3 agents."""
for router in routers:
self.schedule_router(context, router, candidates=None)
    def get_l3_agent_with_min_routers(self, context, agent_ids):
        """Return l3 agent with the least number of routers."""
        # Count router bindings per agent.  The outer join keeps agents with
        # no bindings at all (count 0), and ordering by the ascending count
        # puts the least-loaded agent first.
        query = context.session.query(
            agents_db.Agent,
            func.count(
                RouterL3AgentBinding.router_id
            ).label('count')).outerjoin(RouterL3AgentBinding).group_by(
            RouterL3AgentBinding.l3_agent_id).order_by('count')
        # NOTE(review): .first() returns None when agent_ids matches no
        # agents, so res[0] would raise TypeError -- presumably callers
        # always pass at least one valid agent id; confirm.
        res = query.filter(agents_db.Agent.id.in_(agent_ids)).first()
        return res[0]
| projectcalico/calico-neutron | neutron/db/l3_agentschedulers_db.py | Python | apache-2.0 | 22,584 |
using System;
using System.Data;
using System.Data.SqlClient;
using System.Text;
using NServiceBus;
class Program
{
    // Single source of truth for the database used by BOTH the NServiceBus
    // SQL transport and the raw ADO.NET send below.  Previously this literal
    // was duplicated in Main and PlaceOrder; the two must always agree or
    // natively-sent messages would land in the wrong database.
    const string ConnectionString =
        @"Data Source=.\SQLEXPRESS;Initial Catalog=samples;Integrated Security=True";

    static void Main()
    {
        #region EndpointConfiguration
        BusConfiguration busConfiguration = new BusConfiguration();
        busConfiguration.UseTransport<SqlServerTransport>()
            .ConnectionString(ConnectionString);
        busConfiguration.EndpointName("Samples.SqlServer.NativeIntegration");
        busConfiguration.UseSerialization<JsonSerializer>();
        #endregion
        busConfiguration.UsePersistence<InMemoryPersistence>();
        using (Bus.Create(busConfiguration).Start())
        {
            Console.WriteLine("Press enter to send a message");
            Console.WriteLine("Press any key to exit");
            while (true)
            {
                ConsoleKeyInfo key = Console.ReadKey();
                Console.WriteLine();
                if (key.Key != ConsoleKey.Enter)
                {
                    return;
                }
                PlaceOrder();
            }
        }
    }

    // Sends a PlaceOrder message by inserting a row directly into the
    // endpoint's input queue table with plain ADO.NET -- no NServiceBus
    // client API involved.
    static void PlaceOrder()
    {
        #region MessagePayload
        // $type lets the JSON deserializer resolve the message class.
        string message = @"{
    $type: 'PlaceOrder',
    OrderId: 'Order from ADO.net sender'
}";
        #endregion
        #region SendingUsingAdoNet
        using (SqlConnection connection = new SqlConnection(ConnectionString))
        {
            connection.Open();
            string insertSql = @"INSERT INTO [Samples.SqlServer.NativeIntegration] ([Id],[Recoverable],[Headers],[Body]) VALUES (@Id,@Recoverable,@Headers,@Body)";
            using (SqlCommand command = new SqlCommand(insertSql, connection))
            {
                command.CommandType = CommandType.Text;
                command.Parameters.Add("Id", SqlDbType.UniqueIdentifier).Value = Guid.NewGuid();
                command.Parameters.Add("Headers", SqlDbType.VarChar).Value = "";
                command.Parameters.Add("Body", SqlDbType.VarBinary).Value = Encoding.UTF8.GetBytes(message);
                command.Parameters.Add("Recoverable", SqlDbType.Bit).Value = true;
                command.ExecuteNonQuery();
            }
        }
        #endregion
    }
}
| pedroreys/docs.particular.net | samples/sqltransport/native-integration/Version_2/Receiver/Program.cs | C# | apache-2.0 | 2,460 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.