text stringlengths 2 1.04M | meta dict |
|---|---|
package ezvcard.io.scribe;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.xml.transform.OutputKeys;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.xml.sax.SAXException;
import ezvcard.VCardDataType;
import ezvcard.VCardVersion;
import ezvcard.io.CannotParseException;
import ezvcard.io.json.JCardValue;
import ezvcard.io.xml.XCardElement;
import ezvcard.parameter.VCardParameters;
import ezvcard.property.Xml;
import ezvcard.util.XmlUtils;
/**
* Marshals {@link Xml} properties.
* @author Michael Angstadt
*/
public class XmlScribe extends VCardPropertyScribe<Xml> {
/**
 * Creates the scribe, binding it to the {@link Xml} property class and the
 * vCard property name "XML".
 */
public XmlScribe() {
    super(Xml.class, "XML");
}
@Override
protected VCardDataType _defaultDataType(VCardVersion version) {
    // The XML document is serialized to a plain string, so the data type is
    // TEXT regardless of vCard version.
    return VCardDataType.TEXT;
}
/**
 * Serializes the property's XML document to an escaped plain-text string.
 * A property with no document value is written as an empty string.
 */
@Override
protected String _writeText(Xml property, VCardVersion version) {
    Document document = property.getValue();
    return (document == null) ? "" : escape(valueToString(document));
}
@Override
protected Xml _parseText(String value, VCardDataType dataType, VCardVersion version, VCardParameters parameters, List<String> warnings) {
    // Undo the vCard escaping applied by _writeText before parsing as XML.
    value = unescape(value);
    try {
        return new Xml(value);
    } catch (SAXException e) {
        // NOTE(review): the SAXException cause is dropped here — consider
        // attaching it if CannotParseException supports a cause argument.
        throw new CannotParseException("Cannot parse value as XML: " + value);
    }
}
@Override
protected void _writeXml(Xml property, XCardElement element) {
    //Xml properties are handled as a special case when writing xCard documents, so this method should never get called (see: "XCardDocument" class)
    // Delegating to the superclass keeps the default behavior if it ever is.
    super._writeXml(property, element);
}
@Override
protected Xml _parseXml(XCardElement element, VCardParameters parameters, List<String> warnings) {
    // Wrap the raw xCard element; the parameters were already parsed into
    // the VCardParameters object, so the embedded <parameters> element is
    // redundant and is stripped below.
    Xml xml = new Xml(element.element());

    //remove the <parameters> element
    Element root = XmlUtils.getRootElement(xml.getValue());
    for (Element child : XmlUtils.toElementList(root.getChildNodes())) {
        // Only remove <parameters> children in the xCard (vCard 4.0) namespace.
        if ("parameters".equals(child.getLocalName()) && VCardVersion.V4_0.getXmlNamespace().equals(child.getNamespaceURI())) {
            root.removeChild(child);
        }
    }
    return xml;
}
/**
 * Serializes the property's XML document as a single jCard string value.
 * A property with no document value is written as a null jCard value.
 */
@Override
protected JCardValue _writeJson(Xml property) {
    Document document = property.getValue();
    String serialized = (document == null) ? null : valueToString(document);
    return JCardValue.single(serialized);
}
@Override
protected Xml _parseJson(JCardValue value, VCardDataType dataType, VCardParameters parameters, List<String> warnings) {
    try {
        String xml = value.asSingle();
        // A null jCard value maps to a property with a null document; the
        // (Document) cast disambiguates the Xml constructor overloads.
        return (xml == null) ? new Xml((Document) null) : new Xml(xml);
    } catch (SAXException e) {
        // NOTE(review): the SAXException cause is dropped here — consider
        // attaching it if CannotParseException supports a cause argument.
        throw new CannotParseException("Cannot parse value as XML: " + value);
    }
}
/**
 * Serializes a DOM document to a string, omitting the XML declaration so the
 * output can be embedded in a vCard property value.
 */
private String valueToString(Document document) {
    Map<String, String> outputProperties = new HashMap<String, String>();
    outputProperties.put(OutputKeys.OMIT_XML_DECLARATION, "yes");
    return XmlUtils.toString(document, outputProperties);
}
} | {
"content_hash": "4aa366c74d6462de81c3171c901b9528",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 146,
"avg_line_length": 27.235849056603772,
"alnum_prop": 0.7343262902667128,
"repo_name": "hongnguyenpro/ez-vcard",
"id": "2c4acfd4233ce966bd4f946bb54f708572387017",
"size": "4213",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/main/java/ezvcard/io/scribe/XmlScribe.java",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "19852"
},
{
"name": "Java",
"bytes": "2116055"
}
],
"symlink_target": ""
} |
#pragma once
#include <csv.h>

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <boost/lexical_cast.hpp>

#include <cerrno>      // errno (MappedFile::set_error)
#include <cstring>     // strerror
#include <functional>  // std::function
#include <iostream>
#include <string>      // std::string
#include <tuple>       // std::tuple field storage
#include <typeinfo>
#include <vector>
namespace csv {
namespace detail {
// Errno-style status: a numeric code plus a human-readable message.
// Converts to true when no error has been recorded (number == 0).
struct Result {
  int number{0};
  std::string message{"success"};
  operator bool() const { return number == 0; }
};
/**
 * Read-only memory mapping of a file (RAII).
 *
 * On any failure (open/fstat/mmap) the `status` member records errno and
 * the object converts to false; begin()/end() are only meaningful on
 * success. The mapping and descriptor are released by the destructor.
 */
class MappedFile {
 private:
  int fd_;
  size_t size_;
  char* data_;

 public:
  MappedFile(const std::string& filename) : fd_(-1), size_(0), data_(NULL) {
    fd_ = ::open(filename.c_str(), O_RDONLY);
    if (fd_ < 0) {
      set_error();
      return;
    }
    struct stat statbuf;
    if (::fstat(fd_, &statbuf) < 0) {
      set_error();
      return;
    }
    size_ = statbuf.st_size;
    // mmap reports failure with MAP_FAILED ((void*)-1), not NULL.
    void* mapped = ::mmap(NULL, size_, PROT_READ, MAP_PRIVATE, fd_, 0);
    if (mapped == MAP_FAILED) {
      data_ = NULL;
      set_error();
      return;
    }
    data_ = (char*)mapped;
  }
  ~MappedFile() {
    // Only release resources that were successfully acquired.
    if (data_ != NULL) {
      ::munmap(data_, size_);
    }
    if (fd_ >= 0) {
      ::close(fd_);
    }
  }
  const char* begin() const { return data_; }
  const char* end() const { return data_ + size_; }
  operator bool() { return status; }
  detail::Result status;

 private:
  // Snapshot errno into the status object.
  void set_error() {
    status.number = errno;
    status.message = strerror(errno);
  }
};
// generates sequence numbers used in template expansion
namespace sequence {
// Compile-time list of ints (a pre-C++14 stand-in for std::index_sequence).
template <int... Is>
struct index {};
// generate<N> recursively expands to index<0, 1, ..., N-1>.
template <int N, int... Is>
struct generate : generate<N - 1, N - 1, Is...> {};
// Recursion terminus: all indices generated.
template <int... Is>
struct generate<0, Is...> : index<Is...> {};
} // namespace sequence
namespace meta {
// fields<F> peels the callable type F down to its argument list so the impl
// below (fields<R(Args...)>) can store one value per CSV column. Each
// specialization strips one layer of callable syntax.
template <class F>
class fields;
// function pointer
template <class R, class... Args>
class fields<R (*)(Args...)> : public fields<R(Args...)> {};
// member function pointer
template <class C, class R, class... Args>
class fields<R (C::*)(Args...)> : public fields<R(Args...)> {};
// const member function pointer
template <class C, class R, class... Args>
class fields<R (C::*)(Args...) const> : public fields<R(Args...)> {};
// member object pointer
template <class C, class R>
class fields<R(C::*)> : public fields<R(C&)> {};
// functor: recurse on the type of its operator() (covers lambdas)
template <class F>
class fields : public fields<decltype(&F::operator())> {};
// reference
template <class F>
class fields<F&> : public fields<F> {};
// perfect reference
template <class F>
class fields<F&&> : public fields<F> {};
// impl: stores one decayed value per sink argument in a tuple, plus a
// per-column mutator that parses raw CSV text into the matching tuple slot.
template <class R, class... Args>
class fields<R(Args...)> {
  // Callback that converts one raw CSV field into its typed value.
  using mutator_t = std::function<void(const char* buf, size_t len)>;

 public:
  fields() {
    setupFieldHandlers(
        typename detail::sequence::generate<sizeof...(Args)>::index());
  }
  // Parse column `field_pos` of the current row; columns beyond the sink's
  // arity are silently ignored.
  void accept_field(size_t field_pos, const char* buf, size_t len) {
    if (field_pos < mutators.size()) {
      mutators[field_pos](buf, len);
    }
  }
  // Deliver the completed row to the sink as typed arguments.
  template <typename F>
  void accept_row(F& sink) {
    call_func(sink,
              typename detail::sequence::generate<sizeof...(Args)>::index());
  }
  template <typename F, int... S>
  void call_func(F& sink, detail::sequence::index<S...>) {
    sink(std::get<S>(values)...);
  }

 private:
  std::tuple<typename std::decay<Args>::type...> values;
  std::vector<mutator_t> mutators;

 private:
  // Fan the tuple elements out as a reference pack for handler setup.
  template <int... S>
  void setupFieldHandlers(detail::sequence::index<S...>) {
    setupFieldHandlers(std::get<S>(values)...);
  }
  // Register one mutator per tuple element, in declaration order. Each
  // lambda captures a reference to its tuple slot.
  template <typename F, typename... Fa>
  void setupFieldHandlers(F& arg, Fa&... args) {
    size_t field_num = mutators.size();
    mutators.push_back([field_num, &arg](const char* buf, size_t len) {
      if (len > 0) {
        // lexical_cast(const char*, size_t) parses a length-bounded
        // (not necessarily NUL-terminated) character range.
        arg = boost::lexical_cast<F>(buf, len);
      } else {
        // Empty field: reset the slot to a default-constructed value.
        arg = F();
      }
    });
    setupFieldHandlers(args...);
  }
  void setupFieldHandlers() {
    // this is the terminal function for recursive template expansion
  }
};
} // namespace meta
} // detail
/* A C++ wrapper around libcsv, see `make_parser` below */
// Result of a row filter: drop == true means "skip this row".
struct filter_result {
  bool drop;
  constexpr filter_result(bool b) : drop(b) {}
  operator bool() const { return drop; }
};
// Convenience constants for filter and error callbacks.
static constexpr filter_result ROW_DROP{true};
static constexpr filter_result ROW_OK{false};
// Typed CSV parser: libcsv drives field/record callbacks, each field is
// converted to the type of the matching sink argument, and each complete
// row is delivered to the sink. Construct via make_parser().
template <typename F>
class CsvParser {
  using this_type = CsvParser<F>;

 public:
  // return true if field should cause row to be ignored
  using filter_function_type = std::function<
      filter_result(size_t field_num, const char* buf, size_t len)>;
  // Invoked when a field fails conversion; return ROW_DROP to skip the row.
  using error_callback_type = std::function<
      filter_result(size_t line_number, size_t field_number,
                    const std::string& error_message, std::exception_ptr ex)>;

  // NOTE(review): `sink` is held by const reference — the caller must keep
  // the sink alive for the parser's lifetime (beware temporaries).
  CsvParser(const F& sink) : sink{sink} { csv_init(&parser, 0); }
  ~CsvParser() { csv_free(&parser); }
  //
  void set_delim_char(unsigned char delim) { parser.delim_char = delim; }
  void set_quote_char(unsigned char quote) { parser.quote_char = quote; }
  // Arms skip_row so the first (header) row is discarded.
  void set_skip_header() { skip_row = true; }
  void set_error_func(const error_callback_type func) { error_func = func; }
  // Drops any row whose first field starts with `prefix`.
  void set_comment_mark(const std::string& prefix) {
    auto is_comment = [prefix](size_t field_num, const char* buf, size_t len) {
      return field_num == 0 &&                  //
             len >= prefix.length() &&          //
             std::equal(prefix.begin(), prefix.end(), buf);
    };
    return add_row_filter(is_comment);
  }
  /* Limitation: Fields are coerced to their types as they are
   * encountered, so these filters can't prevent conversion by
   * looking at data later in the same row. */
  // Filters compose: a row is dropped if ANY registered filter drops it.
  void add_row_filter(const filter_function_type filter) {
    auto orig = filter_func;
    filter_func = [orig, filter](size_t field_num, const char* buf,
                                 size_t len) {
      return orig(field_num, buf, len) || filter(field_num, buf, len);
    };
  }
  //
  // Memory-maps `filename` and parses it in one pass.
  bool ParseFile(const std::string& filename) {
    detail::MappedFile data(filename);
    if (!data) {
      return status = data.status;
    }
    return Parse(data.begin(), data.end()) && Finish();
  }
  template <typename T>
  bool Parse(const T& str) {
    return Parse(str.data(), str.data() + str.length());
  }
  // Feeds one chunk to libcsv; callers must invoke Finish() after the last
  // chunk to flush a trailing unterminated row.
  template <typename It>
  bool Parse(const It& begin, const It& end) {
    csv_parse(&parser, begin, end - begin, on_field, on_record, this);
    return update_status();
  }
  // Reads the stream in fixed-size chunks until EOF or a parse error.
  template <typename IoStream>
  bool ParseStream(IoStream& input) {
    char buf[4096];
    do {
      input.read(buf, sizeof(buf));
      csv_parse(&parser, buf, input.gcount(), on_field, on_record, this);
    } while (input && update_status());
    return Finish();
  }
  bool Finish() {
    csv_fini(&parser, on_field, on_record, this);
    return update_status();
  }
  const std::string& ErrorString() { return status.message; }
  operator bool() { return status; }

 private:
  // libcsv trampolines: recover `this` from the user-data pointer.
  static void on_field(void* data, size_t len, void* this_ptr) {
    this_type* t = reinterpret_cast<this_type*>(this_ptr);
    t->accept_field((char*)data, len);
  }
  static void on_record(int, void* this_ptr) {
    this_type* t = reinterpret_cast<this_type*>(this_ptr);
    t->accept_row();
  };

 private:
  detail::meta::fields<F> fields;
  csv_parser parser;
  const F& sink;
  detail::Result status;
  // Default filter keeps every row.
  filter_function_type filter_func = [](size_t, const char*,
                                        size_t) { return ROW_OK; };
  // Default error handler logs to stderr and drops the offending row.
  error_callback_type error_func = [](size_t row, size_t column,
                                      const std::string& err,
                                      const std::exception_ptr) {
    std::cerr << "[csv.hpp] Exception at row " << row << ", column " << column
              << ": " << err << "\n";
    return ROW_DROP;
  };
  bool skip_row{false};
  size_t current_line = 0;
  size_t current_field = 0;

 private:
  // Runs filters, then converts the field; a conversion exception is routed
  // through error_func, which decides whether the row survives.
  void accept_field(const char* buf, size_t len) {
    skip_row = skip_row || filter_func(current_field, buf, len);
    if (!skip_row) {
      try {
        fields.accept_field(current_field, buf, len);
      }
      catch (std::exception& e) {
        // Row/column reported 1-based to the callback.
        skip_row = error_func(current_line + 1, current_field + 1, e.what(),
                              std::current_exception());
      }
    }
    ++current_field;
  }
  // Delivers the row to the sink unless it was dropped; resets per-row state.
  void accept_row() {
    if (!skip_row) {
      fields.accept_row(sink);
    } else {
      skip_row = false;
    }
    current_field = 0;
    ++current_line;
  }
  // Latches the first libcsv error into `status` (later errors are ignored).
  const detail::Result& update_status() {
    if (status.number == 0 && parser.status != 0) {
      status.number = parser.status;
      status.message = csv_error(&parser);
    }
    return status;
  }
};
// Builds a CsvParser for the given sink callable, deducing the column types
// from the sink's argument list.
// NOTE(review): the parser stores the sink by const reference; passing a
// temporary here leaves the returned parser with a dangling reference —
// TODO confirm intended usage is with an lvalue sink.
template <typename F>
CsvParser<F> make_parser(F&& f) {
  return CsvParser<F>(f);
}
// used to ignore input fields
struct ignore {};
} // namespace csv
namespace boost {
// Specialization so csv::ignore can appear in a sink's signature: a field
// bound to csv::ignore is discarded without any conversion.
template <>
inline csv::ignore lexical_cast(const char*, size_t) {
  static csv::ignore instance;
  return instance;
}
}
| {
"content_hash": "40aee69d885dd8164abc1cadb07af7dc",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 79,
"avg_line_length": 27.579617834394906,
"alnum_prop": 0.6051963048498845,
"repo_name": "suspend0/csvpp",
"id": "5b68b810eee8e880b68bd0c44144b507b61aa55a",
"size": "8660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csv.hpp",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "18902"
},
{
"name": "Makefile",
"bytes": "58"
}
],
"symlink_target": ""
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>UnitTestHelper - FAKE - F# Make</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="">
<meta name="author" content="Steffen Forkmann, Mauricio Scheffer, Colin Bull">
<script src="https://code.jquery.com/jquery-1.8.0.js"></script>
<script src="https://code.jquery.com/ui/1.8.23/jquery-ui.js"></script>
<script src="https://netdna.bootstrapcdn.com/twitter-bootstrap/2.2.1/js/bootstrap.min.js"></script>
<script type="text/javascript" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>
<link href="https://netdna.bootstrapcdn.com/twitter-bootstrap/2.2.1/css/bootstrap-combined.min.css" rel="stylesheet">
<link type="text/css" rel="stylesheet" href="http://fsharp.github.io/FAKE/content/style.css" />
<script type="text/javascript" src="http://fsharp.github.io/FAKE/content/tips.js"></script>
<!-- HTML5 shim, for IE6-8 support of HTML5 elements -->
<!--[if lt IE 9]>
<script src="http://html5shim.googlecode.com/svn/trunk/html5.js"></script>
<![endif]-->
</head>
<body>
<div class="container">
<div class="masthead">
<ul class="nav nav-pills pull-right">
<li><a href="http://fsharp.org">fsharp.org</a></li>
<li><a href="http://github.com/fsharp/fake">github page</a></li>
</ul>
<h3 class="muted"><a href="http://fsharp.github.io/FAKE/index.html">FAKE - F# Make</a></h3>
</div>
<hr />
<div class="row">
<div class="span9" id="main">
<h1>UnitTestHelper</h1>
<div class="xmldoc">
<p>This module contains functions which allow to report unit test results to build servers.</p>
</div>
<!-- Render nested types and modules, if there are any -->
<h2>Nested types and modules</h2>
<div>
<table class="table table-bordered type-list">
<thead>
<tr><td>Type</td><td>Description</td></tr>
</thead>
<tbody>
<tr>
<td class="type-name">
<a href="fake-unittesthelper-test.html">Test</a>
</td>
<td class="xmldoc"><p>Basic data type to represent tests</p>
</td>
</tr>
<tr>
<td class="type-name">
<a href="fake-unittesthelper-testresults.html">TestResults</a>
</td>
<td class="xmldoc"><p>Basic data type to represent test results</p>
</td>
</tr>
<tr>
<td class="type-name">
<a href="fake-unittesthelper-teststatus.html">TestStatus</a>
</td>
<td class="xmldoc"><p>Basic data type to represent test status</p>
</td>
</tr>
</tbody>
</table>
</div>
<h3>Functions and values</h3>
<table class="table table-bordered member-list">
<thead>
<tr><td>Function or value</td><td>Description</td></tr>
</thead>
<tbody>
<tr>
<td class="member-name">
<code onmouseout="hideTip(event, '595', 595)" onmouseover="showTip(event, '595', 595)">
reportTestResults testResults
</code>
<div class="tip" id="595">
<strong>Signature:</strong> testResults:TestResults -> unit<br />
</div>
</td>
<td class="xmldoc">
<a href="https://github.com/fsharp/FAKE/blob/master/src/app/FakeLib/UnitTest/UnitTestHelper.fs#L62-62" class="github-link">
<img src="../content/img/github.png" class="normal" />
<img src="../content/img/github-blue.png" class="hover" />
</a>
<p>Reports the given test results to the detected build server</p>
</td>
</tr>
<tr>
<td class="member-name">
<code onmouseout="hideTip(event, '596', 596)" onmouseover="showTip(event, '596', 596)">
reportToAppVeyor testResults
</code>
<div class="tip" id="596">
<strong>Signature:</strong> testResults:TestResults -> unit<br />
</div>
</td>
<td class="xmldoc">
<a href="https://github.com/fsharp/FAKE/blob/master/src/app/FakeLib/UnitTest/UnitTestHelper.fs#L49-49" class="github-link">
<img src="../content/img/github.png" class="normal" />
<img src="../content/img/github-blue.png" class="hover" />
</a>
<p>Reports the given test results to <a href="http://www.appveyor.com/">AppVeyor</a>.</p>
</td>
</tr>
<tr>
<td class="member-name">
<code onmouseout="hideTip(event, '597', 597)" onmouseover="showTip(event, '597', 597)">
reportToTeamCity testResults
</code>
<div class="tip" id="597">
<strong>Signature:</strong> testResults:TestResults -> unit<br />
</div>
</td>
<td class="xmldoc">
<a href="https://github.com/fsharp/FAKE/blob/master/src/app/FakeLib/UnitTest/UnitTestHelper.fs#L33-33" class="github-link">
<img src="../content/img/github.png" class="normal" />
<img src="../content/img/github-blue.png" class="hover" />
</a>
<p>Reports the given test results to <a href="http://www.jetbrains.com/teamcity/">TeamCity</a>.</p>
</td>
</tr>
</tbody>
</table>
</div>
<div class="span3">
<a href="http://fsharp.github.io/FAKE/index.html">
<img src="http://fsharp.github.io/FAKE/pics/logo.png" style="width:140px;height:140px;margin:10px 0px 0px 35px;border-style:none;" />
</a>
<ul class="nav nav-list" id="menu">
<li class="nav-header">FAKE - F# Make</li>
<li class="divider"></li>
<li><a href="http://fsharp.github.io/FAKE/index.html">Home page</a></li>
<li class="divider"></li>
<li><a href="https://www.nuget.org/packages/FAKE">Get FAKE - F# Make via NuGet</a></li>
<li><a href="http://github.com/fsharp/fake">Source Code on GitHub</a></li>
<li><a href="http://github.com/fsharp/fake/blob/master/License.txt">License (MS-PL)</a></li>
<li><a href="http://fsharp.github.io/FAKE/RELEASE_NOTES.html">Release Notes</a></li>
<li><a href="http://fsharp.github.io/FAKE//contributing.html">Contributing to FAKE - F# Make</a></li>
<li><a href="http://fsharp.github.io/FAKE/users.html">Who is using FAKE?</a></li>
<li><a href="http://stackoverflow.com/questions/tagged/f%23-fake">Ask a question</a></li>
<li class="nav-header">Tutorials</li>
<li><a href="http://fsharp.github.io/FAKE/gettingstarted.html">Getting started</a></li>
<li class="divider"></li>
<li><a href="http://fsharp.github.io/FAKE/nuget.html">NuGet package restore</a></li>
<li><a href="http://fsharp.github.io/FAKE/fxcop.html">Using FxCop in a build</a></li>
<li><a href="http://fsharp.github.io/FAKE/assemblyinfo.html">Generating AssemblyInfo</a></li>
<li><a href="http://fsharp.github.io/FAKE/create-nuget-package.html">Create NuGet packages</a></li>
<li><a href="http://fsharp.github.io/FAKE/specifictargets.html">Running specific targets</a></li>
<li><a href="http://fsharp.github.io/FAKE/commandline.html">Running FAKE from command line</a></li>
<li><a href="http://fsharp.github.io/FAKE/fsc.html">Using the F# compiler from FAKE</a></li>
<li><a href="http://fsharp.github.io/FAKE/customtasks.html">Creating custom tasks</a></li>
<li><a href="http://fsharp.github.io/FAKE/teamcity.html">TeamCity integration</a></li>
<li><a href="http://fsharp.github.io/FAKE/canopy.html">Running canopy tests</a></li>
<li><a href="http://fsharp.github.io/FAKE/octopusdeploy.html">Octopus Deploy</a></li>
<li><a href="http://fsharp.github.io/FAKE/typescript.html">TypeScript support</a></li>
<li class="divider"></li>
<li><a href="http://fsharp.github.io/FAKE/deploy.html">Fake.Deploy</a></li>
<li class="nav-header">Reference</li>
<li><a href="http://fsharp.github.io/FAKE/apidocs/index.html">API Reference</a></li>
</ul>
</div>
</div>
</div>
<a href="http://github.com/fsharp/fake"><img style="position: absolute; top: 0; right: 0; border: 0;" src="https://s3.amazonaws.com/github/ribbons/forkme_right_gray_6d6d6d.png" alt="Fork me on GitHub"></a>
</body>
</html>
| {
"content_hash": "a27bf924e00317d463925004c7bf85ea",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 209,
"avg_line_length": 45.052083333333336,
"alnum_prop": 0.5796531791907514,
"repo_name": "mwissman/MingleTransitionMonitor",
"id": "f0f63a3b6187c25c66d5026ca41b93e0d15188bd",
"size": "8650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MingleTransitionMonitor/packages/FAKE.3.9.8/docs/apidocs/fake-unittesthelper.html",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "86697"
},
{
"name": "CSS",
"bytes": "4203"
},
{
"name": "F#",
"bytes": "4301"
},
{
"name": "JavaScript",
"bytes": "1295"
},
{
"name": "Shell",
"bytes": "183"
},
{
"name": "XSLT",
"bytes": "33710"
}
],
"symlink_target": ""
} |
#ifndef _MEGA_COMMON_H_
#define _MEGA_COMMON_H_
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <asm/semaphore.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#define LSI_MAX_CHANNELS 16
#define LSI_MAX_LOGICAL_DRIVES_64LD (64+1)
#define HBA_SIGNATURE_64_BIT 0x299
#define PCI_CONF_AMISIG64 0xa4
#define MEGA_SCSI_INQ_EVPD 1
#define MEGA_INVALID_FIELD_IN_CDB 0x24
/**
* scb_t - scsi command control block
* @ccb : command control block for individual driver
* @list : list of control blocks
* @gp : general purpose field for LLDs
* @sno : all SCBs have a serial number
* @scp : associated scsi command
* @state : current state of scb
* @dma_direction	: direction of data transfer
* @dma_type : transfer with sg list, buffer, or no data transfer
* @dev_channel : actual channel on the device
* @dev_target : actual target on the device
* @status : completion status
*
* This is our central data structure to issue commands the each driver.
* Driver specific data structures are maintained in the ccb field.
* scb provides a field 'gp', which can be used by LLD for its own purposes
*
* dev_channel and dev_target must be initialized with the actual channel and
* target on the controller.
*/
typedef struct {
	caddr_t			ccb;		/* driver-private command control block */
	struct list_head	list;		/* free/pending/completed list linkage */
	unsigned long		gp;		/* general purpose field for LLDs */
	unsigned int		sno;		/* serial number of this SCB */
	struct scsi_cmnd	*scp;		/* associated mid-layer scsi command */
	uint32_t		state;		/* SCB_* state flags below */
	uint32_t		dma_direction;	/* direction of data transfer */
	uint32_t		dma_type;	/* MRAID_DMA_* transfer type below */
	uint16_t		dev_channel;	/* actual channel on the controller */
	uint16_t		dev_target;	/* actual target on the controller */
	uint32_t		status;		/* completion status */
} scb_t;
/*
* SCB states as it transitions from one state to another
*/
#define SCB_FREE 0x0000 /* on the free list */
#define SCB_ACTIVE 0x0001 /* off the free list */
#define SCB_PENDQ 0x0002 /* on the pending queue */
#define SCB_ISSUED 0x0004 /* issued - owner f/w */
#define SCB_ABORT 0x0008 /* Got an abort for this one */
#define SCB_RESET 0x0010 /* Got a reset for this one */
/*
* DMA types for scb
*/
#define MRAID_DMA_NONE 0x0000 /* no data transfer for this command */
#define MRAID_DMA_WSG 0x0001 /* data transfer using a sg list */
#define MRAID_DMA_WBUF 0x0002 /* data transfer using a contiguous buffer */
/**
* struct adapter_t - driver's initialization structure
* @dpc_h			: tasklet handle
* @pdev : pci configuration pointer for kernel
* @host : pointer to host structure of mid-layer
* @lock : synchronization lock for mid-layer and driver
* @quiescent : driver is quiescent for now.
* @outstanding_cmds : number of commands pending in the driver
* @kscb_list : pointer to the bulk of SCBs pointers for IO
* @kscb_pool : pool of free scbs for IO
* @kscb_pool_lock : lock for pool of free scbs
* @pend_list : pending commands list
* @pend_list_lock : exclusion lock for pending commands list
* @completed_list : list of completed commands
* @completed_list_lock : exclusion lock for list of completed commands
* @sglen : max sg elements supported
* @device_ids : to convert kernel device addr to our devices.
* @raid_device : raid adapter specific pointer
* @max_channel : maximum channel number supported - inclusive
* @max_target : max target supported - inclusive
* @max_lun : max lun supported - inclusive
* @unique_id : unique identifier for each adapter
* @irq : IRQ for this adapter
* @ito : internal timeout value, (-1) means no timeout
* @ibuf : buffer to issue internal commands
* @ibuf_dma_h : dma handle for the above buffer
* @uscb_list : SCB pointers for user cmds, common mgmt module
* @uscb_pool : pool of SCBs for user commands
* @uscb_pool_lock : exclusion lock for these SCBs
* @max_cmds : max outstanding commands
* @fw_version : firmware version
* @bios_version : bios version
* @max_cdb_sz : biggest CDB size supported.
* @ha : is high availability present - clustering
* @init_id : initiator ID, the default value should be 7
* @max_sectors : max sectors per request
* @cmd_per_lun : max outstanding commands per LUN
* @being_detached : set when unloading, no more mgmt calls
*
*
* mraid_setup_device_map() can be called anytime after the device map is
* available and MRAID_GET_DEVICE_MAP() can be called whenever the mapping is
required, usually from LLD's queue entry point. The former API sets up the
* MRAID_IS_LOGICAL(adapter_t *, struct scsi_cmnd *) to find out if the
* device in question is a logical drive.
*
* quiescent flag should be set by the driver if it is not accepting more
* commands
*
* NOTE: The fields of this structures are placed to minimize cache misses
*/
// amount of space required to store the bios and firmware version strings
#define VERSION_SIZE 16
typedef struct {
	struct tasklet_struct	dpc_h;		/* tasklet handle */
	struct pci_dev		*pdev;		/* pci configuration pointer for kernel */
	struct Scsi_Host	*host;		/* host structure of mid-layer */
	spinlock_t		lock;		/* sync lock for mid-layer and driver */
	uint8_t			quiescent;	/* driver is quiescent for now */
	int			outstanding_cmds;	/* commands pending in the driver */
	scb_t			*kscb_list;	/* bulk of SCB pointers for IO */
	struct list_head	kscb_pool;	/* pool of free scbs for IO */
	spinlock_t		kscb_pool_lock;	/* lock for pool of free scbs */
	struct list_head	pend_list;	/* pending commands list */
	spinlock_t		pend_list_lock;	/* lock for pending commands list */
	struct list_head	completed_list;	/* list of completed commands */
	spinlock_t		completed_list_lock;	/* lock for completed list */
	uint16_t		sglen;		/* max sg elements supported */
	int			device_ids[LSI_MAX_CHANNELS]
					[LSI_MAX_LOGICAL_DRIVES_64LD];	/* kernel addr -> our devices */
	caddr_t			raid_device;	/* raid adapter specific pointer */
	uint8_t			max_channel;	/* max channel number - inclusive */
	uint16_t		max_target;	/* max target supported - inclusive */
	uint8_t			max_lun;	/* max lun supported - inclusive */
	uint32_t		unique_id;	/* unique identifier for each adapter */
	int			irq;		/* IRQ for this adapter */
	uint8_t			ito;		/* internal timeout, (-1) = no timeout */
	caddr_t			ibuf;		/* buffer to issue internal commands */
	dma_addr_t		ibuf_dma_h;	/* dma handle for the above buffer */
	scb_t			*uscb_list;	/* SCBs for user cmds, common mgmt module */
	struct list_head	uscb_pool;	/* pool of SCBs for user commands */
	spinlock_t		uscb_pool_lock;	/* lock for user-command SCBs */
	int			max_cmds;	/* max outstanding commands */
	uint8_t			fw_version[VERSION_SIZE];	/* firmware version */
	uint8_t			bios_version[VERSION_SIZE];	/* bios version */
	uint8_t			max_cdb_sz;	/* biggest CDB size supported */
	uint8_t			ha;		/* high availability present - clustering */
	uint16_t		init_id;	/* initiator ID, default should be 7 */
	uint16_t		max_sectors;	/* max sectors per request */
	uint16_t		cmd_per_lun;	/* max outstanding commands per LUN */
	atomic_t		being_detached;	/* set when unloading, no more mgmt calls */
} adapter_t;
/* Shorthand accessors for the adapter's list locks */
#define SCSI_FREE_LIST_LOCK(adapter)	(&adapter->kscb_pool_lock)
#define USER_FREE_LIST_LOCK(adapter)	(&adapter->uscb_pool_lock)
#define PENDING_LIST_LOCK(adapter)	(&adapter->pend_list_lock)
#define COMPLETED_LIST_LOCK(adapter)	(&adapter->completed_list_lock)

// conversion from scsi command
#define SCP2HOST(scp)		(scp)->device->host	// to host
#define SCP2HOSTDATA(scp)	SCP2HOST(scp)->hostdata	// to soft state
#define SCP2CHANNEL(scp)	(scp)->device->channel	// to channel
#define SCP2TARGET(scp)		(scp)->device->id	// to target
#define SCP2LUN(scp)		(scp)->device->lun	// to LUN

// generic macro to convert scsi command and host to controller's soft state
#define SCSIHOST2ADAP(host)	(((caddr_t *)(host->hostdata))[0])
#define SCP2ADAPTER(scp)	(adapter_t *)SCSIHOST2ADAP(SCP2HOST(scp))

/* evaluates to 1 when the command targets the virtual (logical-drive) channel */
#define MRAID_IS_LOGICAL(adp, scp)			\
	(SCP2CHANNEL(scp) == (adp)->max_channel) ? 1 : 0

#define MRAID_IS_LOGICAL_SDEV(adp, sdev)	\
	(sdev->channel == (adp)->max_channel) ? 1 : 0
/**
* MRAID_GET_DEVICE_MAP - device ids
* @adp : adapter's soft state
* @scp : mid-layer scsi command pointer
* @p_chan : physical channel on the controller
* @target : target id of the device or logical drive number
* @islogical : set if the command is for the logical drive
*
* Macro to retrieve information about device class, logical or physical and
* the corresponding physical channel and target or logical drive number
*/
/*
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and is safe inside unbraced if/else constructs.
 */
#define MRAID_GET_DEVICE_MAP(adp, scp, p_chan, target, islogical)	\
	do {								\
		/*							\
		 * Is the request coming for the virtual channel	\
		 */							\
		islogical = MRAID_IS_LOGICAL(adp, scp);			\
									\
		/*							\
		 * Get an index into our table of drive ids mapping	\
		 */							\
		if (islogical) {					\
			p_chan = 0xFF;					\
			target =					\
			(adp)->device_ids[(adp)->max_channel][SCP2TARGET(scp)]; \
		}							\
		else {							\
			p_chan = ((adp)->device_ids[SCP2CHANNEL(scp)]	\
					[SCP2TARGET(scp)] >> 8) & 0xFF;	\
			target = ((adp)->device_ids[SCP2CHANNEL(scp)]	\
					[SCP2TARGET(scp)] & 0xFF);	\
		}							\
	} while (0)
/*
* ### Helper routines ###
*/
#define LSI_DBGLVL mraid_debug_level // each LLD must define a global
// mraid_debug_level
#ifdef DEBUG
#if defined (_ASSERT_PANIC)
#define ASSERT_ACTION	panic
#else
#define ASSERT_ACTION	printk
#endif

/*
 * Wrapped in do { } while (0) so ASSERT(x); behaves as a single statement
 * and does not swallow the else of an enclosing unbraced if/else.
 */
#define ASSERT(expression)						\
	do {								\
		if (!(expression)) {					\
		ASSERT_ACTION("assertion failed:(%s), file: %s, line: %d:%s\n",	\
			#expression, __FILE__, __LINE__, __FUNCTION__);	\
		}							\
	} while (0)
#else
#define ASSERT(expression)
#endif
/**
* struct mraid_pci_blk - structure holds DMA memory block info
* @vaddr : virtual address to a memory block
* @dma_addr : DMA handle to a memory block
*
* This structure is filled up for the caller. It is the responsibility of the
* caller to allocate this array big enough to store addresses for all
* requested elements
*/
struct mraid_pci_blk {
	caddr_t		vaddr;		/* virtual address of the memory block */
	dma_addr_t	dma_addr;	/* DMA handle of the memory block */
};
#endif // _MEGA_COMMON_H_
// vim: set ts=8 sw=8 tw=78:
| {
"content_hash": "550932c45920e07ca57745e939a96bd8",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 77,
"avg_line_length": 32.141304347826086,
"alnum_prop": 0.6883102243264569,
"repo_name": "ut-osa/laminar",
"id": "26e1e6c55654e0a71b4e272eac98ec6a43ec90fd",
"size": "9334",
"binary": false,
"copies": "52",
"ref": "refs/heads/master",
"path": "linux-2.6.22.6/drivers/scsi/megaraid/mega_common.h",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "4526"
},
{
"name": "Assembly",
"bytes": "7753785"
},
{
"name": "Awk",
"bytes": "5239"
},
{
"name": "Bison",
"bytes": "75151"
},
{
"name": "C",
"bytes": "209779557"
},
{
"name": "C++",
"bytes": "5954668"
},
{
"name": "CSS",
"bytes": "11885"
},
{
"name": "Java",
"bytes": "12132154"
},
{
"name": "Makefile",
"bytes": "731243"
},
{
"name": "Objective-C",
"bytes": "564040"
},
{
"name": "Perl",
"bytes": "196100"
},
{
"name": "Python",
"bytes": "11786"
},
{
"name": "Ruby",
"bytes": "3219"
},
{
"name": "Scala",
"bytes": "12158"
},
{
"name": "Scilab",
"bytes": "22980"
},
{
"name": "Shell",
"bytes": "205177"
},
{
"name": "TeX",
"bytes": "62636"
},
{
"name": "UnrealScript",
"bytes": "20822"
},
{
"name": "XSLT",
"bytes": "6544"
}
],
"symlink_target": ""
} |
<?php
/**
* @package go\DB
*/
namespace go\DB;
/**
* Compatibility with older versions
*
* @author Oleg Grigoriev <go.vasac@gmail.com>
*/
/**
 * Compatibility with older versions
 *
 * @author Oleg Grigoriev <go.vasac@gmail.com>
 */
class Compat
{
    /**
     * Global compatibility options (defaults)
     *
     * @var array
     */
    private static $opts = array(
        'null' => true,
    );

    /**
     * Compatibility options of the current connection (take precedence)
     *
     * @var array
     */
    private static $current = array();

    /**
     * Sets a compatibility option
     *
     * @param string $key
     * @param mixed $value
     */
    public static function setOpt($key, $value)
    {
        self::$opts[$key] = $value;
    }

    /**
     * Sets a list of compatibility options for the current connection
     *
     * @param array $opts
     */
    public static function setCurrentOpts(array $opts)
    {
        self::$current = $opts;
    }

    /**
     * Returns a compatibility option by its key
     *
     * Connection-level options win over global ones; array_key_exists is
     * used (not isset) so that an explicitly stored NULL is still found.
     *
     * @param string $key
     * @param mixed $default [optional]
     * @return mixed
     */
    public static function getOpt($key, $default = null)
    {
        foreach (array(self::$current, self::$opts) as $options) {
            if (\array_key_exists($key, $options)) {
                return $options[$key];
            }
        }
        return $default;
    }
}
| {
"content_hash": "097750cee9d8a652e8397d1fd31d8bb1",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 70,
"avg_line_length": 18.83076923076923,
"alnum_prop": 0.5179738562091504,
"repo_name": "mundiir/kostylev",
"id": "156ff47fbaeb53474a3f2948bdcbbbf6883f14be",
"size": "1224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/goDB/Compat.php",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "131288"
},
{
"name": "JavaScript",
"bytes": "67865"
},
{
"name": "PHP",
"bytes": "1254728"
}
],
"symlink_target": ""
} |
# Link the shared environment file into the profile and load it now.
sudo ln -s /vagrant/env.sh /etc/profile.d/env.sh
. /etc/profile.d/env.sh
yum install -y git unzip
# Sources for the Linux kernel and glibc
yum install -y kernel-devel glibc-devel
# Debuginfo (symbols) for the Linux kernel and glibc
yum install --enablerepo=base-debuginfo -y kernel-debuginfo.x86_64 kernel-debuginfo-common-x86_64.x86_64 kernel-tools-debuginfo.x86_64 glibc-debuginfo
# perf is a tool shipped with the Linux kernel to capture system events
yum install -y perf audit-libs-python
# OpenJDK 1.8
yum install -y java-1.8.0-openjdk-devel
# Debuginfo (symbols) for OpenJDK
yum install --enablerepo=base-debuginfo -y java-1.8.0-openjdk-debuginfo
# Brendan Gregg's FlameGraph project
[ -d /opt/FlameGraph ] || git clone https://github.com/brendangregg/FlameGraph /opt/FlameGraph
# perf-map-agent project:
# a Java agent that exports the JVM's symbols for perf
[ -d /opt/perf-map-agent ] || git clone https://github.com/jrudolph/perf-map-agent /opt/perf-map-agent
yum -y install cmake gcc-c++
cd /opt/perf-map-agent
# Build the C code contained in the Java agent
export JAVA_HOME=/etc/alternatives/java_sdk
cmake .
make
# Install a set of commands, including "perf-java-flames"
bin/create-links-in /usr/bin
# Gradle
[ -f /tmp/gradle-2.12-all.zip ] || wget -O /tmp/gradle-2.12-all.zip https://services.gradle.org/distributions/gradle-2.12-all.zip
cd /opt
unzip /tmp/gradle-2.12-all.zip
# Docker
sudo tee /etc/yum.repos.d/docker.repo <<-'EOF'
[dockerrepo]
name=Docker Repository
baseurl=https://yum.dockerproject.org/repo/main/centos/$releasever/
enabled=1
gpgcheck=1
gpgkey=https://yum.dockerproject.org/gpg
EOF
sudo yum install -y docker-engine
sudo service docker start
| {
"content_hash": "65fbc707e8fe33e67d4d5c4218857906",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 153,
"avg_line_length": 33.44230769230769,
"alnum_prop": 0.7550316273720529,
"repo_name": "npeters/devoxx-flamegraph",
"id": "7f6dfab01f70ac5a0077335ce07ccb21751ec17e",
"size": "1760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bootstrap.sh",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "26697"
},
{
"name": "Perl",
"bytes": "6688"
},
{
"name": "Python",
"bytes": "1244"
},
{
"name": "Shell",
"bytes": "5550"
}
],
"symlink_target": ""
} |
//
// TKScrollPageViewController.h
// XiaoNongDingClient
//
// Created by YueWen on 2017/9/4.
// Copyright © 2017年 ryden. All rights reserved.
//
#import <UIKit/UIKit.h>
#import "LLSegmentBar.h"

NS_ASSUME_NONNULL_BEGIN

/// A UIPageViewController used with the UIPageViewControllerTransitionStyleScroll transition style.
@interface TKScrollPageViewController : UIPageViewController

/// The currently displayed child view controller.
@property (nonatomic, weak, nullable) UIViewController *currentViewController;

/// Index of the currently displayed view controller.
@property (nonatomic, assign, readonly) NSInteger currentIndex;

/// The view controllers this page controller pages through.
@property (nonatomic, copy)NSArray <__kindof UIViewController *> *contentViewControllers;

@end

@class TKScrollHorizontalPageViewController;

@protocol TKScrollHorizontalPageDelegate <NSObject>

@optional

/**
 Notifies that the TKScrollHorizontalPageViewController is about to move to a new page.

 @param viewController the TKScrollHorizontalPageViewController
 @param index          index of the view controller being moved to
 */
- (void)tk_scrollHorizontalPageViewController:(TKScrollHorizontalPageViewController *)viewController
willToIndex:(NSInteger)index;

@end

@interface TKScrollHorizontalPageViewController : TKScrollPageViewController <LLSegmentBarDelegate>

/// Delegate notified about upcoming page changes.
@property (nonatomic, weak, nullable) id<TKScrollHorizontalPageDelegate> tk_delegate;

/// The segment bar associated with this page controller.
@property (nonatomic, strong) LLSegmentBar * segmentBar;

/// The navigation bar's interactive pop gesture recognizer.
/// NOTE(review): presumably assigned by the owning navigation controller — confirm.
@property (nonatomic, weak)UIPanGestureRecognizer *popPanGestureRecognizer;

@end

@interface TKScrollVerticalPageViewController : TKScrollPageViewController

@end

@interface UIPageViewController (TKScrollView)

/// The underlying scroll view of the page view controller.
@property (nonatomic, weak, nullable, readonly) UIScrollView *tk_scrollView;

/// The pan gesture recognizer of the underlying scroll view.
@property (nonatomic, strong, nullable, readonly) UIPanGestureRecognizer *tk_scrollPanGestureRecongnizer;

/// All gesture recognizers attached to the underlying scroll view.
@property (nonatomic, copy, nullable, readonly) NSArray <UIGestureRecognizer *> *tk_gestureRecongnizers;

@end

@interface TKScrollPageViewController (UIPageViewControllerDataSource) <UIPageViewControllerDataSource>

@end

@interface TKScrollPageViewController (UIPageViewControllerDelegate) <UIPageViewControllerDelegate>

@end

NS_ASSUME_NONNULL_END
| {
"content_hash": "2fe85e0307e159de2fd45deedb321566",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 105,
"avg_line_length": 17.96694214876033,
"alnum_prop": 0.7879484820607175,
"repo_name": "RITL/TaoKeClient",
"id": "df71171bc2f4cbe791899eddaf5d4a5bc88f6ba6",
"size": "2317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TaoKeClient/Library/ScrollPageController/TKScrollPageViewController.h",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7721"
},
{
"name": "JavaScript",
"bytes": "160"
},
{
"name": "Objective-C",
"bytes": "262998"
}
],
"symlink_target": ""
} |
extern "C" {
#include <signal.h>
}
#include <cerrno>
#include <cstddef>
#include "utils/format/macros.hpp"
#include "utils/signals/exceptions.hpp"
namespace signals = utils::signals;
/// Number of the last valid signal.
///
/// LAST_SIGNO is presumably supplied by the platform/build configuration —
/// not visible in this file; confirm against the build system.
const int utils::signals::last_signo = LAST_SIGNO;
/// Resets a signal handler to its default behavior.
///
/// \param signo The number of the signal handler to reset.
///
/// \throw signals::system_error If the underlying sigaction(2) call fails
///     while restoring the default disposition.
void signals::reset(const int signo)
{
    struct ::sigaction action;
    action.sa_handler = SIG_DFL;
    sigemptyset(&action.sa_mask);
    action.sa_flags = 0;

    if (::sigaction(signo, &action, NULL) != -1)
        return;

    // Capture errno immediately; constructing the message may clobber it.
    const int original_errno = errno;
    throw system_error(F("Failed to reset signal %s") % signo,
                       original_errno);
}
| {
"content_hash": "9b56a2f1c1bb94435e4fe3f0f43d1138",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 74,
"avg_line_length": 23.7027027027027,
"alnum_prop": 0.6476624857468644,
"repo_name": "ayyucedemirbas/Minix-Source-Code",
"id": "36e7966b27d9f841037de05ada520b7bd56ddade",
"size": "2521",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "minix-master/external/bsd/kyua-cli/dist/utils/signals/misc.cpp",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arc",
"bytes": "2839"
},
{
"name": "Assembly",
"bytes": "2637160"
},
{
"name": "Awk",
"bytes": "39398"
},
{
"name": "Brainfuck",
"bytes": "5114"
},
{
"name": "C",
"bytes": "45099473"
},
{
"name": "C++",
"bytes": "622042"
},
{
"name": "CSS",
"bytes": "254"
},
{
"name": "Emacs Lisp",
"bytes": "4528"
},
{
"name": "Groff",
"bytes": "6259659"
},
{
"name": "HTML",
"bytes": "57800"
},
{
"name": "IGOR Pro",
"bytes": "2975"
},
{
"name": "JavaScript",
"bytes": "20307"
},
{
"name": "Lex",
"bytes": "26563"
},
{
"name": "Limbo",
"bytes": "4037"
},
{
"name": "Logos",
"bytes": "14672"
},
{
"name": "Lua",
"bytes": "4385"
},
{
"name": "M4",
"bytes": "48559"
},
{
"name": "Makefile",
"bytes": "893777"
},
{
"name": "Max",
"bytes": "3667"
},
{
"name": "Objective-C",
"bytes": "26287"
},
{
"name": "Perl",
"bytes": "93049"
},
{
"name": "Perl6",
"bytes": "143"
},
{
"name": "Prolog",
"bytes": "97"
},
{
"name": "R",
"bytes": "764"
},
{
"name": "Rebol",
"bytes": "738"
},
{
"name": "Shell",
"bytes": "2194049"
},
{
"name": "Terra",
"bytes": "89"
},
{
"name": "Yacc",
"bytes": "137952"
}
],
"symlink_target": ""
} |
<?php
/**
 * Bundle class generated by Symfony to register this bundle.
 *
 * @author Juliana Leclaire <Juliana.Leclaire@etu.univ-savoie.fr>
 * @author Céline de Roland <Celine.de-Roland@etu.univ-savoie.fr>
 *
 */
namespace Sources\DebianBundle;

use Symfony\Component\HttpKernel\Bundle\Bundle;

/**
 * Bundle class generated by Symfony to register this bundle.
 *
 * Empty on purpose: all behavior is inherited from the base Bundle class.
 *
 * @author Juliana Leclaire <Juliana.Leclaire@etu.univ-savoie.fr>
 * @author Céline de Roland <Celine.de-Roland@etu.univ-savoie.fr>
 *
 */
class SourcesDebianBundle extends Bundle
{
}
| {
"content_hash": "3069f5b4708f3f344d7cbe93fb35510a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 65,
"avg_line_length": 22.782608695652176,
"alnum_prop": 0.7347328244274809,
"repo_name": "celinederoland/DataNavigator",
"id": "798b7bed84a17859202778b55d7402e32d70592f",
"size": "532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Sources/DebianBundle/SourcesDebianBundle.php",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28856"
},
{
"name": "JavaScript",
"bytes": "134464"
},
{
"name": "PHP",
"bytes": "239761"
},
{
"name": "Perl",
"bytes": "415"
}
],
"symlink_target": ""
} |
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<link rel="SHORTCUT ICON" href="../../../../../img/clover.ico" />
<link rel="stylesheet" href="../../../../../aui/css/aui.min.css" media="all"/>
<link rel="stylesheet" href="../../../../../aui/css/aui-experimental.min.css" media="all"/>
<!--[if IE 9]><link rel="stylesheet" href="../../../../../aui/css/aui-ie9.min.css" media="all"/><![endif]-->
<style type="text/css" media="all">
@import url('../../../../../style.css');
@import url('../../../../../tree.css');
</style>
<script src="../../../../../jquery-1.8.3.min.js" type="text/javascript"></script>
<script src="../../../../../aui/js/aui.min.js" type="text/javascript"></script>
<script src="../../../../../aui/js/aui-experimental.min.js" type="text/javascript"></script>
<script src="../../../../../aui/js/aui-soy.min.js" type="text/javascript"></script>
<script src="../../../../../package-nodes-tree.js" type="text/javascript"></script>
<script src="../../../../../clover-tree.js" type="text/javascript"></script>
<script src="../../../../../clover.js" type="text/javascript"></script>
<script src="../../../../../clover-descriptions.js" type="text/javascript"></script>
<script src="../../../../../cloud.js" type="text/javascript"></script>
<title>ABA Route Transit Number Validator 1.0.1-SNAPSHOT</title>
</head>
<body>
<div id="page">
<header id="header" role="banner">
<nav class="aui-header aui-dropdown2-trigger-group" role="navigation">
<div class="aui-header-inner">
<div class="aui-header-primary">
<h1 id="logo" class="aui-header-logo aui-header-logo-clover">
<a href="http://openclover.org" title="Visit OpenClover home page"><span class="aui-header-logo-device">OpenClover</span></a>
</h1>
</div>
<div class="aui-header-secondary">
<ul class="aui-nav">
<li id="system-help-menu">
<a class="aui-nav-link" title="Open online documentation" target="_blank"
href="http://openclover.org/documentation">
<span class="aui-icon aui-icon-small aui-iconfont-help"> Help</span>
</a>
</li>
</ul>
</div>
</div>
</nav>
</header>
<div class="aui-page-panel">
<div class="aui-page-panel-inner">
<div class="aui-page-panel-nav aui-page-panel-nav-clover">
<div class="aui-page-header-inner" style="margin-bottom: 20px;">
<div class="aui-page-header-image">
<a href="http://cardatechnologies.com" target="_top">
<div class="aui-avatar aui-avatar-large aui-avatar-project">
<div class="aui-avatar-inner">
<img src="../../../../../img/clover_logo_large.png" alt="Clover icon"/>
</div>
</div>
</a>
</div>
<div class="aui-page-header-main" >
<h1>
<a href="http://cardatechnologies.com" target="_top">
ABA Route Transit Number Validator 1.0.1-SNAPSHOT
</a>
</h1>
</div>
</div>
<nav class="aui-navgroup aui-navgroup-vertical">
<div class="aui-navgroup-inner">
<ul class="aui-nav">
<li class="">
<a href="../../../../../dashboard.html">Project overview</a>
</li>
</ul>
<div class="aui-nav-heading packages-nav-heading">
<strong>Packages</strong>
</div>
<div class="aui-nav project-packages">
<form method="get" action="#" class="aui package-filter-container">
<input type="text" autocomplete="off" class="package-filter text"
placeholder="Type to filter packages..." name="package-filter" id="package-filter"
title="Start typing package name (or part of the name) to search through the tree. Use arrow keys and the Enter key to navigate."/>
</form>
<p class="package-filter-no-results-message hidden">
<small>No results found.</small>
</p>
<div class="packages-tree-wrapper" data-root-relative="../../../../../" data-package-name="com.cardatechnologies.utils.validators.abaroutevalidator">
<div class="packages-tree-container"></div>
<div class="clover-packages-lozenges"></div>
</div>
</div>
</div>
</nav> </div>
<section class="aui-page-panel-content">
<div class="aui-page-panel-content-clover">
<div class="aui-page-header-main"><ol class="aui-nav aui-nav-breadcrumbs">
<li><a href="../../../../../dashboard.html"> Project Clover database Sat Aug 7 2021 12:29:33 MDT</a></li>
<li><a href="test-pkg-summary.html">Package com.cardatechnologies.utils.validators.abaroutevalidator</a></li>
<li><a href="test-Test_AbaRouteValidator_08.html">Class Test_AbaRouteValidator_08</a></li>
</ol></div>
<h1 class="aui-h2-clover">
Test testAbaNumberCheck_16019_good
</h1>
<table class="aui">
<thead>
<tr>
<th>Test</th>
<th><label title="The test result. Either a Pass, Fail or Error.">Status</label></th>
<th><label title="When the test execution was started">Start time</label></th>
<th><label title="The total time in seconds taken to run this test.">Time (seconds)</label></th>
<th><label title="A failure or error message if the test is not successful.">Message</label></th>
</tr>
</thead>
<tbody>
<tr>
<td>
<a href="../../../../../com/cardatechnologies/utils/validators/abaroutevalidator/Test_AbaRouteValidator_08.html?line=24520#src-24520" >testAbaNumberCheck_16019_good</a>
</td>
<td>
<span class="sortValue">1</span><span class="aui-lozenge aui-lozenge-success">PASS</span>
</td>
<td>
7 Aug 12:38:23
</td>
<td>
0.0 </td>
<td>
<div></div>
<div class="errorMessage"></div>
</td>
</tr>
</tbody>
</table>
<div> </div>
<table class="aui aui-table-sortable">
<thead>
<tr>
<th style="white-space:nowrap;"><label title="A class that was directly hit by this test.">Target Class</label></th>
<th colspan="4"><label title="The percentage of coverage contributed by each single test.">Coverage contributed by</label> testAbaNumberCheck_16019_good</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<span class="sortValue">com.cardatechnologies.utils.validators.abaroutevalidator.AbaRouteValidator</span>
  <a href="../../../../../com/cardatechnologies/utils/validators/abaroutevalidator/AbaRouteValidator.html?id=19348#AbaRouteValidator" title="AbaRouteValidator" name="sl-47">com.cardatechnologies.utils.validators.abaroutevalidator.AbaRouteValidator</a>
</td>
<td>
<span class="sortValue">0.7352941</span>73.5%
</td>
<td class="align-middle" style="width: 100%" colspan="3">
<div>
<div title="73.5% Covered" style="min-width:40px;" class="barNegative contribBarNegative contribBarNegative"><div class="barPositive contribBarPositive contribBarPositive" style="width:73.5%"></div></div></div> </td>
</tr>
</tbody>
</table>
</div> <!-- class="aui-page-panel-content-clover" -->
<footer id="footer" role="contentinfo">
<section class="footer-body">
<ul>
<li>
Report generated by <a target="_new" href="http://openclover.org">OpenClover</a> v 4.4.1
on Sat Aug 7 2021 12:49:26 MDT using coverage data from Sat Aug 7 2021 12:47:23 MDT.
</li>
</ul>
<ul>
<li>OpenClover is free and open-source software. </li>
</ul>
</section>
</footer> </section> <!-- class="aui-page-panel-content" -->
</div> <!-- class="aui-page-panel-inner" -->
</div> <!-- class="aui-page-panel" -->
</div> <!-- id="page" -->
</body>
</html> | {
"content_hash": "c501cd807f590551b2024ddb777c7a5b",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 297,
"avg_line_length": 43.92822966507177,
"alnum_prop": 0.5097483934211959,
"repo_name": "dcarda/aba.route.validator",
"id": "839599b8a6a3808dd946677d8318ba7037e9c997",
"size": "9181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "target13/site/clover/com/cardatechnologies/utils/validators/abaroutevalidator/Test_AbaRouteValidator_08_testAbaNumberCheck_16019_good_exg.html",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "18715254"
}
],
"symlink_target": ""
} |
using System;
using System.ComponentModel;
using System.Threading.Tasks;
namespace CSharpFunctionalExtensions
{
public static partial class AsyncResultExtensionsBothOperands
{
    /// <summary>Obsolete alias kept for source compatibility; forwards to CheckIf().</summary>
    [EditorBrowsable(EditorBrowsableState.Never)]
    [Obsolete("Use CheckIf() instead.")]
    public static Task<Result<T>> TapIf<T>(this Task<Result<T>> resultTask, bool condition, Func<T, Task<Result>> func) =>
        CheckIf(resultTask, condition, func);

    /// <summary>Obsolete alias kept for source compatibility; forwards to CheckIf().</summary>
    [EditorBrowsable(EditorBrowsableState.Never)]
    [Obsolete("Use CheckIf() instead.")]
    public static Task<Result<T>> TapIf<T, K>(this Task<Result<T>> resultTask, bool condition, Func<T, Task<Result<K>>> func) =>
        CheckIf(resultTask, condition, func);

    /// <summary>Obsolete alias kept for source compatibility; forwards to CheckIf().</summary>
    [EditorBrowsable(EditorBrowsableState.Never)]
    [Obsolete("Use CheckIf() instead.")]
    public static Task<Result<T, E>> TapIf<T, K, E>(this Task<Result<T, E>> resultTask, bool condition, Func<T, Task<Result<K, E>>> func) =>
        CheckIf(resultTask, condition, func);

    /// <summary>Obsolete predicate-based alias; forwards to CheckIf().</summary>
    [EditorBrowsable(EditorBrowsableState.Never)]
    [Obsolete("Use CheckIf() instead.")]
    public static Task<Result<T>> TapIf<T>(this Task<Result<T>> resultTask, Func<T, bool> predicate, Func<T, Task<Result>> func) =>
        CheckIf(resultTask, predicate, func);

    /// <summary>Obsolete predicate-based alias; forwards to CheckIf().</summary>
    [EditorBrowsable(EditorBrowsableState.Never)]
    [Obsolete("Use CheckIf() instead.")]
    public static Task<Result<T>> TapIf<T, K>(this Task<Result<T>> resultTask, Func<T, bool> predicate, Func<T, Task<Result<K>>> func) =>
        CheckIf(resultTask, predicate, func);

    /// <summary>Obsolete predicate-based alias; forwards to CheckIf().</summary>
    [EditorBrowsable(EditorBrowsableState.Never)]
    [Obsolete("Use CheckIf() instead.")]
    public static Task<Result<T, E>> TapIf<T, K, E>(this Task<Result<T, E>> resultTask, Func<T, bool> predicate, Func<T, Task<Result<K, E>>> func) =>
        CheckIf(resultTask, predicate, func);
}
}
| {
"content_hash": "e91f9c433d57d3ff85e8a488f59361d0",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 153,
"avg_line_length": 49.46153846153846,
"alnum_prop": 0.6583722135821669,
"repo_name": "vkhorikov/CSharpFunctionalExtensions",
"id": "b71852b5f9e2705ca95d0e414ca0f9746ddcd9d6",
"size": "1929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CSharpFunctionalExtensions/Result/Obsolete/TapIfAsyncBoth.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "1923667"
},
{
"name": "Dockerfile",
"bytes": "1196"
}
],
"symlink_target": ""
} |
package com.github.ksoichiro.android.observablescrollview.samples;
import android.os.Bundle;
import android.support.v4.view.ViewCompat;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.Toolbar;
import android.view.LayoutInflater;
import android.view.View;
import com.github.ksoichiro.android.observablescrollview.ObservableRecyclerView;
import com.github.ksoichiro.android.observablescrollview.ObservableScrollViewCallbacks;
import com.github.ksoichiro.android.observablescrollview.ScrollState;
import com.github.ksoichiro.android.observablescrollview.ScrollUtils;
import com.nineoldandroids.view.ViewHelper;
import com.nineoldandroids.view.ViewPropertyAnimator;
/**
 * Sample activity that hides the toolbar while the list is scrolled up and
 * shows it again when scrolled down, driven by {@link ObservableRecyclerView}
 * scroll callbacks.
 */
public class ToolbarControlRecyclerViewActivity extends BaseActivity implements ObservableScrollViewCallbacks {

    /** Duration in milliseconds of the toolbar show/hide animation. */
    private static final int TOOLBAR_ANIMATION_DURATION_MS = 200;

    private View mHeaderView;
    private View mToolbarView;
    private ObservableRecyclerView mRecyclerView;
    /** Scroll offset recorded when a drag starts while the header is partially hidden. */
    private int mBaseTranslationY;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_toolbarcontrolrecyclerview);
        setSupportActionBar((Toolbar) findViewById(R.id.toolbar));

        mHeaderView = findViewById(R.id.header);
        ViewCompat.setElevation(mHeaderView, getResources().getDimension(R.dimen.toolbar_elevation));
        mToolbarView = findViewById(R.id.toolbar);

        mRecyclerView = (ObservableRecyclerView) findViewById(R.id.recycler);
        mRecyclerView.setScrollViewCallbacks(this);
        mRecyclerView.setLayoutManager(new LinearLayoutManager(this));
        mRecyclerView.setHasFixedSize(false);
        View headerView = LayoutInflater.from(this).inflate(R.layout.recycler_header, null);
        setDummyDataWithHeader(mRecyclerView, headerView);
    }

    @Override
    public void onScrollChanged(int scrollY, boolean firstScroll, boolean dragging) {
        if (!dragging) {
            return;
        }
        int toolbarHeight = mToolbarView.getHeight();
        if (firstScroll) {
            float currentHeaderTranslationY = ViewHelper.getTranslationY(mHeaderView);
            if (-toolbarHeight < currentHeaderTranslationY && toolbarHeight < scrollY) {
                // The header is partially hidden: measure further translation
                // relative to this scroll position so it doesn't jump.
                mBaseTranslationY = scrollY;
            }
        }
        // Clamp the header translation between fully hidden and fully shown.
        float headerTranslationY = ScrollUtils.getFloat(-(scrollY - mBaseTranslationY), -toolbarHeight, 0);
        ViewPropertyAnimator.animate(mHeaderView).cancel();
        ViewHelper.setTranslationY(mHeaderView, headerTranslationY);
    }

    @Override
    public void onDownMotionEvent() {
        // Nothing to do on touch-down.
    }

    @Override
    public void onUpOrCancelMotionEvent(ScrollState scrollState) {
        mBaseTranslationY = 0;
        int toolbarHeight = mToolbarView.getHeight();
        if (mRecyclerView.getCurrentScrollY() <= toolbarHeight) {
            // Still near the top of the list: leave the header untouched.
            return;
        }
        if (scrollState == ScrollState.UP) {
            animateHeaderTo(-toolbarHeight);      // hide the toolbar
        } else if (scrollState == ScrollState.DOWN) {
            animateHeaderTo(0);                   // show the toolbar
        }
    }

    /**
     * Animates the header to {@code targetTranslationY}; no-op when it is
     * already there. Cancels any in-flight animation first so repeated fling
     * gestures do not queue conflicting animators.
     */
    private void animateHeaderTo(float targetTranslationY) {
        if (ViewHelper.getTranslationY(mHeaderView) != targetTranslationY) {
            ViewPropertyAnimator.animate(mHeaderView).cancel();
            ViewPropertyAnimator.animate(mHeaderView)
                    .translationY(targetTranslationY)
                    .setDuration(TOOLBAR_ANIMATION_DURATION_MS)
                    .start();
        }
    }
}
| {
"content_hash": "938223adbfb7778ae4ca5f880a67d315",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 116,
"avg_line_length": 42.13793103448276,
"alnum_prop": 0.7032187670485542,
"repo_name": "bensonss/Android-ObservableScrollView",
"id": "9837364409937dd073ceaf3c7cc9bd64c513f384",
"size": "4265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "observablescrollview-samples/src/main/java/com/github/ksoichiro/android/observablescrollview/samples/ToolbarControlRecyclerViewActivity.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "364403"
}
],
"symlink_target": ""
} |
def configure(config):
    """Build and return the Flask WSGI application for ORES.

    :param config: nested configuration mapping; the ``'ores'`` section
        supplies WSGI settings, the score-processor name and optional
        NLTK data paths.
    :returns: a configured :class:`flask.Flask` application
    """
    ores_config = config['ores']

    # Extend the NLTK search path when a data directory is configured.
    if 'data_paths' in ores_config and 'nltk' in ores_config['data_paths']:
        import nltk
        nltk.data.path.append(ores_config['data_paths']['nltk'])

    from flask import Blueprint, Flask

    from . import routes
    from ..score_processors import ScoreProcessor

    app = Flask(__name__)
    app.config['APPLICATION_ROOT'] = ores_config['wsgi']['application_root']

    score_processor = ScoreProcessor.from_config(
        config, ores_config['score_processor'])
    blueprint = routes.configure(
        config, Blueprint('ores', __name__), score_processor)
    app.register_blueprint(
        blueprint, url_prefix=ores_config['wsgi']['url_prefix'])
    return app
| {
"content_hash": "bd4477fd182c72848ee5ebdfce135f73",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 30.458333333333332,
"alnum_prop": 0.6429548563611491,
"repo_name": "aetilley/ores",
"id": "79adddf9992f6b10a6a55d0b6d9e3e0381f113f8",
"size": "731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ores/wsgi/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43517"
},
{
"name": "R",
"bytes": "9229"
},
{
"name": "Shell",
"bytes": "1113"
}
],
"symlink_target": ""
} |
/* Copyright (c) 2006, Sun Microsystems, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Sun Microsystems nor the names of its contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
*/
# include "incls/_precompiled.incl"
# include "incls/_oop.cpp.incl"
// Called during bootstrapping for computing vtbl values see (create_*Klass)
oopDesc::oopDesc() {
  // Only bootstrapping code may construct an oopDesc directly.
  if (!bootstrapping) ShouldNotCallThis();
}
// Prints a short, human-readable description of this oop to st,
// dispatching on the tag: marks and smis print themselves, everything
// else is delegated to the blueprint.
void oopDesc::print_value_on(outputStream* st) {
  if (is_mark()) {
    markOop(this)->print_on(st);
  } else if (is_smi()) {
    smiOop(this)->print_on(st);
  } else {
#ifdef ASSERT
    // In the debug version unused space is cleared after scavenge.
    // This means if we try printing an oop pointing to unused space
    // its klass() is NULL.
    // The following hack can print such oops.
    if (klass()->addr() == NULL) {
      st->print("Wrong Oop(0x%lx)", this);
    } else
#endif
      blueprint()->oop_print_value_on(this, st);
  }
}
// Prints a full description of this oop to st; marks and smis print
// themselves, everything else is treated as a memOop.
void oopDesc::print_on(outputStream* st) {
  if (is_mark()) {
    markOop(this)->print_on(st);
    return;
  }
  if (is_smi()) {
    smiOop(this)->print_on(st);
    return;
  }
  memOop(this)->print_on(st);
}
// Convenience wrappers printing to the standard output stream.
void oopDesc::print() { print_on(std); }
void oopDesc::print_value() { print_value_on(std); }
char* oopDesc::print_string() {
stringStream* st = new stringStream(50);
print_on(st);
return st->as_string();
}
char* oopDesc::print_value_string() {
stringStream* st = new stringStream(50);
print_value_on(st);
return st->as_string();
}
| {
"content_hash": "f887f653d424fa87e8c5233a8f682799",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 130,
"avg_line_length": 38.32432432432432,
"alnum_prop": 0.7200282087447109,
"repo_name": "Michaelangel007/strongtalk",
"id": "22313655cf58a94f38f6ee480b6da5e204677ae6",
"size": "2909",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "vm/oops/oop.cpp",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "78417"
},
{
"name": "C++",
"bytes": "3425316"
},
{
"name": "HTML",
"bytes": "6401"
},
{
"name": "Makefile",
"bytes": "871353"
}
],
"symlink_target": ""
} |
package revision
import (
"bytes"
"regexp"
"time"
. "gopkg.in/check.v1"
)
// ParserSuite groups the revision-parser tests for the gocheck runner.
type ParserSuite struct{}

var _ = Suite(&ParserSuite{})

// TestErrInvalidRevision checks the error message formatting.
func (s *ParserSuite) TestErrInvalidRevision(c *C) {
	e := ErrInvalidRevision{"test"}

	c.Assert(e.Error(), Equals, "Revision invalid : test")
}

// TestNewParserFromString checks the string-based constructor.
func (s *ParserSuite) TestNewParserFromString(c *C) {
	p := NewParserFromString("test")

	c.Assert(p, FitsTypeOf, &Parser{})
}
// TestScan tokenizes a fixed input and checks every token and lexeme the
// scanner produces, stopping at eof.
func (s *ParserSuite) TestScan(c *C) {
	parser := NewParser(bytes.NewBufferString("Hello world !"))

	expected := []struct {
		t token
		s string
	}{
		{word, "Hello"},
		{space, " "},
		{word, "world"},
		{space, " "},
		{emark, "!"},
	}

	for i := 0; ; i++ {
		tok, str, err := parser.scan()
		if tok == eof {
			return
		}

		c.Assert(err, Equals, nil)
		c.Assert(str, Equals, expected[i].s)
		c.Assert(tok, Equals, expected[i].t)
	}
}
// TestUnscan checks that unscan pushes the last token back, so the next
// scan yields the very same token again.
func (s *ParserSuite) TestUnscan(c *C) {
	parser := NewParser(bytes.NewBufferString("Hello world !"))

	for i := 0; i < 2; i++ {
		tok, str, err := parser.scan()

		c.Assert(err, Equals, nil)
		c.Assert(str, Equals, "Hello")
		c.Assert(tok, Equals, word)

		if i == 0 {
			parser.unscan()
		}
	}
}
// TestParseWithValidExpression runs the full parser over a table of valid
// git revision expressions and compares the resulting Revisioner chains.
func (s *ParserSuite) TestParseWithValidExpression(c *C) {
	tim, _ := time.Parse("2006-01-02T15:04:05Z", "2016-12-16T21:42:47Z")

	// Each key is a revision expression; the value is the expected ordered
	// list of Revisioner components.
	datas := map[string]Revisioner{
		"@": []Revisioner{Ref("HEAD")},
		"@~3": []Revisioner{
			Ref("HEAD"),
			TildePath{3},
		},
		"@{2016-12-16T21:42:47Z}": []Revisioner{AtDate{tim}},
		"@{1}":                    []Revisioner{AtReflog{1}},
		"@{-1}":                   []Revisioner{AtCheckout{1}},
		"master@{upstream}": []Revisioner{
			Ref("master"),
			AtUpstream{},
		},
		"@{upstream}": []Revisioner{
			AtUpstream{},
		},
		"@{u}": []Revisioner{
			AtUpstream{},
		},
		"master@{push}": []Revisioner{
			Ref("master"),
			AtPush{},
		},
		"master@{2016-12-16T21:42:47Z}": []Revisioner{
			Ref("master"),
			AtDate{tim},
		},
		"HEAD^": []Revisioner{
			Ref("HEAD"),
			CaretPath{1},
		},
		"master~3": []Revisioner{
			Ref("master"),
			TildePath{3},
		},
		"v0.99.8^{commit}": []Revisioner{
			Ref("v0.99.8"),
			CaretType{"commit"},
		},
		"v0.99.8^{}": []Revisioner{
			Ref("v0.99.8"),
			CaretType{"tag"},
		},
		"HEAD^{/fix nasty bug}": []Revisioner{
			Ref("HEAD"),
			CaretReg{regexp.MustCompile("fix nasty bug"), false},
		},
		":/fix nasty bug": []Revisioner{
			ColonReg{regexp.MustCompile("fix nasty bug"), false},
		},
		"HEAD:README": []Revisioner{
			Ref("HEAD"),
			ColonPath{"README"},
		},
		":README": []Revisioner{
			ColonPath{"README"},
		},
		"master:./README": []Revisioner{
			Ref("master"),
			ColonPath{"./README"},
		},
		"master^1~:./README": []Revisioner{
			Ref("master"),
			CaretPath{1},
			TildePath{1},
			ColonPath{"./README"},
		},
		":0:README": []Revisioner{
			ColonStagePath{"README", 0},
		},
		":3:README": []Revisioner{
			ColonStagePath{"README", 3},
		},
		// A compound expression exercising several suffix kinds in sequence.
		"master~1^{/update}~5~^^1": []Revisioner{
			Ref("master"),
			TildePath{1},
			CaretReg{regexp.MustCompile("update"), false},
			TildePath{5},
			TildePath{1},
			CaretPath{1},
			CaretPath{1},
		},
	}

	for d, expected := range datas {
		parser := NewParser(bytes.NewBufferString(d))

		result, err := parser.Parse()

		c.Assert(err, Equals, nil)
		c.Assert(result, DeepEquals, expected)
	}
}
// TestParseWithInvalidExpression checks that malformed revision expressions
// produce the exact ErrInvalidRevision error expected for each case.
func (s *ParserSuite) TestParseWithInvalidExpression(c *C) {
	datas := map[string]error{
		"..":                              &ErrInvalidRevision{`must not start with "."`},
		"master^1master":                  &ErrInvalidRevision{`reference must be defined once at the beginning`},
		"master^1@{2016-12-16T21:42:47Z}": &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{<ISO-8601 date>}, @{<ISO-8601 date>}`},
		"master^1@{1}":                    &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{<n>}, @{<n>}`},
		"master@{-1}":                     &ErrInvalidRevision{`"@" statement is not valid, could be : @{-<n>}`},
		"master^1@{upstream}":             &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{upstream}, @{upstream}, <refname>@{u}, @{u}`},
		"master^1@{u}":                    &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{upstream}, @{upstream}, <refname>@{u}, @{u}`},
		"master^1@{push}":                 &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{push}, @{push}`},
		"^1":                              &ErrInvalidRevision{`"~" or "^" statement must have a reference defined at the beginning`},
		"^{/test}":                        &ErrInvalidRevision{`"~" or "^" statement must have a reference defined at the beginning`},
		"~1":                              &ErrInvalidRevision{`"~" or "^" statement must have a reference defined at the beginning`},
		"master:/test":                    &ErrInvalidRevision{`":" statement is not valid, could be : :/<regexp>`},
		"master:0:README":                 &ErrInvalidRevision{`":" statement is not valid, could be : :<n>:<path>`},
		"^{/":                             &ErrInvalidRevision{`missing "}" in ^{<data>} structure`},
		"~@{":                             &ErrInvalidRevision{`missing "}" in @{<data>} structure`},
		"@@{{0":                           &ErrInvalidRevision{`missing "}" in @{<data>} structure`},
	}

	for s, e := range datas {
		parser := NewParser(bytes.NewBufferString(s))

		_, err := parser.Parse()

		c.Assert(err, DeepEquals, e)
	}
}
// TestParseAtWithValidExpression checks the "@{...}" sub-parser against
// every supported form (reflog, checkout, push, upstream, date).
func (s *ParserSuite) TestParseAtWithValidExpression(c *C) {
	tim, _ := time.Parse("2006-01-02T15:04:05Z", "2016-12-16T21:42:47Z")

	datas := map[string]Revisioner{
		"":                       Ref("HEAD"),
		"{1}":                    AtReflog{1},
		"{-1}":                   AtCheckout{1},
		"{push}":                 AtPush{},
		"{upstream}":             AtUpstream{},
		"{u}":                    AtUpstream{},
		"{2016-12-16T21:42:47Z}": AtDate{tim},
	}

	for d, expected := range datas {
		parser := NewParser(bytes.NewBufferString(d))

		result, err := parser.parseAt()

		c.Assert(err, Equals, nil)
		c.Assert(result, DeepEquals, expected)
	}
}

// TestParseAtWithInvalidExpression checks "@{...}" error reporting for a
// malformed date and an unterminated brace.
func (s *ParserSuite) TestParseAtWithInvalidExpression(c *C) {
	datas := map[string]error{
		"{test}": &ErrInvalidRevision{`wrong date "test" must fit ISO-8601 format : 2006-01-02T15:04:05Z`},
		"{-1":    &ErrInvalidRevision{`missing "}" in @{-n} structure`},
	}

	for s, e := range datas {
		parser := NewParser(bytes.NewBufferString(s))

		_, err := parser.parseAt()

		c.Assert(err, DeepEquals, e)
	}
}
func (s *ParserSuite) TestParseCaretWithValidExpression(c *C) {
datas := map[string]Revisioner{
"": CaretPath{1},
"2": CaretPath{2},
"{}": CaretType{"tag"},
"{commit}": CaretType{"commit"},
"{tree}": CaretType{"tree"},
"{blob}": CaretType{"blob"},
"{tag}": CaretType{"tag"},
"{object}": CaretType{"object"},
"{/hello world !}": CaretReg{regexp.MustCompile("hello world !"), false},
"{/!-hello world !}": CaretReg{regexp.MustCompile("hello world !"), true},
"{/!! hello world !}": CaretReg{regexp.MustCompile("! hello world !"), false},
}
for d, expected := range datas {
parser := NewParser(bytes.NewBufferString(d))
result, err := parser.parseCaret()
c.Assert(err, Equals, nil)
c.Assert(result, DeepEquals, expected)
}
}
// TestParseCaretWithUnValidExpression verifies the errors produced by
// parseCaret for malformed ^-suffix expressions.
func (s *ParserSuite) TestParseCaretWithUnValidExpression(c *C) {
	cases := map[string]error{
		"3":         &ErrInvalidRevision{`"3" found must be 0, 1 or 2 after "^"`},
		"{test}":    &ErrInvalidRevision{`"test" is not a valid revision suffix brace component`},
		"{/!test}":  &ErrInvalidRevision{`revision suffix brace component sequences starting with "/!" others than those defined are reserved`},
		"{/test**}": &ErrInvalidRevision{"revision suffix brace component, error parsing regexp: invalid nested repetition operator: `**`"},
	}

	for input, want := range cases {
		p := NewParser(bytes.NewBufferString(input))

		_, err := p.parseCaret()
		c.Assert(err, DeepEquals, want)
	}
}
// TestParseTildeWithValidExpression verifies that parseTilde resolves
// ~-suffix depths, defaulting to 1 when no number is given.
func (s *ParserSuite) TestParseTildeWithValidExpression(c *C) {
	cases := map[string]Revisioner{
		"3": TildePath{3},
		"1": TildePath{1},
		"":  TildePath{1},
	}

	for input, want := range cases {
		p := NewParser(bytes.NewBufferString(input))

		got, err := p.parseTilde()
		c.Assert(err, Equals, nil)
		c.Assert(got, DeepEquals, want)
	}
}
// TestParseColonWithValidExpression verifies that parseColon maps regex
// selectors, plain paths and stage-qualified paths onto the expected
// Revisioner implementations.
func (s *ParserSuite) TestParseColonWithValidExpression(c *C) {
	cases := map[string]Revisioner{
		"/hello world !":   ColonReg{regexp.MustCompile("hello world !"), false},
		"/!-hello world !": ColonReg{regexp.MustCompile("hello world !"), true},
		"/!! hello world !": ColonReg{regexp.MustCompile("! hello world !"), false},
		"../parser.go":     ColonPath{"../parser.go"},
		"./parser.go":      ColonPath{"./parser.go"},
		"parser.go":        ColonPath{"parser.go"},
		"0:parser.go":      ColonStagePath{"parser.go", 0},
		"1:parser.go":      ColonStagePath{"parser.go", 1},
		"2:parser.go":      ColonStagePath{"parser.go", 2},
		"3:parser.go":      ColonStagePath{"parser.go", 3},
	}

	for input, want := range cases {
		p := NewParser(bytes.NewBufferString(input))

		got, err := p.parseColon()
		c.Assert(err, Equals, nil)
		c.Assert(got, DeepEquals, want)
	}
}
// TestParseColonWithUnValidExpression verifies the errors produced by
// parseColon for reserved or invalid regex selectors.
func (s *ParserSuite) TestParseColonWithUnValidExpression(c *C) {
	cases := map[string]error{
		"/!test": &ErrInvalidRevision{`revision suffix brace component sequences starting with "/!" others than those defined are reserved`},
		"/*":     &ErrInvalidRevision{"revision suffix brace component, error parsing regexp: missing argument to repetition operator: `*`"},
	}

	for input, want := range cases {
		p := NewParser(bytes.NewBufferString(input))

		_, err := p.parseColon()
		c.Assert(err, DeepEquals, want)
	}
}
// TestParseRefWithValidName verifies that parseRef accepts well-formed
// reference names and short hashes, returning them verbatim as Ref values.
func (s *ParserSuite) TestParseRefWithValidName(c *C) {
	names := []string{
		"lock",
		"master",
		"v1.0.0",
		"refs/stash",
		"refs/tags/v1.0.0",
		"refs/heads/master",
		"refs/remotes/test",
		"refs/remotes/origin/HEAD",
		"refs/remotes/origin/master",
		"0123abcd", // short hash
	}

	for _, name := range names {
		p := NewParser(bytes.NewBufferString(name))

		got, err := p.parseRef()
		c.Assert(err, Equals, nil)
		c.Assert(got, Equals, Ref(name))
	}
}
// TestParseRefWithInvalidName verifies that parseRef rejects names that
// violate git's ref-name rules, with the matching error for each rule.
func (s *ParserSuite) TestParseRefWithInvalidName(c *C) {
	cases := map[string]error{
		".master":                     &ErrInvalidRevision{`must not start with "."`},
		"/master":                     &ErrInvalidRevision{`must not start with "/"`},
		"master/":                     &ErrInvalidRevision{`must not end with "/"`},
		"master.":                     &ErrInvalidRevision{`must not end with "."`},
		"refs/remotes/.origin/HEAD":   &ErrInvalidRevision{`must not contains "/."`},
		"test..test":                  &ErrInvalidRevision{`must not contains ".."`},
		"test..":                      &ErrInvalidRevision{`must not contains ".."`},
		"test test":                   &ErrInvalidRevision{`must not contains " "`},
		"test*test":                   &ErrInvalidRevision{`must not contains "*"`},
		"test?test":                   &ErrInvalidRevision{`must not contains "?"`},
		"test\\test":                  &ErrInvalidRevision{`must not contains "\"`},
		"test[test":                   &ErrInvalidRevision{`must not contains "["`},
		"te//st":                      &ErrInvalidRevision{`must not contains consecutively "/"`},
		"refs/remotes/test.lock/HEAD": &ErrInvalidRevision{`cannot end with .lock`},
		"test.lock":                   &ErrInvalidRevision{`cannot end with .lock`},
	}

	for name, want := range cases {
		p := NewParser(bytes.NewBufferString(name))

		_, err := p.parseRef()
		c.Assert(err, DeepEquals, want)
	}
}
| {
"content_hash": "ef219cccf6dc34aef24562cc02359e56",
"timestamp": "",
"source": "github",
"line_count": 399,
"max_line_length": 154,
"avg_line_length": 29.729323308270676,
"alnum_prop": 0.5858202663968977,
"repo_name": "go-git/go-git",
"id": "3a77b2f11e61792eb43c6848f31b90a9dadddcec",
"size": "11862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "internal/revision/parser_test.go",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "2040586"
},
{
"name": "Makefile",
"bytes": "828"
},
{
"name": "Shell",
"bytes": "2051"
}
],
"symlink_target": ""
} |
<?xml version="1.0" encoding="UTF-8"?>
<metadata>
<groupId>koncept.pheidippides</groupId>
<artifactId>pheidippides-repository-2</artifactId>
<versioning>
<versions>
<version>1.0-SNAPSHOT</version>
</versions>
<lastUpdated>20140415195921</lastUpdated>
</versioning>
</metadata>
| {
"content_hash": "9924d66b0c39217dfbc4d4c2423655ad",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 52,
"avg_line_length": 27.545454545454547,
"alnum_prop": 0.6996699669966997,
"repo_name": "custom-koncept-ltd/Pheidippides",
"id": "f2bf3b46f75bca4dfae1548b281117b9d6fed729",
"size": "303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pheidippides-repository/src/test/resources/test-maven-metadata-local-2.xml",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "56576"
}
],
"symlink_target": ""
} |
"""
Amazon EC2, Eucalyptus and Nimbus drivers.
"""
from __future__ import with_statement
import sys
import base64
import os
import copy
from xml.etree import ElementTree as ET
from libcloud.utils.py3 import b
from libcloud.utils.xml import fixxpath, findtext, findattr, findall
from libcloud.common.aws import AWSBaseResponse, SignedAWSConnection
from libcloud.common.types import (InvalidCredsError, MalformedResponseError,
LibcloudError)
from libcloud.compute.providers import Provider
from libcloud.compute.types import NodeState
from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize
from libcloud.compute.base import NodeImage, StorageVolume
# EC2 query API version sent with every request.
API_VERSION = '2010-08-31'
# XML namespace used by all elements in EC2 responses for this API version.
NAMESPACE = 'http://ec2.amazonaws.com/doc/%s/' % (API_VERSION)
"""
Sizes must be hardcoded, because Amazon doesn't provide an API to fetch them.
From http://aws.amazon.com/ec2/instance-types/
"""
INSTANCE_TYPES = {
't1.micro': {
'id': 't1.micro',
'name': 'Micro Instance',
'ram': 613,
'disk': 15,
'bandwidth': None
},
'm1.small': {
'id': 'm1.small',
'name': 'Small Instance',
'ram': 1740,
'disk': 160,
'bandwidth': None
},
'm1.medium': {
'id': 'm1.medium',
'name': 'Medium Instance',
'ram': 3700,
'disk': 410,
'bandwidth': None
},
'm1.large': {
'id': 'm1.large',
'name': 'Large Instance',
'ram': 7680,
'disk': 850,
'bandwidth': None
},
'm1.xlarge': {
'id': 'm1.xlarge',
'name': 'Extra Large Instance',
'ram': 15360,
'disk': 1690,
'bandwidth': None
},
'c1.medium': {
'id': 'c1.medium',
'name': 'High-CPU Medium Instance',
'ram': 1740,
'disk': 350,
'bandwidth': None
},
'c1.xlarge': {
'id': 'c1.xlarge',
'name': 'High-CPU Extra Large Instance',
'ram': 7680,
'disk': 1690,
'bandwidth': None
},
'm2.xlarge': {
'id': 'm2.xlarge',
'name': 'High-Memory Extra Large Instance',
'ram': 17510,
'disk': 420,
'bandwidth': None
},
'm2.2xlarge': {
'id': 'm2.2xlarge',
'name': 'High-Memory Double Extra Large Instance',
'ram': 35021,
'disk': 850,
'bandwidth': None
},
'm2.4xlarge': {
'id': 'm2.4xlarge',
'name': 'High-Memory Quadruple Extra Large Instance',
'ram': 70042,
'disk': 1690,
'bandwidth': None
},
'm3.xlarge': {
'id': 'm3.xlarge',
'name': 'Extra Large Instance',
'ram': 15360,
'disk': None,
'bandwidth': None
},
'm3.2xlarge': {
'id': 'm3.2xlarge',
'name': 'Double Extra Large Instance',
'ram': 30720,
'disk': None,
'bandwidth': None
},
'cg1.4xlarge': {
'id': 'cg1.4xlarge',
'name': 'Cluster GPU Quadruple Extra Large Instance',
'ram': 22528,
'disk': 1690,
'bandwidth': None
},
'cc1.4xlarge': {
'id': 'cc1.4xlarge',
'name': 'Cluster Compute Quadruple Extra Large Instance',
'ram': 23552,
'disk': 1690,
'bandwidth': None
},
'cc2.8xlarge': {
'id': 'cc2.8xlarge',
'name': 'Cluster Compute Eight Extra Large Instance',
'ram': 63488,
'disk': 3370,
'bandwidth': None
},
'cr1.8xlarge': {
'id': 'cr1.8xlarge',
'name': 'High Memory Cluster Eight Extra Large',
'ram': 244000,
'disk': 240,
'bandwidth': None
},
'hs1.8xlarge': {
'id': 'hs1.8xlarge',
'name': 'High Storage Eight Extra Large Instance',
'ram': 119808,
'disk': 48000,
'bandwidth': None
}
}
REGION_DETAILS = {
'us-east-1': {
'endpoint': 'ec2.us-east-1.amazonaws.com',
'api_name': 'ec2_us_east',
'country': 'USA',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'cg1.4xlarge',
'cr1.8xlarge',
'hs1.8xlarge'
]
},
'us-west-1': {
'endpoint': 'ec2.us-west-1.amazonaws.com',
'api_name': 'ec2_us_west',
'country': 'USA',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge'
]
},
'us-west-2': {
'endpoint': 'ec2.us-west-2.amazonaws.com',
'api_name': 'ec2_us_west_oregon',
'country': 'US',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'c1.medium',
'c1.xlarge',
'cc2.8xlarge'
]
},
'eu-west-1': {
'endpoint': 'ec2.eu-west-1.amazonaws.com',
'api_name': 'ec2_eu_west',
'country': 'Ireland',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge',
'cc2.8xlarge'
]
},
'ap-southeast-1': {
'endpoint': 'ec2.ap-southeast-1.amazonaws.com',
'api_name': 'ec2_ap_southeast',
'country': 'Singapore',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge'
]
},
'ap-northeast-1': {
'endpoint': 'ec2.ap-northeast-1.amazonaws.com',
'api_name': 'ec2_ap_northeast',
'country': 'Japan',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge'
]
},
'sa-east-1': {
'endpoint': 'ec2.sa-east-1.amazonaws.com',
'api_name': 'ec2_sa_east',
'country': 'Brazil',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'c1.medium',
'c1.xlarge'
]
},
'ap-southeast-2': {
'endpoint': 'ec2.ap-southeast-2.amazonaws.com',
'api_name': 'ec2_ap_southeast_2',
'country': 'Australia',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge'
]
},
'nimbus': {
# Nimbus clouds have 3 EC2-style instance types but their particular
# RAM allocations are configured by the admin
'country': 'custom',
'instance_types': [
'm1.small',
'm1.large',
'm1.xlarge'
]
}
}
# Every region is a valid EC2 datacenter except 'nimbus', which describes
# Nimbus clouds rather than a real EC2 region.
VALID_EC2_DATACENTERS = [region for region in REGION_DETAILS.keys()
                         if region != 'nimbus']
class EC2NodeLocation(NodeLocation):
    """NodeLocation that additionally carries the EC2 availability zone."""

    def __init__(self, id, name, country, driver, availability_zone):
        super(EC2NodeLocation, self).__init__(id, name, country, driver)
        self.availability_zone = availability_zone

    def __repr__(self):
        fmt = ('<EC2NodeLocation: id=%s, name=%s, country=%s, '
               'availability_zone=%s driver=%s>')
        return fmt % (self.id, self.name, self.country,
                      self.availability_zone, self.driver.name)
class EC2Response(AWSBaseResponse):
    """
    EC2 specific response parsing and error handling.
    """
    def parse_error(self):
        """
        Parse <Errors><Error> entries out of an error response body.

        Credential-related error codes are escalated to
        L{InvalidCredsError}; idempotency conflicts raise
        IdempotentParamError. The concatenated "code: message" lines are
        returned for everything else.
        """
        err_list = []
        # Okay, so for Eucalyptus, you can get a 403, with no body,
        # if you are using the wrong user/password.
        msg = "Failure: 403 Forbidden"
        if self.status == 403 and self.body[:len(msg)] == msg:
            raise InvalidCredsError(msg)
        try:
            body = ET.XML(self.body)
        except Exception:
            # Previously a bare "except:", which would also swallow
            # SystemExit and KeyboardInterrupt; only genuine parse
            # failures should be reported as a malformed response.
            raise MalformedResponseError("Failed to parse XML",
                                         body=self.body,
                                         driver=EC2NodeDriver)
        for err in body.findall('Errors/Error'):
            code, message = err.getchildren()
            err_list.append("%s: %s" % (code.text, message.text))
            # Authentication/authorization failures surface uniformly as
            # InvalidCredsError so callers can react in one place.
            if code.text == "InvalidClientTokenId":
                raise InvalidCredsError(err_list[-1])
            if code.text == "SignatureDoesNotMatch":
                raise InvalidCredsError(err_list[-1])
            if code.text == "AuthFailure":
                raise InvalidCredsError(err_list[-1])
            if code.text == "OptInRequired":
                raise InvalidCredsError(err_list[-1])
            if code.text == "IdempotentParameterMismatch":
                raise IdempotentParamError(err_list[-1])
        return "\n".join(err_list)
class EC2Connection(SignedAWSConnection):
    """
    Represents a single connection to the EC2 Endpoint.
    """
    # Query API version appended to every signed request.
    version = API_VERSION
    # Defaults to the us-east-1 endpoint; region-specific drivers override.
    host = REGION_DETAILS['us-east-1']['endpoint']
    responseCls = EC2Response
class ExEC2AvailabilityZone(object):
    """
    Extension class which stores information about an EC2 availability zone.

    Note: This class is EC2 specific.
    """
    def __init__(self, name, zone_state, region_name):
        self.name = name
        self.zone_state = zone_state
        self.region_name = region_name

    def __repr__(self):
        fmt = ('<ExEC2AvailabilityZone: name=%s, zone_state=%s, '
               'region_name=%s>')
        return fmt % (self.name, self.zone_state, self.region_name)
class BaseEC2NodeDriver(NodeDriver):
    """
    Base Amazon EC2 node driver.

    Used for main EC2 and other derivate driver classes to inherit from it.
    """
    connectionCls = EC2Connection
    # All API calls go to the root path of the regional endpoint.
    path = '/'
    features = {'create_node': ['ssh_key']}
    # Maps EC2 instance-state names to libcloud NodeState values.
    # 'shutting-down' is deliberately UNKNOWN: the instance is neither
    # running nor fully terminated at that point.
    NODE_STATE_MAP = {
        'pending': NodeState.PENDING,
        'running': NodeState.RUNNING,
        'shutting-down': NodeState.UNKNOWN,
        'terminated': NodeState.TERMINATED
    }
def _pathlist(self, key, arr):
"""
Converts a key and an array of values into AWS query param format.
"""
params = {}
i = 0
for value in arr:
i += 1
params["%s.%s" % (key, i)] = value
return params
def _get_boolean(self, element):
tag = "{%s}%s" % (NAMESPACE, 'return')
return element.findtext(tag) == 'true'
def _get_state_boolean(self, element):
"""
Checks for the instances's state
"""
state = findall(element=element,
xpath='instancesSet/item/currentState/name',
namespace=NAMESPACE)[0].text
return state in ('stopping', 'pending', 'starting')
def _get_terminate_boolean(self, element):
status = element.findtext(".//{%s}%s" % (NAMESPACE, 'name'))
return any([term_status == status
for term_status
in ('shutting-down', 'terminated')])
def _to_nodes(self, object, xpath, groups=None):
return [self._to_node(el, groups=groups)
for el in object.findall(fixxpath(xpath=xpath,
namespace=NAMESPACE))]
def _to_node(self, element, groups=None):
try:
state = self.NODE_STATE_MAP[findattr(element=element,
xpath="instanceState/name",
namespace=NAMESPACE)
]
except KeyError:
state = NodeState.UNKNOWN
instance_id = findtext(element=element, xpath='instanceId',
namespace=NAMESPACE)
tags = dict((findtext(element=item, xpath='key', namespace=NAMESPACE),
findtext(element=item, xpath='value',
namespace=NAMESPACE))
for item in findall(element=element,
xpath='tagSet/item',
namespace=NAMESPACE)
)
name = tags.get('Name', instance_id)
public_ip = findtext(element=element, xpath='ipAddress',
namespace=NAMESPACE)
public_ips = [public_ip] if public_ip else []
private_ip = findtext(element=element, xpath='privateIpAddress',
namespace=NAMESPACE)
private_ips = [private_ip] if private_ip else []
n = Node(
id=findtext(element=element, xpath='instanceId',
namespace=NAMESPACE),
name=name,
state=state,
public_ips=public_ips,
private_ips=private_ips,
driver=self.connection.driver,
extra={
'dns_name': findattr(element=element, xpath="dnsName",
namespace=NAMESPACE),
'instanceId': findattr(element=element, xpath="instanceId",
namespace=NAMESPACE),
'imageId': findattr(element=element, xpath="imageId",
namespace=NAMESPACE),
'private_dns': findattr(element=element,
xpath="privateDnsName",
namespace=NAMESPACE),
'status': findattr(element=element, xpath="instanceState/name",
namespace=NAMESPACE),
'keyname': findattr(element=element, xpath="keyName",
namespace=NAMESPACE),
'launchindex': findattr(element=element,
xpath="amiLaunchIndex",
namespace=NAMESPACE),
'productcode': [
p.text for p in findall(
element=element,
xpath="productCodesSet/item/productCode",
namespace=NAMESPACE
)],
'instancetype': findattr(element=element, xpath="instanceType",
namespace=NAMESPACE),
'launchdatetime': findattr(element=element, xpath="launchTime",
namespace=NAMESPACE),
'availability': findattr(element,
xpath="placement/availabilityZone",
namespace=NAMESPACE),
'kernelid': findattr(element=element, xpath="kernelId",
namespace=NAMESPACE),
'ramdiskid': findattr(element=element, xpath="ramdiskId",
namespace=NAMESPACE),
'clienttoken': findattr(element=element, xpath="clientToken",
namespace=NAMESPACE),
'groups': groups,
'tags': tags
}
)
return n
def _to_images(self, object):
return [self._to_image(el) for el in object.findall(
fixxpath(xpath='imagesSet/item', namespace=NAMESPACE))
]
def _to_image(self, element):
n = NodeImage(
id=findtext(element=element, xpath='imageId', namespace=NAMESPACE),
name=findtext(element=element, xpath='imageLocation',
namespace=NAMESPACE),
driver=self.connection.driver,
extra={
'state': findattr(element=element, xpath="imageState",
namespace=NAMESPACE),
'ownerid': findattr(element=element, xpath="imageOwnerId",
namespace=NAMESPACE),
'owneralias': findattr(element=element,
xpath="imageOwnerAlias",
namespace=NAMESPACE),
'ispublic': findattr(element=element,
xpath="isPublic",
namespace=NAMESPACE),
'architecture': findattr(element=element,
xpath="architecture",
namespace=NAMESPACE),
'imagetype': findattr(element=element,
xpath="imageType",
namespace=NAMESPACE),
'platform': findattr(element=element,
xpath="platform",
namespace=NAMESPACE),
'rootdevicetype': findattr(element=element,
xpath="rootDeviceType",
namespace=NAMESPACE),
'virtualizationtype': findattr(
element=element, xpath="virtualizationType",
namespace=NAMESPACE),
'hypervisor': findattr(element=element,
xpath="hypervisor",
namespace=NAMESPACE)
}
)
return n
def _to_volume(self, element, name):
volId = findtext(element=element, xpath='volumeId',
namespace=NAMESPACE)
size = findtext(element=element, xpath='size', namespace=NAMESPACE)
return StorageVolume(id=volId,
name=name,
size=int(size),
driver=self)
    def list_nodes(self, ex_node_ids=None):
        """
        List all nodes

        Ex_node_ids parameter is used to filter the list of
        nodes that should be returned. Only the nodes
        with the corresponding node ids will be returned.

        @param ex_node_ids: List of C{node.id}
        @type ex_node_ids: C{list} of C{str}

        @rtype: C{list} of L{Node}
        """
        params = {'Action': 'DescribeInstances'}
        if ex_node_ids:
            params.update(self._pathlist('InstanceId', ex_node_ids))
        elem = self.connection.request(self.path, params=params).object
        nodes = []
        for rs in findall(element=elem, xpath='reservationSet/item',
                          namespace=NAMESPACE):
            # findtext('') returns the text of the <groupId> element itself.
            groups = [g.findtext('')
                      for g in findall(element=rs,
                                       xpath='groupSet/item/groupId',
                                       namespace=NAMESPACE)]
            nodes += self._to_nodes(rs, 'instancesSet/item', groups)
        # Merge in any Elastic IPs associated with the returned nodes;
        # ex_describe_addresses guarantees an entry for every node id.
        nodes_elastic_ips_mappings = self.ex_describe_addresses(nodes)
        for node in nodes:
            ips = nodes_elastic_ips_mappings[node.id]
            node.public_ips.extend(ips)
        return nodes
def list_sizes(self, location=None):
available_types = REGION_DETAILS[self.region_name]['instance_types']
sizes = []
for instance_type in available_types:
attributes = INSTANCE_TYPES[instance_type]
attributes = copy.deepcopy(attributes)
price = self._get_size_price(size_id=instance_type)
attributes.update({'price': price})
sizes.append(NodeSize(driver=self, **attributes))
return sizes
def list_images(self, location=None, ex_image_ids=None):
"""
List all images
Ex_image_ids parameter is used to filter the list of
images that should be returned. Only the images
with the corresponding image ids will be returned.
@param ex_image_ids: List of C{NodeImage.id}
@type ex_image_ids: C{list} of C{str}
@rtype: C{list} of L{NodeImage}
"""
params = {'Action': 'DescribeImages'}
if ex_image_ids:
params.update(self._pathlist('ImageId', ex_image_ids))
images = self._to_images(
self.connection.request(self.path, params=params).object
)
return images
def list_locations(self):
locations = []
for index, availability_zone in \
enumerate(self.ex_list_availability_zones()):
locations.append(EC2NodeLocation(
index, availability_zone.name, self.country, self,
availability_zone)
)
return locations
def create_volume(self, size, name, location=None, snapshot=None):
params = {
'Action': 'CreateVolume',
'Size': str(size)}
if location is not None:
params['AvailabilityZone'] = location.availability_zone.name
volume = self._to_volume(
self.connection.request(self.path, params=params).object,
name=name)
self.ex_create_tags(volume, {'Name': name})
return volume
def destroy_volume(self, volume):
params = {
'Action': 'DeleteVolume',
'VolumeId': volume.id}
response = self.connection.request(self.path, params=params).object
return self._get_boolean(response)
def attach_volume(self, node, volume, device):
params = {
'Action': 'AttachVolume',
'VolumeId': volume.id,
'InstanceId': node.id,
'Device': device}
self.connection.request(self.path, params=params)
return True
def detach_volume(self, volume):
params = {
'Action': 'DetachVolume',
'VolumeId': volume.id}
self.connection.request(self.path, params=params)
return True
def ex_create_keypair(self, name):
"""Creates a new keypair
@note: This is a non-standard extension API, and only works for EC2.
@param name: The name of the keypair to Create. This must be
unique, otherwise an InvalidKeyPair.Duplicate exception is raised.
@type name: C{str}
@rtype: C{dict}
"""
params = {
'Action': 'CreateKeyPair',
'KeyName': name,
}
response = self.connection.request(self.path, params=params).object
key_material = findtext(element=response, xpath='keyMaterial',
namespace=NAMESPACE)
key_fingerprint = findtext(element=response, xpath='keyFingerprint',
namespace=NAMESPACE)
return {
'keyMaterial': key_material,
'keyFingerprint': key_fingerprint,
}
def ex_import_keypair(self, name, keyfile):
"""
imports a new public key
@note: This is a non-standard extension API, and only works for EC2.
@param name: The name of the public key to import. This must be
unique, otherwise an InvalidKeyPair.Duplicate exception is raised.
@type name: C{str}
@param keyfile: The filename with path of the public key to import.
@type keyfile: C{str}
@rtype: C{dict}
"""
with open(os.path.expanduser(keyfile)) as fh:
content = fh.read()
base64key = base64.b64encode(content)
params = {
'Action': 'ImportKeyPair',
'KeyName': name,
'PublicKeyMaterial': base64key
}
response = self.connection.request(self.path, params=params).object
key_name = findtext(element=response, xpath='keyName',
namespace=NAMESPACE)
key_fingerprint = findtext(element=response, xpath='keyFingerprint',
namespace=NAMESPACE)
return {
'keyName': key_name,
'keyFingerprint': key_fingerprint,
}
def ex_describe_all_keypairs(self):
"""
Describes all keypairs.
@note: This is a non-standard extension API, and only works for EC2.
@rtype: C{list} of C{str}
"""
params = {
'Action': 'DescribeKeyPairs'
}
response = self.connection.request(self.path, params=params).object
names = []
for elem in findall(element=response, xpath='keySet/item',
namespace=NAMESPACE):
name = findtext(element=elem, xpath='keyName', namespace=NAMESPACE)
names.append(name)
return names
def ex_describe_keypairs(self, name):
"""Describes a keypair by name
@note: This is a non-standard extension API, and only works for EC2.
@param name: The name of the keypair to describe.
@type name: C{str}
@rtype: C{dict}
"""
params = {
'Action': 'DescribeKeyPairs',
'KeyName.1': name
}
response = self.connection.request(self.path, params=params).object
key_name = findattr(element=response, xpath='keySet/item/keyName',
namespace=NAMESPACE)
return {
'keyName': key_name
}
def ex_list_security_groups(self):
"""
List existing Security Groups.
@note: This is a non-standard extension API, and only works for EC2.
@rtype: C{list} of C{str}
"""
params = {'Action': 'DescribeSecurityGroups'}
response = self.connection.request(self.path, params=params).object
groups = []
for group in findall(element=response, xpath='securityGroupInfo/item',
namespace=NAMESPACE):
name = findtext(element=group, xpath='groupName',
namespace=NAMESPACE)
groups.append(name)
return groups
def ex_create_security_group(self, name, description):
"""
Creates a new Security Group
@note: This is a non-standard extension API, and only works for EC2.
@param name: The name of the security group to Create.
This must be unique.
@type name: C{str}
@param description: Human readable description of a Security
Group.
@type description: C{str}
@rtype: C{str}
"""
params = {'Action': 'CreateSecurityGroup',
'GroupName': name,
'GroupDescription': description}
return self.connection.request(self.path, params=params).object
    def ex_authorize_security_group(self, name, from_port, to_port, cidr_ip,
                                    protocol='tcp'):
        """
        Edit a Security Group to allow specific traffic.

        @note: This is a non-standard extension API, and only works for EC2.

        @param name: The name of the security group to edit
        @type name: C{str}

        @param from_port: The beginning of the port range to open
        @type from_port: C{str}

        @param to_port: The end of the port range to open
        @type to_port: C{str}

        @param cidr_ip: The ip to allow traffic for.
        @type cidr_ip: C{str}

        @param protocol: tcp/udp/icmp
        @type protocol: C{str}

        @rtype: C{bool}
        """
        params = {'Action': 'AuthorizeSecurityGroupIngress',
                  'GroupName': name,
                  'IpProtocol': protocol,
                  'FromPort': str(from_port),
                  'ToPort': str(to_port),
                  'CidrIp': cidr_ip}
        try:
            resp = self.connection.request(
                self.path, params=params.copy()).object
            return bool(findtext(element=resp, xpath='return',
                                 namespace=NAMESPACE))
        except Exception:
            # py2/py3-compatible way of grabbing the active exception.
            e = sys.exc_info()[1]
            # A duplicate rule is deliberately swallowed (the rule already
            # exists); note the method then implicitly returns None rather
            # than the documented bool.
            if e.args[0].find('InvalidPermission.Duplicate') == -1:
                raise e
    def ex_authorize_security_group_permissive(self, name):
        """
        Edit a Security Group to allow all traffic.

        @note: This is a non-standard extension API, and only works for EC2.

        @param name: The name of the security group to edit
        @type name: C{str}

        @rtype: C{list} of C{str}
        """
        results = []
        params = {'Action': 'AuthorizeSecurityGroupIngress',
                  'GroupName': name,
                  'IpProtocol': 'tcp',
                  'FromPort': '0',
                  'ToPort': '65535',
                  'CidrIp': '0.0.0.0/0'}
        try:
            results.append(
                self.connection.request(self.path, params=params.copy()).object
            )
        except Exception:
            # py2/py3-compatible access to the in-flight exception.
            e = sys.exc_info()[1]
            # An already-existing rule is not an error for this method.
            if e.args[0].find("InvalidPermission.Duplicate") == -1:
                raise e
        # Same wide-open rule again for UDP.
        params['IpProtocol'] = 'udp'
        try:
            results.append(
                self.connection.request(self.path, params=params.copy()).object
            )
        except Exception:
            e = sys.exc_info()[1]
            if e.args[0].find("InvalidPermission.Duplicate") == -1:
                raise e
        # ICMP uses -1/-1 to mean "all types/codes".
        params.update({'IpProtocol': 'icmp', 'FromPort': '-1', 'ToPort': '-1'})
        try:
            results.append(
                self.connection.request(self.path, params=params.copy()).object
            )
        except Exception:
            e = sys.exc_info()[1]
            if e.args[0].find("InvalidPermission.Duplicate") == -1:
                raise e
        return results
    def ex_list_availability_zones(self, only_available=True):
        """
        Return a list of L{ExEC2AvailabilityZone} objects for the
        current region.

        Note: This is an extension method and is only available for EC2
        driver.

        @keyword only_available: If true, return only availability zones
                                 with state 'available'
        @type only_available: C{str}

        @rtype: C{list} of L{ExEC2AvailabilityZone}
        """
        params = {'Action': 'DescribeAvailabilityZones'}
        if only_available:
            # Server-side filter: only zones currently marked 'available'.
            params.update({'Filter.0.Name': 'state'})
            params.update({'Filter.0.Value.0': 'available'})
        # Always restrict results to the driver's region.
        params.update({'Filter.1.Name': 'region-name'})
        params.update({'Filter.1.Value.0': self.region_name})
        result = self.connection.request(self.path,
                                         params=params.copy()).object
        availability_zones = []
        for element in findall(element=result,
                               xpath='availabilityZoneInfo/item',
                               namespace=NAMESPACE):
            name = findtext(element=element, xpath='zoneName',
                            namespace=NAMESPACE)
            zone_state = findtext(element=element, xpath='zoneState',
                                  namespace=NAMESPACE)
            region_name = findtext(element=element, xpath='regionName',
                                   namespace=NAMESPACE)
            availability_zone = ExEC2AvailabilityZone(
                name=name,
                zone_state=zone_state,
                region_name=region_name
            )
            availability_zones.append(availability_zone)
        return availability_zones
def ex_describe_tags(self, resource):
"""
Return a dictionary of tags for a resource (Node or StorageVolume).
@param resource: resource which should be used
@type resource: L{Node} or L{StorageVolume}
@return: dict Node tags
@rtype: C{dict}
"""
params = {'Action': 'DescribeTags',
'Filter.0.Name': 'resource-id',
'Filter.0.Value.0': resource.id,
'Filter.1.Name': 'resource-type',
'Filter.1.Value.0': 'instance',
}
result = self.connection.request(self.path,
params=params.copy()).object
tags = {}
for element in findall(element=result, xpath='tagSet/item',
namespace=NAMESPACE):
key = findtext(element=element, xpath='key', namespace=NAMESPACE)
value = findtext(element=element,
xpath='value', namespace=NAMESPACE)
tags[key] = value
return tags
def ex_create_tags(self, resource, tags):
"""
Create tags for a resource (Node or StorageVolume).
@param resource: Resource to be tagged
@type resource: L{Node} or L{StorageVolume}
@param tags: A dictionary or other mapping of strings to strings,
associating tag names with tag values.
@type tags: C{dict}
@rtype: C{bool}
"""
if not tags:
return
params = {'Action': 'CreateTags',
'ResourceId.0': resource.id}
for i, key in enumerate(tags):
params['Tag.%d.Key' % i] = key
params['Tag.%d.Value' % i] = tags[key]
result = self.connection.request(self.path,
params=params.copy()).object
element = findtext(element=result, xpath='return',
namespace=NAMESPACE)
return element == 'true'
def ex_delete_tags(self, resource, tags):
"""
Delete tags from a resource.
@param resource: Resource to be tagged
@type resource: L{Node} or L{StorageVolume}
@param tags: A dictionary or other mapping of strings to strings,
specifying the tag names and tag values to be deleted.
@type tags: C{dict}
@rtype: C{bool}
"""
if not tags:
return
params = {'Action': 'DeleteTags',
'ResourceId.0': resource.id}
for i, key in enumerate(tags):
params['Tag.%d.Key' % i] = key
params['Tag.%d.Value' % i] = tags[key]
result = self.connection.request(self.path,
params=params.copy()).object
element = findtext(element=result, xpath='return',
namespace=NAMESPACE)
return element == 'true'
def _add_instance_filter(self, params, node):
"""
Add instance filter to the provided params dictionary.
"""
params.update({
'Filter.0.Name': 'instance-id',
'Filter.0.Value.0': node.id
})
def ex_describe_all_addresses(self, only_allocated=False):
"""
Return all the Elastic IP addresses for this account
optionally, return only the allocated addresses
@param only_allocated: If true, return only those addresses
that are associated with an instance
@type only_allocated: C{str}
@return: list list of elastic ips for this particular account.
@rtype: C{list} of C{str}
"""
params = {'Action': 'DescribeAddresses'}
result = self.connection.request(self.path,
params=params.copy()).object
# the list which we return
elastic_ip_addresses = []
for element in findall(element=result, xpath='addressesSet/item',
namespace=NAMESPACE):
instance_id = findtext(element=element, xpath='instanceId',
namespace=NAMESPACE)
# if only allocated addresses are requested
if only_allocated and not instance_id:
continue
ip_address = findtext(element=element, xpath='publicIp',
namespace=NAMESPACE)
elastic_ip_addresses.append(ip_address)
return elastic_ip_addresses
def ex_associate_addresses(self, node, elastic_ip_address):
"""
Associate an IP address with a particular node.
@param node: Node instance
@type node: L{Node}
@param elastic_ip_address: IP address which should be used
@type elastic_ip_address: C{str}
@rtype: C{bool}
"""
params = {'Action': 'AssociateAddress'}
params.update(self._pathlist('InstanceId', [node.id]))
params.update({'PublicIp': elastic_ip_address})
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_describe_addresses(self, nodes):
    """
    Return Elastic IP addresses for all the nodes in the provided list.

    @param nodes: List of C{Node} instances
    @type nodes: C{list} of L{Node}

    @return: Dictionary where a key is a node ID and the value is a
             list with the Elastic IP addresses associated with this node.
    @rtype: C{dict}
    """
    if not nodes:
        return {}

    params = {'Action': 'DescribeAddresses'}

    # With a single node we can let the API do the filtering for us.
    if len(nodes) == 1:
        self._add_instance_filter(params, nodes[0])

    result = self.connection.request(self.path,
                                     params=params.copy()).object

    wanted_ids = [node.id for node in nodes]
    # Every requested node gets an entry, even if it has no addresses.
    mappings = dict((node_id, []) for node_id in wanted_ids)

    items = findall(element=result, xpath='addressesSet/item',
                    namespace=NAMESPACE)
    for item in items:
        instance_id = findtext(element=item, xpath='instanceId',
                               namespace=NAMESPACE)
        if instance_id not in wanted_ids:
            continue

        ip_address = findtext(element=item, xpath='publicIp',
                              namespace=NAMESPACE)
        mappings[instance_id].append(ip_address)

    return mappings
def ex_describe_addresses_for_node(self, node):
    """
    Return a list of Elastic IP addresses associated with this node.

    @param node: Node instance
    @type node: L{Node}

    @return: List of Elastic IP addresses attached to this node.
    @rtype: C{list} of C{str}
    """
    mappings = self.ex_describe_addresses([node])
    return mappings[node.id]
def ex_modify_instance_attribute(self, node, attributes):
    """
    Modify node attributes.
    A list of valid attributes can be found at http://goo.gl/gxcj8

    @param node: Node instance
    @type node: L{Node}

    @param attributes: Dictionary with node attributes
    @type attributes: C{dict}

    @return: True on success, False otherwise.
    @rtype: C{bool}
    """
    # Work on a copy so the caller's dictionary is not mutated by the
    # 'InstanceId' entry we add below.
    attributes = dict(attributes) if attributes else {}
    attributes['InstanceId'] = node.id

    params = {'Action': 'ModifyInstanceAttribute'}
    params.update(attributes)

    result = self.connection.request(self.path,
                                     params=params.copy()).object
    element = findtext(element=result, xpath='return',
                       namespace=NAMESPACE)
    return element == 'true'
def ex_change_node_size(self, node, new_size):
    """
    Change the node size.
    Note: Node must be turned off before changing the size.

    @param node: Node instance
    @type node: L{Node}

    @param new_size: NodeSize instance
    @type new_size: L{NodeSize}

    @return: True on success, False otherwise.
    @rtype: C{bool}

    @raise ValueError: If the requested size equals the current one.
    """
    if 'instancetype' in node.extra:
        current_instance_type = node.extra['instancetype']

        if current_instance_type == new_size.id:
            # Note the trailing space inside the first literal: without it
            # the concatenated message read "same asthe current one".
            raise ValueError('New instance size is the same as '
                             'the current one')

    attributes = {'InstanceType.Value': new_size.id}
    return self.ex_modify_instance_attribute(node, attributes)
def create_node(self, **kwargs):
    """Create a new EC2 node

    Reference: http://bit.ly/8ZyPSy [docs.amazonwebservices.com]

    @inherits: L{NodeDriver.create_node}

    @keyword ex_mincount: Minimum number of instances to launch
    @type ex_mincount: C{int}

    @keyword ex_maxcount: Maximum number of instances to launch
    @type ex_maxcount: C{int}

    @keyword ex_securitygroup: Name of security group
    @type ex_securitygroup: C{str}

    @keyword ex_keyname: The name of the key pair
    @type ex_keyname: C{str}

    @keyword ex_userdata: User data
    @type ex_userdata: C{str}

    @keyword ex_clienttoken: Unique identifier to ensure idempotency
    @type ex_clienttoken: C{str}

    @keyword ex_blockdevicemappings: C{list} of C{dict} block device
             mappings. Example:
             [{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'}]
    @type ex_blockdevicemappings: C{list} of C{dict}
    """
    # 'image', 'size' and 'name' are required; a KeyError here means the
    # caller omitted one of them.
    image = kwargs["image"]
    size = kwargs["size"]
    params = {
        'Action': 'RunInstances',
        'ImageId': image.id,
        'MinCount': kwargs.get('ex_mincount', '1'),
        'MaxCount': kwargs.get('ex_maxcount', '1'),
        'InstanceType': size.id
    }

    # A single group name is accepted as a convenience and normalized
    # into a one-element list; EC2 params are 1-indexed.
    if 'ex_securitygroup' in kwargs:
        if not isinstance(kwargs['ex_securitygroup'], list):
            kwargs['ex_securitygroup'] = [kwargs['ex_securitygroup']]
        for sig in range(len(kwargs['ex_securitygroup'])):
            params['SecurityGroup.%d' % (sig + 1,)] =\
                kwargs['ex_securitygroup'][sig]

    if 'location' in kwargs:
        availability_zone = getattr(kwargs['location'],
                                    'availability_zone', None)
        if availability_zone:
            # A zone from another region cannot be used with this driver.
            if availability_zone.region_name != self.region_name:
                raise AttributeError('Invalid availability zone: %s'
                                     % (availability_zone.name))
            params['Placement.AvailabilityZone'] = availability_zone.name

    if 'ex_keyname' in kwargs:
        params['KeyName'] = kwargs['ex_keyname']

    # User data must be base64-encoded per the RunInstances API.
    if 'ex_userdata' in kwargs:
        params['UserData'] = base64.b64encode(b(kwargs['ex_userdata']))\
            .decode('utf-8')

    if 'ex_clienttoken' in kwargs:
        params['ClientToken'] = kwargs['ex_clienttoken']

    if 'ex_blockdevicemappings' in kwargs:
        for index, mapping in enumerate(kwargs['ex_blockdevicemappings']):
            params['BlockDeviceMapping.%d.DeviceName' % (index + 1)] = \
                mapping['DeviceName']
            params['BlockDeviceMapping.%d.VirtualName' % (index + 1)] = \
                mapping['VirtualName']

    # NOTE(review): 'object' shadows the builtin of the same name.
    object = self.connection.request(self.path, params=params).object
    nodes = self._to_nodes(object, 'instancesSet/item')

    for node in nodes:
        tags = {'Name': kwargs['name']}

        try:
            self.ex_create_tags(resource=node, tags=tags)
        except Exception:
            # Best-effort tagging: a node whose tags could not be created
            # is returned without the local name/tags metadata below.
            continue

        node.name = kwargs['name']
        node.extra.update({'tags': tags})

    # Preserve the historical return type: a single Node when exactly one
    # instance was launched, otherwise a list.
    if len(nodes) == 1:
        return nodes[0]
    else:
        return nodes
def reboot_node(self, node):
    """
    Reboot the given node via a RebootInstances call.

    @param node: Node to reboot.
    @type node: L{Node}

    @rtype: C{bool}
    """
    params = dict(self._pathlist('InstanceId', [node.id]),
                  Action='RebootInstances')
    response = self.connection.request(self.path, params=params).object
    return self._get_boolean(response)
def ex_start_node(self, node):
    """
    Start the node by passing in the node object; does not work with
    instance store backed instances.

    @param node: Node which should be used
    @type node: L{Node}

    @rtype: C{bool}
    """
    params = dict(self._pathlist('InstanceId', [node.id]),
                  Action='StartInstances')
    response = self.connection.request(self.path, params=params).object
    return self._get_state_boolean(response)
def ex_stop_node(self, node):
    """
    Stop the node by passing in the node object; does not work with
    instance store backed instances.

    @param node: Node which should be used
    @type node: L{Node}

    @rtype: C{bool}
    """
    params = dict(self._pathlist('InstanceId', [node.id]),
                  Action='StopInstances')
    response = self.connection.request(self.path, params=params).object
    return self._get_state_boolean(response)
def destroy_node(self, node):
    """
    Terminate the given node.

    @param node: Node to terminate.
    @type node: L{Node}

    @rtype: C{bool}
    """
    params = dict(self._pathlist('InstanceId', [node.id]),
                  Action='TerminateInstances')
    response = self.connection.request(self.path, params=params).object
    return self._get_terminate_boolean(response)
class EC2NodeDriver(BaseEC2NodeDriver):
    """
    Amazon EC2 node driver (default region: us-east-1).
    """
    connectionCls = EC2Connection
    type = Provider.EC2
    api_name = 'ec2_us_east'
    region_name = 'us-east-1'
    country = 'USA'
    name = 'Amazon EC2'
    website = 'http://aws.amazon.com/ec2/'
    path = '/'
    features = {'create_node': ['ssh_key']}

    # Map of EC2 instance state strings to libcloud NodeState values.
    NODE_STATE_MAP = {
        'pending': NodeState.PENDING,
        'running': NodeState.RUNNING,
        'shutting-down': NodeState.UNKNOWN,
        'terminated': NodeState.TERMINATED
    }
class IdempotentParamError(LibcloudError):
    """
    Request used the same client token as a previous,
    but non-identical request.
    """

    def __str__(self):
        # ``value`` is provided by the LibcloudError base class.
        return repr(self.value)
class EC2EUConnection(EC2Connection):
    """
    Connection class for EC2 in the Western Europe Region
    """
    # Regional API endpoint, resolved from the shared region table.
    host = REGION_DETAILS['eu-west-1']['endpoint']
class EC2EUNodeDriver(EC2NodeDriver):
    """
    Driver class for EC2 in the Western Europe Region.
    """
    connectionCls = EC2EUConnection
    api_name = 'ec2_eu_west'
    region_name = 'eu-west-1'
    country = 'IE'
    name = 'Amazon EC2 (eu-west-1)'
    friendly_name = 'Amazon Europe Ireland'
class EC2USWestConnection(EC2Connection):
    """
    Connection class for EC2 in the Western US Region
    """
    # Regional API endpoint, resolved from the shared region table.
    host = REGION_DETAILS['us-west-1']['endpoint']
class EC2USWestNodeDriver(EC2NodeDriver):
    """
    Driver class for EC2 in the Western US Region
    """
    connectionCls = EC2USWestConnection
    api_name = 'ec2_us_west'
    region_name = 'us-west-1'
    country = 'US'
    name = 'Amazon EC2 (us-west-1)'
    friendly_name = 'Amazon US N. California'
class EC2USWestOregonConnection(EC2Connection):
    """
    Connection class for EC2 in the Western US Region (Oregon).
    """
    # Regional API endpoint, resolved from the shared region table.
    host = REGION_DETAILS['us-west-2']['endpoint']
class EC2USWestOregonNodeDriver(EC2NodeDriver):
    """
    Driver class for EC2 in the US West Oregon region.
    """
    connectionCls = EC2USWestOregonConnection
    api_name = 'ec2_us_west_oregon'
    region_name = 'us-west-2'
    country = 'US'
    name = 'Amazon EC2 (us-west-2)'
    friendly_name = 'Amazon US West - Oregon'
class EC2APSEConnection(EC2Connection):
    """
    Connection class for EC2 in the Southeast Asia Pacific Region.
    """
    # Regional API endpoint, resolved from the shared region table.
    host = REGION_DETAILS['ap-southeast-1']['endpoint']
class EC2APNEConnection(EC2Connection):
    """
    Connection class for EC2 in the Northeast Asia Pacific Region.
    """
    # Regional API endpoint, resolved from the shared region table.
    host = REGION_DETAILS['ap-northeast-1']['endpoint']
class EC2APSENodeDriver(EC2NodeDriver):
    """
    Driver class for EC2 in the Southeast Asia Pacific Region.
    """
    connectionCls = EC2APSEConnection
    api_name = 'ec2_ap_southeast'
    region_name = 'ap-southeast-1'
    country = 'SG'
    name = 'Amazon EC2 (ap-southeast-1)'
    friendly_name = 'Amazon Asia-Pacific Singapore'
class EC2APNENodeDriver(EC2NodeDriver):
    """
    Driver class for EC2 in the Northeast Asia Pacific Region.
    """
    connectionCls = EC2APNEConnection
    api_name = 'ec2_ap_northeast'
    region_name = 'ap-northeast-1'
    country = 'JP'
    name = 'Amazon EC2 (ap-northeast-1)'
    friendly_name = 'Amazon Asia-Pacific Tokyo'
class EC2SAEastConnection(EC2Connection):
    """
    Connection class for EC2 in the South America (Sao Paulo) Region.
    """
    # Regional API endpoint, resolved from the shared region table.
    host = REGION_DETAILS['sa-east-1']['endpoint']
class EC2SAEastNodeDriver(EC2NodeDriver):
    """
    Driver class for EC2 in the South America (Sao Paulo) Region.
    """
    connectionCls = EC2SAEastConnection
    api_name = 'ec2_sa_east'
    region_name = 'sa-east-1'
    country = 'BR'
    name = 'Amazon EC2 (sa-east-1)'
    friendly_name = 'Amazon South America Sao Paulo'
class EC2APSESydneyConnection(EC2Connection):
    """
    Connection class for EC2 in the Southeast Asia Pacific (Sydney) Region.
    """
    # Regional API endpoint, resolved from the shared region table.
    host = REGION_DETAILS['ap-southeast-2']['endpoint']
class EC2APSESydneyNodeDriver(EC2NodeDriver):
    """
    Driver class for EC2 in the Southeast Asia Pacific (Sydney) Region.
    """
    connectionCls = EC2APSESydneyConnection
    api_name = 'ec2_ap_southeast_2'
    region_name = 'ap-southeast-2'
    country = 'AU'
    name = 'Amazon EC2 (ap-southeast-2)'
    friendly_name = 'Amazon Asia-Pacific Sydney'
class EucConnection(EC2Connection):
    """
    Connection class for Eucalyptus
    """
    # No fixed endpoint: the host is supplied by the driver at runtime.
    host = None
class EucNodeDriver(BaseEC2NodeDriver):
    """
    Driver class for Eucalyptus
    """

    name = 'Eucalyptus'
    website = 'http://www.eucalyptus.com/'
    api_name = 'ec2_us_east'
    region_name = 'us-east-1'
    connectionCls = EucConnection

    def __init__(self, key, secret=None, secure=True, host=None,
                 path=None, port=None):
        """
        @inherits: L{EC2NodeDriver.__init__}

        @param path: The host where the API can be reached.
        @type path: C{str}
        """
        # ``path`` is intentionally not forwarded to the base initializer;
        # it is stored on the instance below instead.
        super(EucNodeDriver, self).__init__(key, secret, secure, host, port)
        if path is None:
            path = "/services/Eucalyptus"
        self.path = path

    def list_locations(self):
        """
        Not supported by this driver.

        @raise NotImplementedError: always.
        """
        raise NotImplementedError(
            'list_locations not implemented for this driver')

    def _add_instance_filter(self, params, node):
        """
        Eucalyptus driver doesn't support filtering on instance id so this is a
        no-op.
        """
        pass

    def ex_create_tags(self, resource, tags):
        """
        Eucalyptus doesn't support creating tags, so this is a passthrough.

        @inherits: L{EC2NodeDriver.ex_create_tags}
        """
        pass
class NimbusConnection(EC2Connection):
    """
    Connection class for Nimbus
    """
    # No fixed endpoint: the host is supplied by the caller at runtime.
    host = None
class NimbusNodeDriver(BaseEC2NodeDriver):
    """
    Driver class for Nimbus
    """

    connectionCls = NimbusConnection
    type = Provider.NIMBUS
    api_name = 'nimbus'
    region_name = 'nimbus'
    country = 'Private'
    name = 'Nimbus'
    website = 'http://www.nimbusproject.org/'
    friendly_name = 'Nimbus Private Cloud'

    def ex_describe_addresses(self, nodes):
        """
        Nimbus doesn't support elastic IPs, so this is a passthrough.

        @inherits: L{EC2NodeDriver.ex_describe_addresses}
        """
        # Every node maps to an empty address list.
        return dict((node.id, []) for node in nodes)

    def ex_create_tags(self, resource, tags):
        """
        Nimbus doesn't support creating tags, so this is a passthrough.

        @inherits: L{EC2NodeDriver.ex_create_tags}
        """
        pass
| {
"content_hash": "410ea69fb559fa17b5130b85b9f08fb3",
"timestamp": "",
"source": "github",
"line_count": 1648,
"max_line_length": 79,
"avg_line_length": 32.28580097087379,
"alnum_prop": 0.5320164639990979,
"repo_name": "ConPaaS-team/conpaas",
"id": "c07a0828444a25dccb5d26ef69e7f21832e4b77e",
"size": "53989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conpaas-director/cpsdirector/iaas/libcloud/compute/drivers/ec2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "79"
},
{
"name": "Batchfile",
"bytes": "2136"
},
{
"name": "C",
"bytes": "12346"
},
{
"name": "CSS",
"bytes": "47680"
},
{
"name": "HTML",
"bytes": "5494"
},
{
"name": "Java",
"bytes": "404303"
},
{
"name": "JavaScript",
"bytes": "164519"
},
{
"name": "M4",
"bytes": "553"
},
{
"name": "Makefile",
"bytes": "78772"
},
{
"name": "Nginx",
"bytes": "1980"
},
{
"name": "PHP",
"bytes": "1900634"
},
{
"name": "Python",
"bytes": "2842443"
},
{
"name": "Shell",
"bytes": "232043"
},
{
"name": "Smarty",
"bytes": "15450"
}
],
"symlink_target": ""
} |
'use strict';

angular
  .module('app.controllers')
  .controller('AuthCtrl', function($scope, $location, Restangular) {
    // Redirect the browser to the Facebook OAuth endpoint, coming back
    // to the current page once authentication succeeds.
    $scope.signIn = function() {
      var returnTo = window.encodeURIComponent($location.absUrl());
      window.location =
        'https://api.tnyu.org/v2/auth/facebook?success=' + returnTo;
    };

    // Log out of the external service as well, then leave the app.
    $scope.signOut = function() {
      var returnTo = window.encodeURIComponent('http://google.com/');
      window.location =
        'https://api.tnyu.org/v2/auth/facebook/logout?doExternalServiceLogout=true&success=' + returnTo;
    };

    // Load the signed-in member's profile; a 401 means we are not
    // authenticated yet, so start the sign-in flow.
    Restangular.one('people/me')
      .get()
      .then(function(data) {
        $scope.user = data;
      })
      .catch(function(res) {
        if (res.data.errors[0].status === '401') {
          $scope.signIn();
        }
      });
  });
| {
"content_hash": "6d89928f5d5d28ef5e23a78342939761",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 98,
"avg_line_length": 25,
"alnum_prop": 0.6510344827586206,
"repo_name": "TechAtNYU/intranet",
"id": "75ebf861b40a4fa7cd0ef598315b9b1915006e2c",
"size": "725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/js/controllers/auth-ctrl.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23780"
},
{
"name": "HTML",
"bytes": "233942"
},
{
"name": "JavaScript",
"bytes": "183639"
}
],
"symlink_target": ""
} |
"""Standardize samples.
"""
# Author: Taku Yoshioka, Shohei Shimizu
# License: MIT
from copy import deepcopy
import numpy as np
def standardize_samples(xs, standardize):
    """Return a standardized copy of ``xs``.

    :param xs: Array of samples (observations x variables).
    :param standardize: One of ``'keepratio'``, ``'scaling'``,
        ``'commonscaling'``, ``True`` (full z-scoring per column) or
        ``False`` (return an unmodified copy).
    :raises ValueError: For any other value of ``standardize``.
    """
    xs = deepcopy(xs)

    if standardize == 'keepratio':
        # Center each column but divide by a single global std so the
        # relative scales of the variables are preserved.
        xs = (xs - np.mean(xs, axis=0)) / np.std(xs)
    elif standardize == 'scaling':
        xs = xs / np.std(xs, axis=0)
    elif standardize == 'commonscaling':
        xs = xs / np.std(xs)
    elif standardize is True:
        xs = (xs - np.mean(xs, axis=0)) / np.std(xs, axis=0)
    elif standardize is False:
        pass
    else:
        raise ValueError("Invalid value of standardize: %s" % standardize)

    return xs
| {
"content_hash": "17350239fac28f425abe805dea999f08",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 74,
"avg_line_length": 27.03846153846154,
"alnum_prop": 0.5789473684210527,
"repo_name": "taku-y/bmlingam",
"id": "768e17c3072928e4c88cca59ca60e9799bb9c485",
"size": "728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bmlingam/utils/standardize_samples.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "198143"
},
{
"name": "Shell",
"bytes": "2085"
}
],
"symlink_target": ""
} |
// NSBundle+DDGAdditions.h
//
// Copyright (c) 2014 DU DA GMBH (http://www.dudagroup.com)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#import <Foundation/Foundation.h>
// Category exposing convenience accessors for application metadata.
// NOTE(review): values presumably come from the bundle's Info.plist;
// confirm against the implementation file, which is not shown here.
@interface NSBundle (DDGAdditions)

/**
 Returns the name of the application.

 @return The name of the application.
 */
@property (nonatomic, readonly) NSString* appName;

/**
 Returns the version of the application.

 @return The version of the application.
 */
@property (nonatomic, readonly) NSString* appVersion;

@end
"content_hash": "3da1ab20a13bb2ea0a5685c1f18e0ff7",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 80,
"avg_line_length": 36.595238095238095,
"alnum_prop": 0.7586206896551724,
"repo_name": "dudagroup/DDGFoundation",
"id": "9dec1da6d9035a13e2253a0ada6d313b0c12cc9f",
"size": "1537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/additions/NSBundle+DDGAdditions.h",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2031"
},
{
"name": "Objective-C",
"bytes": "32974"
},
{
"name": "Ruby",
"bytes": "498"
}
],
"symlink_target": ""
} |
# Static inner class android.drm.DrmStore$DrmDeliveryType: a holder for
# DRM delivery-type constants (values 0x1-0x4 below).
.class public Landroid/drm/DrmStore$DrmDeliveryType;
.super Ljava/lang/Object;
.source "DrmStore.java"


# annotations
.annotation system Ldalvik/annotation/EnclosingClass;
    value = Landroid/drm/DrmStore;
.end annotation

.annotation system Ldalvik/annotation/InnerClass;
    accessFlags = 0x9
    name = "DrmDeliveryType"
.end annotation


# static fields
.field public static final COMBINED_DELIVERY:I = 0x2

.field public static final FORWARD_LOCK:I = 0x1

.field public static final SEPARATE_DELIVERY:I = 0x3

.field public static final SEPARATE_DELIVERY_FL:I = 0x4


# direct methods
# Default constructor: just delegates to Object.<init>.
.method public constructor <init>()V
    .locals 0

    .prologue
    .line 254
    invoke-direct {p0}, Ljava/lang/Object;-><init>()V

    return-void
.end method
| {
"content_hash": "475542fb4299660bd6ad8918fadd0381",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 55,
"avg_line_length": 20.97222222222222,
"alnum_prop": 0.7417218543046358,
"repo_name": "Liberations/Flyme5_devices_base_cm",
"id": "98e1f65882b33b5e770a78867a1250d451abb4f0",
"size": "755",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vendor/aosp/framework.jar.out/smali/android/drm/DrmStore$DrmDeliveryType.smali",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GLSL",
"bytes": "1500"
},
{
"name": "HTML",
"bytes": "96769"
},
{
"name": "Makefile",
"bytes": "11209"
},
{
"name": "Python",
"bytes": "1195"
},
{
"name": "Shell",
"bytes": "55270"
},
{
"name": "Smali",
"bytes": "160321888"
}
],
"symlink_target": ""
} |
#ifndef BOOST_ASIO_SEQ_PACKET_SOCKET_SERVICE_HPP
#define BOOST_ASIO_SEQ_PACKET_SOCKET_SERVICE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include <boost/asio/detail/config.hpp>
#include <cstddef>
#include <boost/asio/async_result.hpp>
#include <boost/asio/detail/type_traits.hpp>
#include <boost/asio/error.hpp>
#include <boost/asio/io_service.hpp>
#if defined(BOOST_ASIO_WINDOWS_RUNTIME)
# include <boost/asio/detail/null_socket_service.hpp>
#elif defined(BOOST_ASIO_HAS_IOCP)
# include <boost/asio/detail/win_iocp_socket_service.hpp>
#else
# include <boost/asio/detail/reactive_socket_service.hpp>
#endif
#include <boost/asio/detail/push_options.hpp>
namespace boost {
namespace asio {
/// Default service implementation for a sequenced packet socket.
///
/// This class is a thin facade: every operation is forwarded to a
/// platform-specific service_impl_type selected at compile time below
/// (null / IOCP / reactive).
template <typename Protocol>
class seq_packet_socket_service
#if defined(GENERATING_DOCUMENTATION)
  : public boost::asio::io_service::service
#else
  : public boost::asio::detail::service_base<
      seq_packet_socket_service<Protocol> >
#endif
{
public:
#if defined(GENERATING_DOCUMENTATION)
  /// The unique service identifier.
  static boost::asio::io_service::id id;
#endif

  /// The protocol type.
  typedef Protocol protocol_type;

  /// The endpoint type.
  typedef typename Protocol::endpoint endpoint_type;

private:
  // The type of the platform-specific implementation.
#if defined(BOOST_ASIO_WINDOWS_RUNTIME)
  typedef detail::null_socket_service<Protocol> service_impl_type;
#elif defined(BOOST_ASIO_HAS_IOCP)
  typedef detail::win_iocp_socket_service<Protocol> service_impl_type;
#else
  typedef detail::reactive_socket_service<Protocol> service_impl_type;
#endif

public:
  /// The type of a sequenced packet socket implementation.
#if defined(GENERATING_DOCUMENTATION)
  typedef implementation_defined implementation_type;
#else
  typedef typename service_impl_type::implementation_type implementation_type;
#endif

  /// (Deprecated: Use native_handle_type.) The native socket type.
#if defined(GENERATING_DOCUMENTATION)
  typedef implementation_defined native_type;
#else
  typedef typename service_impl_type::native_handle_type native_type;
#endif

  /// The native socket type.
#if defined(GENERATING_DOCUMENTATION)
  typedef implementation_defined native_handle_type;
#else
  typedef typename service_impl_type::native_handle_type native_handle_type;
#endif

  /// Construct a new sequenced packet socket service for the specified
  /// io_service.
  explicit seq_packet_socket_service(boost::asio::io_service& io_service)
    : boost::asio::detail::service_base<
        seq_packet_socket_service<Protocol> >(io_service),
      service_impl_(io_service)
  {
  }

  /// Construct a new sequenced packet socket implementation.
  void construct(implementation_type& impl)
  {
    service_impl_.construct(impl);
  }

#if defined(BOOST_ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)
  /// Move-construct a new sequenced packet socket implementation.
  void move_construct(implementation_type& impl,
      implementation_type& other_impl)
  {
    service_impl_.move_construct(impl, other_impl);
  }

  /// Move-assign from another sequenced packet socket implementation.
  void move_assign(implementation_type& impl,
      seq_packet_socket_service& other_service,
      implementation_type& other_impl)
  {
    service_impl_.move_assign(impl, other_service.service_impl_, other_impl);
  }

  /// Move-construct a new sequenced packet socket implementation from another
  /// protocol type.
  template <typename Protocol1>
  void converting_move_construct(implementation_type& impl,
      typename seq_packet_socket_service<
        Protocol1>::implementation_type& other_impl,
      typename enable_if<is_convertible<
        Protocol1, Protocol>::value>::type* = 0)
  {
    service_impl_.template converting_move_construct<Protocol1>(
        impl, other_impl);
  }
#endif // defined(BOOST_ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)

  /// Destroy a sequenced packet socket implementation.
  void destroy(implementation_type& impl)
  {
    service_impl_.destroy(impl);
  }

  /// Open a sequenced packet socket.
  ///
  /// Rejects any protocol whose type is not SOCK_SEQPACKET with
  /// error::invalid_argument.
  boost::system::error_code open(implementation_type& impl,
      const protocol_type& protocol, boost::system::error_code& ec)
  {
    if (protocol.type() == BOOST_ASIO_OS_DEF(SOCK_SEQPACKET))
      service_impl_.open(impl, protocol, ec);
    else
      ec = boost::asio::error::invalid_argument;
    return ec;
  }

  /// Assign an existing native socket to a sequenced packet socket.
  boost::system::error_code assign(implementation_type& impl,
      const protocol_type& protocol, const native_handle_type& native_socket,
      boost::system::error_code& ec)
  {
    return service_impl_.assign(impl, protocol, native_socket, ec);
  }

  /// Determine whether the socket is open.
  bool is_open(const implementation_type& impl) const
  {
    return service_impl_.is_open(impl);
  }

  /// Close a sequenced packet socket implementation.
  boost::system::error_code close(implementation_type& impl,
      boost::system::error_code& ec)
  {
    return service_impl_.close(impl, ec);
  }

  /// (Deprecated: Use native_handle().) Get the native socket implementation.
  native_type native(implementation_type& impl)
  {
    return service_impl_.native_handle(impl);
  }

  /// Get the native socket implementation.
  native_handle_type native_handle(implementation_type& impl)
  {
    return service_impl_.native_handle(impl);
  }

  /// Cancel all asynchronous operations associated with the socket.
  boost::system::error_code cancel(implementation_type& impl,
      boost::system::error_code& ec)
  {
    return service_impl_.cancel(impl, ec);
  }

  /// Determine whether the socket is at the out-of-band data mark.
  bool at_mark(const implementation_type& impl,
      boost::system::error_code& ec) const
  {
    return service_impl_.at_mark(impl, ec);
  }

  /// Determine the number of bytes available for reading.
  std::size_t available(const implementation_type& impl,
      boost::system::error_code& ec) const
  {
    return service_impl_.available(impl, ec);
  }

  /// Bind the sequenced packet socket to the specified local endpoint.
  boost::system::error_code bind(implementation_type& impl,
      const endpoint_type& endpoint, boost::system::error_code& ec)
  {
    return service_impl_.bind(impl, endpoint, ec);
  }

  /// Connect the sequenced packet socket to the specified endpoint.
  boost::system::error_code connect(implementation_type& impl,
      const endpoint_type& peer_endpoint, boost::system::error_code& ec)
  {
    return service_impl_.connect(impl, peer_endpoint, ec);
  }

  /// Start an asynchronous connect.
  template <typename ConnectHandler>
  BOOST_ASIO_INITFN_RESULT_TYPE(ConnectHandler,
      void (boost::system::error_code))
  async_connect(implementation_type& impl,
      const endpoint_type& peer_endpoint,
      BOOST_ASIO_MOVE_ARG(ConnectHandler) handler)
  {
    // async_result_init deduces the return object (future, coroutine, or
    // void for plain handlers) from the handler's type.
    detail::async_result_init<
      ConnectHandler, void (boost::system::error_code)> init(
        BOOST_ASIO_MOVE_CAST(ConnectHandler)(handler));

    service_impl_.async_connect(impl, peer_endpoint, init.handler);

    return init.result.get();
  }

  /// Set a socket option.
  template <typename SettableSocketOption>
  boost::system::error_code set_option(implementation_type& impl,
      const SettableSocketOption& option, boost::system::error_code& ec)
  {
    return service_impl_.set_option(impl, option, ec);
  }

  /// Get a socket option.
  template <typename GettableSocketOption>
  boost::system::error_code get_option(const implementation_type& impl,
      GettableSocketOption& option, boost::system::error_code& ec) const
  {
    return service_impl_.get_option(impl, option, ec);
  }

  /// Perform an IO control command on the socket.
  template <typename IoControlCommand>
  boost::system::error_code io_control(implementation_type& impl,
      IoControlCommand& command, boost::system::error_code& ec)
  {
    return service_impl_.io_control(impl, command, ec);
  }

  /// Gets the non-blocking mode of the socket.
  bool non_blocking(const implementation_type& impl) const
  {
    return service_impl_.non_blocking(impl);
  }

  /// Sets the non-blocking mode of the socket.
  boost::system::error_code non_blocking(implementation_type& impl,
      bool mode, boost::system::error_code& ec)
  {
    return service_impl_.non_blocking(impl, mode, ec);
  }

  /// Gets the non-blocking mode of the native socket implementation.
  bool native_non_blocking(const implementation_type& impl) const
  {
    return service_impl_.native_non_blocking(impl);
  }

  /// Sets the non-blocking mode of the native socket implementation.
  boost::system::error_code native_non_blocking(implementation_type& impl,
      bool mode, boost::system::error_code& ec)
  {
    return service_impl_.native_non_blocking(impl, mode, ec);
  }

  /// Get the local endpoint.
  endpoint_type local_endpoint(const implementation_type& impl,
      boost::system::error_code& ec) const
  {
    return service_impl_.local_endpoint(impl, ec);
  }

  /// Get the remote endpoint.
  endpoint_type remote_endpoint(const implementation_type& impl,
      boost::system::error_code& ec) const
  {
    return service_impl_.remote_endpoint(impl, ec);
  }

  /// Disable sends or receives on the socket.
  boost::system::error_code shutdown(implementation_type& impl,
      socket_base::shutdown_type what, boost::system::error_code& ec)
  {
    return service_impl_.shutdown(impl, what, ec);
  }

  /// Send the given data to the peer.
  template <typename ConstBufferSequence>
  std::size_t send(implementation_type& impl,
      const ConstBufferSequence& buffers,
      socket_base::message_flags flags, boost::system::error_code& ec)
  {
    return service_impl_.send(impl, buffers, flags, ec);
  }

  /// Start an asynchronous send.
  template <typename ConstBufferSequence, typename WriteHandler>
  BOOST_ASIO_INITFN_RESULT_TYPE(WriteHandler,
      void (boost::system::error_code, std::size_t))
  async_send(implementation_type& impl,
      const ConstBufferSequence& buffers,
      socket_base::message_flags flags,
      BOOST_ASIO_MOVE_ARG(WriteHandler) handler)
  {
    detail::async_result_init<
      WriteHandler, void (boost::system::error_code, std::size_t)> init(
        BOOST_ASIO_MOVE_CAST(WriteHandler)(handler));

    service_impl_.async_send(impl, buffers, flags, init.handler);

    return init.result.get();
  }

  /// Receive some data from the peer.
  ///
  /// out_flags is written by the implementation (e.g. end-of-record
  /// information for sequenced packet protocols).
  template <typename MutableBufferSequence>
  std::size_t receive(implementation_type& impl,
      const MutableBufferSequence& buffers, socket_base::message_flags in_flags,
      socket_base::message_flags& out_flags, boost::system::error_code& ec)
  {
    return service_impl_.receive_with_flags(impl,
        buffers, in_flags, out_flags, ec);
  }

  /// Start an asynchronous receive.
  template <typename MutableBufferSequence, typename ReadHandler>
  BOOST_ASIO_INITFN_RESULT_TYPE(ReadHandler,
      void (boost::system::error_code, std::size_t))
  async_receive(implementation_type& impl,
      const MutableBufferSequence& buffers, socket_base::message_flags in_flags,
      socket_base::message_flags& out_flags,
      BOOST_ASIO_MOVE_ARG(ReadHandler) handler)
  {
    detail::async_result_init<
      ReadHandler, void (boost::system::error_code, std::size_t)> init(
        BOOST_ASIO_MOVE_CAST(ReadHandler)(handler));

    service_impl_.async_receive_with_flags(impl,
        buffers, in_flags, out_flags, init.handler);

    return init.result.get();
  }

private:
  // Destroy all user-defined handler objects owned by the service.
  void shutdown_service()
  {
    service_impl_.shutdown_service();
  }

  // The platform-specific implementation.
  service_impl_type service_impl_;
};
} // namespace asio
} // namespace boost
#include <boost/asio/detail/pop_options.hpp>
#endif // BOOST_ASIO_SEQ_PACKET_SOCKET_SERVICE_HPP
| {
"content_hash": "9d6a92881685a738e03741a803cbbb26",
"timestamp": "",
"source": "github",
"line_count": 373,
"max_line_length": 80,
"avg_line_length": 33.14209115281501,
"alnum_prop": 0.6908267270668177,
"repo_name": "Truthsayer1/Ember",
"id": "9adbeb0fe2a31fe7032085fb37d321ca67f5acb3",
"size": "12681",
"binary": false,
"copies": "2",
"ref": "refs/heads/patch-1",
"path": "inc/boost_1_55_0/boost/asio/seq_packet_socket_service.hpp",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "51312"
},
{
"name": "Batchfile",
"bytes": "1731"
},
{
"name": "C",
"bytes": "5939002"
},
{
"name": "C++",
"bytes": "97911838"
},
{
"name": "CMake",
"bytes": "179"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50620"
},
{
"name": "M4",
"bytes": "10000"
},
{
"name": "Makefile",
"bytes": "55601"
},
{
"name": "NSIS",
"bytes": "5914"
},
{
"name": "Objective-C",
"bytes": "858"
},
{
"name": "Objective-C++",
"bytes": "6899"
},
{
"name": "Perl",
"bytes": "6275"
},
{
"name": "Python",
"bytes": "54355"
},
{
"name": "QMake",
"bytes": "14043"
},
{
"name": "Shell",
"bytes": "9237"
}
],
"symlink_target": ""
} |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace ResidentialManager
{
    /// <summary>
    /// Marker subclass of <see cref="Document"/> for legal paperwork;
    /// currently adds no members of its own.
    /// </summary>
    class LegalDocuments : Document
    {
    }
}
| {
"content_hash": "ba0c62ebac28cbde5e1d8a5a614d81a1",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 34,
"avg_line_length": 17.5,
"alnum_prop": 0.7095238095238096,
"repo_name": "ttitto/TelerikAcademy",
"id": "c6b207b9e8817f2a0ea8c77eaafc5f62f7ee23dc",
"size": "212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CSharpOOP/Projects/ResidentialManager/ResidentialManager/LegalDocuments.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "1495784"
},
{
"name": "CSS",
"bytes": "177530"
},
{
"name": "JavaScript",
"bytes": "23583"
},
{
"name": "PowerShell",
"bytes": "177966"
}
],
"symlink_target": ""
} |
<?php
namespace App\Test\TestCase\Controller;
use App\Controller\PlayersController;
use Cake\TestSuite\IntegrationTestCase;
/**
* App\Controller\PlayersController Test Case
*/
class PlayersControllerTest extends IntegrationTestCase {

    /**
     * Fixture tables loaded before each test in this case runs.
     *
     * @var array
     */
    public $fixtures = [
        'app.players', 'app.player_specialisations', 'app.clubs',
        'app.leagues', 'app.teams', 'app.batsmen', 'app.innings',
        'app.matches', 'app.venues', 'app.formats', 'app.wickets',
        'app.bowlers', 'app.squads'
    ];

    /**
     * Placeholder for the index action test.
     *
     * @return void
     */
    public function testIndex() {
        $this->markTestIncomplete('testIndex not implemented.');
    }

    /**
     * Placeholder for the view action test.
     *
     * @return void
     */
    public function testView() {
        $this->markTestIncomplete('testView not implemented.');
    }

    /**
     * Placeholder for the add action test.
     *
     * @return void
     */
    public function testAdd() {
        $this->markTestIncomplete('testAdd not implemented.');
    }

    /**
     * Placeholder for the edit action test.
     *
     * @return void
     */
    public function testEdit() {
        $this->markTestIncomplete('testEdit not implemented.');
    }

    /**
     * Placeholder for the delete action test.
     *
     * @return void
     */
    public function testDelete() {
        $this->markTestIncomplete('testDelete not implemented.');
    }
}
| {
"content_hash": "3d5dfff0a868e1b44ebf26a2bfc58283",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 59,
"avg_line_length": 15.76923076923077,
"alnum_prop": 0.6658536585365854,
"repo_name": "davidyell/Cricketeer",
"id": "56b6c148b546cce9a378ab8f9a092ddaec9f1645",
"size": "1230",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/TestCase/Controller/PlayersControllerTest.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "310"
},
{
"name": "Batchfile",
"bytes": "955"
},
{
"name": "CSS",
"bytes": "4363"
},
{
"name": "HTML",
"bytes": "7450"
},
{
"name": "JavaScript",
"bytes": "44108"
},
{
"name": "PHP",
"bytes": "394738"
},
{
"name": "Ruby",
"bytes": "1773"
},
{
"name": "Shell",
"bytes": "1352"
}
],
"symlink_target": ""
} |
/// <reference path="../Assets/Vectors/Vector2d.ts" />
/// <reference path="../Interfaces/IMoveable.ts" />
/// <reference path="../Interfaces/IUpdateable.ts" />
/// <reference path="../GameTime.ts" />
var EndGate;
(function (EndGate) {
    (function (MovementControllers) {
        /**
         * Abstract base that stores moveable objects and pushes its own
         * Position/Rotation onto all of them each update.
         */
        var MovementController = (function () {
            /**
             * Should only ever be called by derived classes.
             * @param moveables Moveable objects to synchronize.
             */
            function MovementController(moveables) {
                var initialPosition = moveables.length > 0
                    ? moveables[0].Position
                    : EndGate.Vector2d.Zero;
                this.Position = initialPosition;
                this.Velocity = EndGate.Vector2d.Zero;
                this.Rotation = 0;
                this._frozen = false;
                this._moveables = moveables;
            }
            /**
             * Marks the controller frozen; frozen controllers report
             * IsMoving() as false.
             */
            MovementController.prototype.Freeze = function () {
                this._frozen = true;
            };
            /**
             * Clears the frozen flag set by Freeze.
             */
            MovementController.prototype.Thaw = function () {
                this._frozen = false;
            };
            /**
             * True when the controller is not frozen and its velocity is
             * non-zero.
             */
            MovementController.prototype.IsMoving = function () {
                if (this._frozen) {
                    return false;
                }
                return !this.Velocity.IsZero();
            };
            /**
             * Copies Position and Rotation onto every tracked moveable.
             * NOTE(review): this runs even while frozen — confirm derived
             * classes gate velocity application on the frozen flag instead.
             * @param gameTime The current game time object.
             */
            MovementController.prototype.Update = function (gameTime) {
                var i, moveable;
                for (i = 0; i < this._moveables.length; i++) {
                    moveable = this._moveables[i];
                    moveable.Position = this.Position;
                    moveable.Rotation = this.Rotation;
                }
            };
            return MovementController;
        })();
        MovementControllers.MovementController = MovementController;
    })(EndGate.MovementControllers || (EndGate.MovementControllers = {}));
    var MovementControllers = EndGate.MovementControllers;
})(EndGate || (EndGate = {}));
| {
"content_hash": "c95d662bf4800e2805a3309b1992471e",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 138,
"avg_line_length": 41.6,
"alnum_prop": 0.5612980769230769,
"repo_name": "NTaylorMullen/EndGate",
"id": "4882eb55bbc5fbb09220ce10f1dd97b04269b362",
"size": "2496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "EndGate/EndGate.Core.JS/MovementControllers/MovementController.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "114"
},
{
"name": "C#",
"bytes": "5720"
},
{
"name": "CSS",
"bytes": "13516"
},
{
"name": "JavaScript",
"bytes": "1337546"
},
{
"name": "PowerShell",
"bytes": "356"
},
{
"name": "Shell",
"bytes": "298"
},
{
"name": "TypeScript",
"bytes": "647350"
}
],
"symlink_target": ""
} |
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package tests.gl_320.fbo;
import com.jogamp.opengl.GL;
import static com.jogamp.opengl.GL2GL3.*;
import com.jogamp.opengl.GL3;
import com.jogamp.opengl.util.GLBuffers;
import com.jogamp.opengl.util.glsl.ShaderCode;
import com.jogamp.opengl.util.glsl.ShaderProgram;
import glm.glm;
import glm.mat._4.Mat4;
import framework.BufferUtils;
import framework.Profile;
import framework.Semantic;
import framework.Test;
import glm.vec._2.Vec2;
import glm.vec._4.Vec4;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.FloatBuffer;
import java.nio.IntBuffer;
import java.nio.ShortBuffer;
import java.util.logging.Level;
import java.util.logging.Logger;
import jgli.Texture2d;
/**
*
* @author GBarbieri
*/
public class Gl_320_fbo_srgb_decode_ext extends Test {
    // NOTE(review): the constructed instance is never used; the framework's
    // Test base class presumably launches the sample from its constructor —
    // confirm against framework.Test.
    public static void main(String[] args) {
        Gl_320_fbo_srgb_decode_ext gl_320_srgb_decode_ext = new Gl_320_fbo_srgb_decode_ext();
    }
    public Gl_320_fbo_srgb_decode_ext() {
        super("gl-320-fbo-srgb-decode-ext", Profile.CORE, 3, 2);
    }
    // Shader / texture resource names, resolved against SHADERS_ROOT and
    // the framework-provided TEXTURE_ROOT.
    private final String SHADERS_SOURCE_TEXTURE = "fbo-srgb-decode";
    private final String SHADERS_SOURCE_SPLASH = "fbo-srgb-decode-blit";
    private final String SHADERS_ROOT = "src/data/gl_320/fbo";
    private final String TEXTURE_DIFFUSE = "kueken7_rgba8_srgb.dds";
    // Fullscreen quad: 4 interleaved vertices (position vec2, texcoord vec2).
    private int vertexCount = 4;
    private int vertexSize = vertexCount * glf.Vertex_v2fv2f.SIZE;
    private float[] vertexData = {
        -1.0f, -1.0f,/**/ 0.0f, 1.0f,
        +1.0f, -1.0f,/**/ 1.0f, 1.0f,
        +1.0f, +1.0f,/**/ 1.0f, 0.0f,
        -1.0f, +1.0f,/**/ 0.0f, 0.0f};
    // Two triangles covering the quad.
    private int elementCount = 6;
    private int elementSize = elementCount * Short.BYTES;
    private short[] elementData = {
        0, 1, 2,
        2, 3, 0};
    // Symbolic indices into the GL object-name buffers below.
    private class Buffer {
        public static final int VERTEX = 0;
        public static final int ELEMENT = 1;
        public static final int TRANSFORM = 2;
        public static final int MAX = 3;
    }
    private class Texture {
        public static final int DIFFUSE = 0;
        public static final int COLORBUFFER = 1;
        public static final int RENDERBUFFER = 2;
        public static final int MAX = 3;
    }
    private class Program {
        public static final int TEXTURE = 0;
        public static final int SPLASH = 1;
        public static final int MAX = 2;
    }
    private class Shader {
        public static final int VERT_TEXTURE = 0;
        public static final int FRAG_TEXTURE = 1;
        public static final int VERT_SPLASH = 2;
        public static final int FRAG_SPLASH = 3;
        public static final int MAX = 4;
    }
    // framebufferScale: off-screen framebuffer is rendered at 2x window size.
    private int framebufferScale = 2, uniformTransform;
    private int[] programName = new int[Program.MAX], uniformDiffuse = new int[Program.MAX];
    private IntBuffer vertexArrayName = GLBuffers.newDirectIntBuffer(Program.MAX),
            framebufferName = GLBuffers.newDirectIntBuffer(1), bufferName = GLBuffers.newDirectIntBuffer(Buffer.MAX),
            textureName = GLBuffers.newDirectIntBuffer(Texture.MAX);
    // Clear color is converted linear -> sRGB up front because the color
    // attachment is an sRGB texture written without automatic conversion.
    private FloatBuffer clearColorSRGB
            = GLBuffers.newDirectFloatBuffer(new Vec4(1.0f, 0.5f, 0.0f, 1.0f).convertLinearToSRGB().toFA_()),
            clearDepth = GLBuffers.newDirectFloatBuffer(new float[]{1.0f});
    /**
     * One-time GL setup; each init step runs only if the previous succeeded.
     */
    @Override
    protected boolean begin(GL gl) {
        GL3 gl3 = (GL3) gl;
        boolean validated = true;
        if (validated) {
            validated = initProgram(gl3);
        }
        if (validated) {
            validated = initBuffer(gl3);
        }
        if (validated) {
            validated = initVertexArray(gl3);
        }
        if (validated) {
            validated = initTexture(gl3);
        }
        if (validated) {
            validated = initFramebuffer(gl3);
        }
        return validated;
    }
    /**
     * Compiles/links the scene ("texture") and blit ("splash") programs and
     * wires their uniform/attribute bindings.
     */
    private boolean initProgram(GL3 gl3) {
        boolean validated = true;
        ShaderCode[] shaderCode = new ShaderCode[Shader.MAX];
        if (validated) {
            shaderCode[Shader.VERT_TEXTURE] = ShaderCode.create(gl3, GL_VERTEX_SHADER, this.getClass(), SHADERS_ROOT,
                    null, SHADERS_SOURCE_TEXTURE, "vert", null, true);
            shaderCode[Shader.FRAG_TEXTURE] = ShaderCode.create(gl3, GL_FRAGMENT_SHADER, this.getClass(), SHADERS_ROOT,
                    null, SHADERS_SOURCE_TEXTURE, "frag", null, true);
            ShaderProgram shaderProgram = new ShaderProgram();
            shaderProgram.add(shaderCode[Shader.VERT_TEXTURE]);
            shaderProgram.add(shaderCode[Shader.FRAG_TEXTURE]);
            shaderProgram.init(gl3);
            programName[Program.TEXTURE] = shaderProgram.program();
            // Attribute/fragment locations must be bound before linking.
            gl3.glBindAttribLocation(programName[Program.TEXTURE], Semantic.Attr.POSITION, "position");
            gl3.glBindAttribLocation(programName[Program.TEXTURE], Semantic.Attr.TEXCOORD, "texCoord");
            gl3.glBindFragDataLocation(programName[Program.TEXTURE], Semantic.Frag.COLOR, "color");
            shaderProgram.link(gl3, System.out);
        }
        if (validated) {
            shaderCode[Shader.VERT_SPLASH] = ShaderCode.create(gl3, GL_VERTEX_SHADER, this.getClass(), SHADERS_ROOT,
                    null, SHADERS_SOURCE_SPLASH, "vert", null, true);
            shaderCode[Shader.FRAG_SPLASH] = ShaderCode.create(gl3, GL_FRAGMENT_SHADER, this.getClass(), SHADERS_ROOT,
                    null, SHADERS_SOURCE_SPLASH, "frag", null, true);
            ShaderProgram shaderProgram = new ShaderProgram();
            shaderProgram.add(shaderCode[Shader.VERT_SPLASH]);
            shaderProgram.add(shaderCode[Shader.FRAG_SPLASH]);
            shaderProgram.init(gl3);
            programName[Program.SPLASH] = shaderProgram.program();
            gl3.glBindFragDataLocation(programName[Program.SPLASH], Semantic.Frag.COLOR, "color");
            shaderProgram.link(gl3, System.out);
        }
        if (validated) {
            // Both programs sample texture unit 0.
            uniformTransform = gl3.glGetUniformBlockIndex(programName[Program.TEXTURE], "Transform");
            uniformDiffuse[Program.TEXTURE] = gl3.glGetUniformLocation(programName[Program.TEXTURE], "diffuse");
            uniformDiffuse[Program.SPLASH] = gl3.glGetUniformLocation(programName[Program.SPLASH], "diffuse");
            gl3.glUseProgram(programName[Program.TEXTURE]);
            gl3.glUniform1i(uniformDiffuse[Program.TEXTURE], 0);
            gl3.glUniformBlockBinding(programName[Program.TEXTURE], uniformTransform, Semantic.Uniform.TRANSFORM0);
            gl3.glUseProgram(programName[Program.SPLASH]);
            gl3.glUniform1i(uniformDiffuse[Program.SPLASH], 0);
        }
        return validated & checkError(gl3, "initProgram");
    }
    /**
     * Creates the element, vertex and (alignment-padded) uniform buffers.
     */
    private boolean initBuffer(GL3 gl3) {
        ShortBuffer elementBuffer = GLBuffers.newDirectShortBuffer(elementData);
        FloatBuffer vertexBuffer = GLBuffers.newDirectFloatBuffer(vertexData);
        IntBuffer uniformBufferOffset = GLBuffers.newDirectIntBuffer(1);
        gl3.glGenBuffers(Buffer.MAX, bufferName);
        gl3.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bufferName.get(Buffer.ELEMENT));
        gl3.glBufferData(GL_ELEMENT_ARRAY_BUFFER, elementSize, elementBuffer, GL_STATIC_DRAW);
        gl3.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
        gl3.glBindBuffer(GL_ARRAY_BUFFER, bufferName.get(Buffer.VERTEX));
        gl3.glBufferData(GL_ARRAY_BUFFER, vertexSize, vertexBuffer, GL_STATIC_DRAW);
        gl3.glBindBuffer(GL_ARRAY_BUFFER, 0);
        // Round the uniform block size up to the implementation's alignment.
        gl3.glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, uniformBufferOffset);
        int uniformBlockSize = Math.max(Mat4.SIZE, uniformBufferOffset.get(0));
        gl3.glBindBuffer(GL_UNIFORM_BUFFER, bufferName.get(Buffer.TRANSFORM));
        gl3.glBufferData(GL_UNIFORM_BUFFER, uniformBlockSize, null, GL_DYNAMIC_DRAW);
        gl3.glBindBuffer(GL_UNIFORM_BUFFER, 0);
        BufferUtils.destroyDirectBuffer(elementBuffer);
        BufferUtils.destroyDirectBuffer(vertexBuffer);
        BufferUtils.destroyDirectBuffer(uniformBufferOffset);
        return true;
    }
    /**
     * Loads the sRGB diffuse texture (with sRGB decode disabled via
     * EXT_texture_sRGB_decode) and allocates the color/depth render targets.
     */
    private boolean initTexture(GL3 gl3) {
        boolean validated = true;
        try {
            jgli.Texture2d texture = new Texture2d(jgli.Load.load(TEXTURE_ROOT + "/" + TEXTURE_DIFFUSE));
            assert (!texture.empty());
            gl3.glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
            gl3.glGenTextures(Texture.MAX, textureName);
            gl3.glActiveTexture(GL_TEXTURE0);
            gl3.glBindTexture(GL_TEXTURE_2D, textureName.get(Texture.DIFFUSE));
            gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
            gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, texture.levels() - 1);
            gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
            gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
            gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
            gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
            // Skip automatic sRGB -> linear conversion on fetch.
            gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SRGB_DECODE_EXT, GL_SKIP_DECODE_EXT);
            jgli.Gl.Format format = jgli.Gl.translate(texture.format());
            for (int level = 0; level < texture.levels(); ++level) {
                gl3.glTexImage2D(GL_TEXTURE_2D, level,
                        format.internal.value,
                        texture.dimensions(level)[0], texture.dimensions(level)[1],
                        0,
                        format.external.value, format.type.value,
                        texture.data(level));
            }
            gl3.glActiveTexture(GL_TEXTURE0);
            gl3.glBindTexture(GL_TEXTURE_2D, textureName.get(Texture.COLORBUFFER));
            gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
            gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
            gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
            gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
            gl3.glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8_ALPHA8, windowSize.x * framebufferScale,
                    windowSize.y * framebufferScale, 0, GL_RGBA, GL_UNSIGNED_BYTE, null);
            gl3.glActiveTexture(GL_TEXTURE0);
            gl3.glBindTexture(GL_TEXTURE_2D, textureName.get(Texture.RENDERBUFFER));
            gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
            gl3.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
            gl3.glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT24, windowSize.x * framebufferScale,
                    windowSize.y * framebufferScale, 0, GL_DEPTH_COMPONENT, GL_FLOAT, null);
            gl3.glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
        } catch (IOException ex) {
            // NOTE(review): 'validated' is still true here, so a failed
            // texture load is logged but reported as success — confirm
            // whether this should return false instead.
            Logger.getLogger(Gl_320_fbo_srgb_decode_ext.class.getName()).log(Level.SEVERE, null, ex);
        }
        return validated;
    }
    /**
     * Builds the two VAOs; the splash pass VAO is intentionally empty
     * (attribute-less draw).
     */
    private boolean initVertexArray(GL3 gl3) {
        gl3.glGenVertexArrays(Program.MAX, vertexArrayName);
        gl3.glBindVertexArray(vertexArrayName.get(Program.TEXTURE));
        {
            gl3.glBindBuffer(GL_ARRAY_BUFFER, bufferName.get(Buffer.VERTEX));
            gl3.glVertexAttribPointer(Semantic.Attr.POSITION, 2, GL_FLOAT, false, glf.Vertex_v2fv2f.SIZE, 0);
            gl3.glVertexAttribPointer(Semantic.Attr.TEXCOORD, 2, GL_FLOAT, false, glf.Vertex_v2fv2f.SIZE, Vec2.SIZE);
            gl3.glBindBuffer(GL_ARRAY_BUFFER, 0);
            gl3.glEnableVertexAttribArray(Semantic.Attr.POSITION);
            gl3.glEnableVertexAttribArray(Semantic.Attr.TEXCOORD);
            gl3.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bufferName.get(Buffer.ELEMENT));
        }
        gl3.glBindVertexArray(0);
        gl3.glBindVertexArray(vertexArrayName.get(Program.SPLASH));
        gl3.glBindVertexArray(0);
        return true;
    }
    /**
     * Attaches the color and depth textures to the off-screen framebuffer.
     */
    private boolean initFramebuffer(GL3 gl3) {
        gl3.glGenFramebuffers(1, framebufferName);
        gl3.glBindFramebuffer(GL_FRAMEBUFFER, framebufferName.get(0));
        gl3.glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, textureName.get(Texture.COLORBUFFER), 0);
        gl3.glFramebufferTexture(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, textureName.get(Texture.RENDERBUFFER), 0);
        if (!isFramebufferComplete(gl3, framebufferName.get(0))) {
            return false;
        }
        gl3.glBindFramebuffer(GL_FRAMEBUFFER, 0);
        return true;
    }
    /**
     * Per-frame: update the transform UBO, draw the quad into the scaled
     * off-screen framebuffer, then blit it to the window via the splash pass.
     */
    @Override
    protected boolean render(GL gl) {
        GL3 gl3 = (GL3) gl;
        {
            gl3.glBindBuffer(GL_UNIFORM_BUFFER, bufferName.get(Buffer.TRANSFORM));
            ByteBuffer pointer = gl3.glMapBufferRange(GL_UNIFORM_BUFFER,
                    0, Mat4.SIZE, GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);
            Mat4 projection = glm.perspective_((float) Math.PI * 0.25f, (float) windowSize.x / windowSize.y, 0.1f, 100.0f);
            pointer.asFloatBuffer().put(projection.mul(viewMat4()).toFa_());
            // Make sure the uniform buffer is uploaded
            gl3.glUnmapBuffer(GL_UNIFORM_BUFFER);
        }
        {
            gl3.glEnable(GL_DEPTH_TEST);
            gl3.glDepthFunc(GL_LESS);
            gl3.glViewport(0, 0, windowSize.x * framebufferScale, windowSize.y * framebufferScale);
            gl3.glBindFramebuffer(GL_FRAMEBUFFER, framebufferName.get(0));
            gl3.glClearBufferfv(GL_DEPTH, 0, clearDepth);
            gl3.glClearBufferfv(GL_COLOR, 0, clearColorSRGB);
            // TextureName[texture::DIFFUSE] is a sRGB texture which sRGB conversion on fetch has been disabled
            // Hence in the shader, the value is stored as sRGB so we should not convert it to sRGB.
            gl3.glUseProgram(programName[Program.TEXTURE]);
            gl3.glActiveTexture(GL_TEXTURE0);
            gl3.glBindTexture(GL_TEXTURE_2D, textureName.get(Texture.DIFFUSE));
            gl3.glBindVertexArray(vertexArrayName.get(Program.TEXTURE));
            gl3.glBindBufferBase(GL_UNIFORM_BUFFER, Semantic.Uniform.TRANSFORM0, bufferName.get(Buffer.TRANSFORM));
            gl3.glDrawElementsInstancedBaseVertex(GL_TRIANGLES, elementCount, GL_UNSIGNED_SHORT, 0, 2, 0);
        }
        {
            gl3.glDisable(GL_DEPTH_TEST);
            gl3.glViewport(0, 0, windowSize.x, windowSize.y);
            gl3.glBindFramebuffer(GL_FRAMEBUFFER, 0);
            gl3.glUseProgram(programName[Program.SPLASH]);
            gl3.glActiveTexture(GL_TEXTURE0);
            gl3.glBindVertexArray(vertexArrayName.get(Program.SPLASH));
            gl3.glBindTexture(GL_TEXTURE_2D, textureName.get(Texture.COLORBUFFER));
            gl3.glDrawArraysInstanced(GL_TRIANGLES, 0, 3, 1);
        }
        return true;
    }
    /**
     * Releases all GL objects and the direct NIO buffers owned by this test.
     */
    @Override
    protected boolean end(GL gl) {
        GL3 gl3 = (GL3) gl;
        gl3.glDeleteFramebuffers(1, framebufferName);
        gl3.glDeleteProgram(programName[Program.SPLASH]);
        gl3.glDeleteProgram(programName[Program.TEXTURE]);
        gl3.glDeleteBuffers(Buffer.MAX, bufferName);
        gl3.glDeleteTextures(Texture.MAX, textureName);
        gl3.glDeleteVertexArrays(Program.MAX, vertexArrayName);
        BufferUtils.destroyDirectBuffer(framebufferName);
        BufferUtils.destroyDirectBuffer(bufferName);
        BufferUtils.destroyDirectBuffer(textureName);
        BufferUtils.destroyDirectBuffer(vertexArrayName);
        BufferUtils.destroyDirectBuffer(clearColorSRGB);
        BufferUtils.destroyDirectBuffer(clearDepth);
        return true;
    }
}
| {
"content_hash": "e2a7eb9449b510485b7fd291439e9d4e",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 123,
"avg_line_length": 40.14179104477612,
"alnum_prop": 0.6354960649439177,
"repo_name": "elect86/jogl-samples",
"id": "25c1b71472c5628f39c535eb3cda06d29de7ac35",
"size": "16137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jogl-samples/src/tests/gl_320/fbo/Gl_320_fbo_srgb_decode_ext.java",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1322"
},
{
"name": "GLSL",
"bytes": "234683"
},
{
"name": "HTML",
"bytes": "106288"
},
{
"name": "Java",
"bytes": "2843404"
},
{
"name": "Shell",
"bytes": "9112"
}
],
"symlink_target": ""
} |
/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef __adc_H
#define __adc_H
#ifdef __cplusplus
extern "C" {
#endif
/* Includes ------------------------------------------------------------------*/
#include "stm32f4xx_hal.h"
/* USER CODE BEGIN Includes */
/* USER CODE END Includes */
/* ADC1 HAL handle; defined in the matching .c file (presumably adc.c). */
extern ADC_HandleTypeDef hadc1;
/* USER CODE BEGIN Private defines */
/* USER CODE END Private defines */
/* Global error hook provided elsewhere in the project. */
extern void Error_Handler(void);
/* Configures and initializes the ADC1 peripheral via the HAL. */
void MX_ADC1_Init(void);
/* USER CODE BEGIN Prototypes */
/* USER CODE END Prototypes */
#ifdef __cplusplus
}
#endif
#endif /* __adc_H */
/**
  * @}
  */
/**
  * @}
  */
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
| {
"content_hash": "cfc416c5377ec34ccdfa946765cc4db6",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 80,
"avg_line_length": 17.88372093023256,
"alnum_prop": 0.4928478543563069,
"repo_name": "Aleksey710/Test_STM32F407VET6",
"id": "1eb31d5654b43f6561724dea5d13ffeb83204a2d",
"size": "2791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CubeMX/Inc/adc.h",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "24855"
},
{
"name": "C",
"bytes": "29632850"
},
{
"name": "C++",
"bytes": "236590"
},
{
"name": "HTML",
"bytes": "10914"
},
{
"name": "Objective-C",
"bytes": "1848"
},
{
"name": "QML",
"bytes": "39025"
}
],
"symlink_target": ""
} |
import * as connectionsApi from '../api/connections-api';
export const REQUEST_CONNECTIONS_DEPARTURING_NEXT = 'REQUEST_CONNECTIONS_DEPARTURING_NEXT';
export const REQUEST_CONNECTIONS_BY_PLACES = 'REQUEST_CONNECTIONS_BY_PLACES';
export const RECEIVE_CONNECTIONS_DEPARTURING_NEXT = 'RECEIVE_CONNECTIONS_DEPARTURING_NEXT';
export const RECEIVE_CONNECTIONS_BY_PLACES = 'RECEIVE_CONNECTIONS_BY_PLACES';
export function requestConnectionsDeparturingNext() {
    // Dispatched when loading of the next departing connections begins.
    const action = { type: REQUEST_CONNECTIONS_DEPARTURING_NEXT };
    return action;
}
export function requestConnectionsByPlaces() {
    // Dispatched when a connections-by-places lookup begins.
    const action = { type: REQUEST_CONNECTIONS_BY_PLACES };
    return action;
}
export function receiveConnectionsDeparturingNext(connections) {
    // Wraps the fetched connections in the action payload.
    return {
        payload: { connections },
        type: RECEIVE_CONNECTIONS_DEPARTURING_NEXT
    };
}
export function receiveConnectionsByPlaces(connections) {
    // Wraps the fetched connections in the action payload.
    return {
        payload: { connections },
        type: RECEIVE_CONNECTIONS_BY_PLACES
    };
}
// Async actions
export function fetchConnectionsDeparturingNext() {
    // Thunk: flag the request, then resolve it through the connections API.
    return (dispatch) => {
        dispatch(requestConnectionsDeparturingNext());
        const onSuccess = response =>
            dispatch(receiveConnectionsDeparturingNext(response.body));
        const onFailure = err => {}; // API errors are deliberately swallowed
        return connectionsApi.getConnectionsDeparturingNext()
            .then(onSuccess, onFailure);
    };
}
export function fetchConnectionsByPlaces(from, to) {
    // Thunk: flag the request, then resolve the from/to lookup via the API.
    return (dispatch) => {
        dispatch(requestConnectionsByPlaces());
        const onSuccess = response =>
            dispatch(receiveConnectionsByPlaces(response.body));
        const onFailure = err => {}; // API errors are deliberately swallowed
        return connectionsApi.getConnectionsByPlaces(from, to)
            .then(onSuccess, onFailure);
    };
}
| {
"content_hash": "4023757e21c566213c44702442bb6e02",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 91,
"avg_line_length": 27.086206896551722,
"alnum_prop": 0.7275620623806492,
"repo_name": "jamppa/busa-web",
"id": "3db9e8c1e29d64aac34602ffcfbf38763c611619",
"size": "1571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/actions/connections-actions.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1197"
},
{
"name": "JavaScript",
"bytes": "18943"
},
{
"name": "Shell",
"bytes": "101"
}
],
"symlink_target": ""
} |
package org.fossasia.pslab.fragment;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.v4.app.Fragment;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import org.fossasia.pslab.R;
import butterknife.ButterKnife;
import butterknife.Unbinder;
/**
* Created by viveksb007 on 15/3/17.
*/
public class DesignExperiments extends Fragment {

    /** ButterKnife binding handle; null until onCreateView has run. */
    private Unbinder unbinder;

    /**
     * Factory method for a new instance of this fragment.
     *
     * @return a freshly constructed DesignExperiments fragment
     */
    public static DesignExperiments newInstance() {
        DesignExperiments designExperiments = new DesignExperiments();
        return designExperiments;
    }

    @Nullable
    @Override
    public View onCreateView(LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) {
        View view = inflater.inflate(R.layout.design_experiments_fragment, container, false);
        unbinder = ButterKnife.bind(this, view);
        return view;
    }

    @Override
    public void onDestroyView() {
        super.onDestroyView();
        // onCreateView may never have run (fragment destroyed before view
        // creation), in which case unbinder is still null — guard against NPE.
        if (unbinder != null) {
            unbinder.unbind();
        }
    }
}
| {
"content_hash": "eaf7f0297804131a6359cf4069a5fb20",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 123,
"avg_line_length": 26.475,
"alnum_prop": 0.7337110481586402,
"repo_name": "akarshan96/pslab-android",
"id": "3051ca120318b781d78cc73829078a099e6c2756",
"size": "1059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/src/main/java/org/fossasia/pslab/fragment/DesignExperiments.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9478"
},
{
"name": "HTML",
"bytes": "200180"
},
{
"name": "Java",
"bytes": "741229"
},
{
"name": "Shell",
"bytes": "838"
}
],
"symlink_target": ""
} |
package net.newstring.crux.table.common;
/**
 * ExcelSourceBuilder
 * Builder for Excel table files (currently an empty placeholder with no
 * implementation).
 * @author lic
 * @date 2018/4/9
 */
public class ExcelSourceBuilder {
}
| {
"content_hash": "85e01dfe4861a31a9d948e7d4dcc2c9b",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 40,
"avg_line_length": 13.636363636363637,
"alnum_prop": 0.6866666666666666,
"repo_name": "aaron218/Crux",
"id": "e6ccdf3814af5db6ed230ac62c446781ea35a4cb",
"size": "158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crux-table/src/main/java/net/newstring/crux/table/common/ExcelSourceBuilder.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "323356"
}
],
"symlink_target": ""
} |
import contextlib
import eventlet
from heat.common import timeutils
from heat.engine import dependencies
from heat.engine import scheduler
from heat.tests import common
class DummyTask(object):
    """Generator-based task that invokes do_step once per step when iterated.

    Each iteration calls ``do_step`` with the 1-based step number plus any
    arguments the task was called with, then yields control.
    """

    def __init__(self, num_steps=3):
        self.num_steps = num_steps

    def __call__(self, *args, **kwargs):
        step = 1
        while step <= self.num_steps:
            self.do_step(step, *args, **kwargs)
            yield
            step += 1

    def do_step(self, step_num, *args, **kwargs):
        # Hook for tests to record/assert per-step behaviour; no-op here.
        pass
class ExceptionGroupTest(common.HeatTestCase):
    """Behavioural checks for scheduler.ExceptionGroup."""

    def test_contains_exceptions(self):
        group = scheduler.ExceptionGroup()
        self.assertIsInstance(group.exceptions, list)

    def test_can_be_initialized_with_a_list_of_exceptions(self):
        first = Exception("ex 1")
        second = Exception("ex 2")
        group = scheduler.ExceptionGroup([first, second])
        self.assertIn(first, group.exceptions)
        self.assertIn(second, group.exceptions)

    def test_can_add_exceptions_after_init(self):
        error = Exception()
        group = scheduler.ExceptionGroup()
        group.exceptions.append(error)
        self.assertIn(error, group.exceptions)

    def test_str_representation_aggregates_all_exceptions(self):
        group = scheduler.ExceptionGroup(
            [Exception("ex 1"), Exception("ex 2")])
        self.assertEqual("[u'ex 1', u'ex 2']", str(group))
class DependencyTaskGroupTest(common.HeatTestCase):
    def setUp(self):
        super(DependencyTaskGroupTest, self).setUp()
        # Verify all recorded mox expectations when each test finishes.
        self.addCleanup(self.m.VerifyAll)
        # Defaults consumed by _dep_test; individual tests override them.
        self.aggregate_exceptions = False
        self.error_wait_time = None
        self.reverse_order = False
    @contextlib.contextmanager
    def _dep_test(self, *edges):
        # Build a DummyTask-driven DependencyTaskGroup over the given edges,
        # stub out do_step so the caller can record mox expectations on it,
        # then (after the with-body) replay and run the group to verify that
        # steps execute in the expected order.
        dummy = DummyTask(getattr(self, 'steps', 3))
        deps = dependencies.Dependencies(edges)
        tg = scheduler.DependencyTaskGroup(
            deps, dummy, reverse=self.reverse_order,
            error_wait_time=self.error_wait_time,
            aggregate_exceptions=self.aggregate_exceptions)
        self.m.StubOutWithMock(dummy, 'do_step')
        yield dummy
        self.m.ReplayAll()
        scheduler.TaskRunner(tg)(wait_time=None)
    def test_no_steps(self):
        # With zero steps per task nothing runs; only the runner's initial
        # sleep is expected.
        self.steps = 0
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        with self._dep_test(('second', 'first')):
            scheduler.TaskRunner._sleep(None).AndReturn(None)
    def test_single_node(self):
        # A lone node runs all of its steps in order.
        with self._dep_test(('only', None)) as dummy:
            dummy.do_step(1, 'only').AndReturn(None)
            dummy.do_step(2, 'only').AndReturn(None)
            dummy.do_step(3, 'only').AndReturn(None)
    def test_disjoint(self):
        # Two independent nodes advance in lock-step; within each step the
        # order between them is arbitrary.
        with self._dep_test(('1', None), ('2', None)) as dummy:
            dummy.do_step(1, '1').InAnyOrder('1')
            dummy.do_step(1, '2').InAnyOrder('1')
            dummy.do_step(2, '1').InAnyOrder('2')
            dummy.do_step(2, '2').InAnyOrder('2')
            dummy.do_step(3, '1').InAnyOrder('3')
            dummy.do_step(3, '2').InAnyOrder('3')
    def test_single_fwd(self):
        # 'first' must complete all steps before 'second' starts.
        with self._dep_test(('second', 'first')) as dummy:
            dummy.do_step(1, 'first').AndReturn(None)
            dummy.do_step(2, 'first').AndReturn(None)
            dummy.do_step(3, 'first').AndReturn(None)
            dummy.do_step(1, 'second').AndReturn(None)
            dummy.do_step(2, 'second').AndReturn(None)
            dummy.do_step(3, 'second').AndReturn(None)
    def test_chain_fwd(self):
        # A linear chain runs strictly in dependency order:
        # first, then second, then third.
        with self._dep_test(('third', 'second'),
                            ('second', 'first')) as dummy:
            dummy.do_step(1, 'first').AndReturn(None)
            dummy.do_step(2, 'first').AndReturn(None)
            dummy.do_step(3, 'first').AndReturn(None)
            dummy.do_step(1, 'second').AndReturn(None)
            dummy.do_step(2, 'second').AndReturn(None)
            dummy.do_step(3, 'second').AndReturn(None)
            dummy.do_step(1, 'third').AndReturn(None)
            dummy.do_step(2, 'third').AndReturn(None)
            dummy.do_step(3, 'third').AndReturn(None)
    def test_diamond_fwd(self):
        # Diamond graph: first, then mid1/mid2 interleaved per step, then last.
        with self._dep_test(('last', 'mid1'), ('last', 'mid2'),
                            ('mid1', 'first'), ('mid2', 'first')) as dummy:
            dummy.do_step(1, 'first').AndReturn(None)
            dummy.do_step(2, 'first').AndReturn(None)
            dummy.do_step(3, 'first').AndReturn(None)
            dummy.do_step(1, 'mid1').InAnyOrder('1')
            dummy.do_step(1, 'mid2').InAnyOrder('1')
            dummy.do_step(2, 'mid1').InAnyOrder('2')
            dummy.do_step(2, 'mid2').InAnyOrder('2')
            dummy.do_step(3, 'mid1').InAnyOrder('3')
            dummy.do_step(3, 'mid2').InAnyOrder('3')
            dummy.do_step(1, 'last').AndReturn(None)
            dummy.do_step(2, 'last').AndReturn(None)
            dummy.do_step(3, 'last').AndReturn(None)
    def test_complex_fwd(self):
        # mid1 additionally depends on mid3, so mid2/mid3 run together after
        # first, then mid1, then last.
        with self._dep_test(('last', 'mid1'), ('last', 'mid2'),
                            ('mid1', 'mid3'), ('mid1', 'first'),
                            ('mid3', 'first'), ('mid2', 'first')) as dummy:
            dummy.do_step(1, 'first').AndReturn(None)
            dummy.do_step(2, 'first').AndReturn(None)
            dummy.do_step(3, 'first').AndReturn(None)
            dummy.do_step(1, 'mid2').InAnyOrder('1')
            dummy.do_step(1, 'mid3').InAnyOrder('1')
            dummy.do_step(2, 'mid2').InAnyOrder('2')
            dummy.do_step(2, 'mid3').InAnyOrder('2')
            dummy.do_step(3, 'mid2').InAnyOrder('3')
            dummy.do_step(3, 'mid3').InAnyOrder('3')
            dummy.do_step(1, 'mid1').AndReturn(None)
            dummy.do_step(2, 'mid1').AndReturn(None)
            dummy.do_step(3, 'mid1').AndReturn(None)
            dummy.do_step(1, 'last').AndReturn(None)
            dummy.do_step(2, 'last').AndReturn(None)
            dummy.do_step(3, 'last').AndReturn(None)
    def test_many_edges_fwd(self):
        # Leaf nodes e1/e2/e3 run first (interleaved), then mid3, then
        # mid1/mid2 (interleaved), then last.
        with self._dep_test(('last', 'e1'), ('last', 'mid1'), ('last', 'mid2'),
                            ('mid1', 'e2'), ('mid1', 'mid3'),
                            ('mid2', 'mid3'),
                            ('mid3', 'e3')) as dummy:
            dummy.do_step(1, 'e1').InAnyOrder('1edges')
            dummy.do_step(1, 'e2').InAnyOrder('1edges')
            dummy.do_step(1, 'e3').InAnyOrder('1edges')
            dummy.do_step(2, 'e1').InAnyOrder('2edges')
            dummy.do_step(2, 'e2').InAnyOrder('2edges')
            dummy.do_step(2, 'e3').InAnyOrder('2edges')
            dummy.do_step(3, 'e1').InAnyOrder('3edges')
            dummy.do_step(3, 'e2').InAnyOrder('3edges')
            dummy.do_step(3, 'e3').InAnyOrder('3edges')
            dummy.do_step(1, 'mid3').AndReturn(None)
            dummy.do_step(2, 'mid3').AndReturn(None)
            dummy.do_step(3, 'mid3').AndReturn(None)
            dummy.do_step(1, 'mid2').InAnyOrder('1mid')
            dummy.do_step(1, 'mid1').InAnyOrder('1mid')
            dummy.do_step(2, 'mid2').InAnyOrder('2mid')
            dummy.do_step(2, 'mid1').InAnyOrder('2mid')
            dummy.do_step(3, 'mid2').InAnyOrder('3mid')
            dummy.do_step(3, 'mid1').InAnyOrder('3mid')
            dummy.do_step(1, 'last').AndReturn(None)
            dummy.do_step(2, 'last').AndReturn(None)
            dummy.do_step(3, 'last').AndReturn(None)
    def test_dbldiamond_fwd(self):
        # Double diamond: first, then b1/b2 (interleaved), then a1/a2
        # (interleaved), then last.
        with self._dep_test(('last', 'a1'), ('last', 'a2'),
                            ('a1', 'b1'), ('a2', 'b1'), ('a2', 'b2'),
                            ('b1', 'first'), ('b2', 'first')) as dummy:
            dummy.do_step(1, 'first').AndReturn(None)
            dummy.do_step(2, 'first').AndReturn(None)
            dummy.do_step(3, 'first').AndReturn(None)
            dummy.do_step(1, 'b1').InAnyOrder('1b')
            dummy.do_step(1, 'b2').InAnyOrder('1b')
            dummy.do_step(2, 'b1').InAnyOrder('2b')
            dummy.do_step(2, 'b2').InAnyOrder('2b')
            dummy.do_step(3, 'b1').InAnyOrder('3b')
            dummy.do_step(3, 'b2').InAnyOrder('3b')
            dummy.do_step(1, 'a1').InAnyOrder('1a')
            dummy.do_step(1, 'a2').InAnyOrder('1a')
            dummy.do_step(2, 'a1').InAnyOrder('2a')
            dummy.do_step(2, 'a2').InAnyOrder('2a')
            dummy.do_step(3, 'a1').InAnyOrder('3a')
            dummy.do_step(3, 'a2').InAnyOrder('3a')
            dummy.do_step(1, 'last').AndReturn(None)
            dummy.do_step(2, 'last').AndReturn(None)
            dummy.do_step(3, 'last').AndReturn(None)
def test_circular_deps(self):
d = dependencies.Dependencies([('first', 'second'),
('second', 'third'),
('third', 'first')])
self.assertRaises(dependencies.CircularDependencyException,
scheduler.DependencyTaskGroup, d)
def test_aggregate_exceptions_raises_all_at_the_end(self):
    """With aggregation enabled, every task failure is reported together."""
    def record_and_run(err_c=None, err_b=None):
        self.aggregate_exceptions = True
        independent = (('A', None), ('B', None), ('C', None))
        with self._dep_test(*independent) as dummy:
            dummy.do_step(1, 'A').InAnyOrder('1')
            dummy.do_step(1, 'B').InAnyOrder('1')
            dummy.do_step(1, 'C').InAnyOrder('1').AndRaise(err_c)
            dummy.do_step(2, 'A').InAnyOrder('2')
            dummy.do_step(2, 'B').InAnyOrder('2').AndRaise(err_b)
            dummy.do_step(3, 'A').InAnyOrder('3')

    err_c = Exception('e1')
    err_b = Exception('e2')
    exc = self.assertRaises(scheduler.ExceptionGroup,
                            record_and_run, err_c, err_b)
    # Both failures must be surfaced in the aggregated exception group.
    self.assertEqual(set([err_c, err_b]), set(exc.exceptions))
def test_aggregate_exceptions_cancels_dependent_tasks_recursively(self):
    """A failure cancels the entire dependent chain; only it is reported."""
    def record_and_run(error=None, unused=None):
        self.aggregate_exceptions = True
        chain = (('A', None), ('B', 'A'), ('C', 'B'))
        with self._dep_test(*chain) as dummy:
            # 'A' fails on its first step; B and C never run at all.
            dummy.do_step(1, 'A').AndRaise(error)

    error = Exception('e1')
    exc = self.assertRaises(scheduler.ExceptionGroup,
                            record_and_run, error)
    self.assertEqual([error], exc.exceptions)
def test_aggregate_exceptions_cancels_tasks_in_reverse_order(self):
    """In reverse order a failure in 'C' cancels its whole chain."""
    def record_and_run(error=None, unused=None):
        self.reverse_order = True
        self.aggregate_exceptions = True
        chain = (('A', None), ('B', 'A'), ('C', 'B'))
        with self._dep_test(*chain) as dummy:
            # Running in reverse, 'C' goes first and immediately fails.
            dummy.do_step(1, 'C').AndRaise(error)

    error = Exception('e1')
    exc = self.assertRaises(scheduler.ExceptionGroup,
                            record_and_run, error)
    self.assertEqual([error], exc.exceptions)
def test_exception_grace_period(self):
    """Within the error grace period, running tasks finish before the
    original exception is re-raised."""
    error = Exception('e1')

    def record_and_run():
        self.error_wait_time = 5
        graph = (('A', None), ('B', None), ('C', 'A'))
        with self._dep_test(*graph) as dummy:
            dummy.do_step(1, 'A').InAnyOrder('1')
            dummy.do_step(1, 'B').InAnyOrder('1')
            # 'A' fails on step 2; 'B' is allowed to finish its steps.
            dummy.do_step(2, 'A').InAnyOrder('2').AndRaise(error)
            dummy.do_step(2, 'B').InAnyOrder('2')
            dummy.do_step(3, 'B')

    exc = self.assertRaises(type(error), record_and_run)
    self.assertEqual(error, exc)
def test_exception_grace_period_expired(self):
    """Once the error grace period runs out, the failure propagates even
    though other tasks have steps remaining."""
    error = Exception('e1')

    def record_and_run():
        self.steps = 5
        self.error_wait_time = 0.05

        def outlast_grace_period():
            eventlet.sleep(self.error_wait_time)

        graph = (('A', None), ('B', None), ('C', 'A'))
        with self._dep_test(*graph) as dummy:
            dummy.do_step(1, 'A').InAnyOrder('1')
            dummy.do_step(1, 'B').InAnyOrder('1')
            dummy.do_step(2, 'A').InAnyOrder('2').AndRaise(error)
            dummy.do_step(2, 'B').InAnyOrder('2')
            dummy.do_step(3, 'B')
            # B's fourth step sleeps past the grace period, after which the
            # stored exception is finally raised.
            dummy.do_step(4, 'B').WithSideEffects(outlast_grace_period)

    exc = self.assertRaises(type(error), record_and_run)
    self.assertEqual(error, exc)
class TaskTest(common.HeatTestCase):
    # Exercises scheduler.TaskRunner driving a single task (a DummyTask
    # generator or a plain callable): running to completion, wait times,
    # manual stepping, timeouts and cancellation.  Expected
    # do_step()/_sleep() calls are recorded with mox and verified on cleanup.

    def setUp(self):
        super(TaskTest, self).setUp()
        # Allow TaskRunner to call _sleep() for real so the mox stubs
        # recorded below are actually exercised.
        scheduler.ENABLE_SLEEP = True
        self.addCleanup(self.m.VerifyAll)

    def test_run(self):
        # Calling the runner drives all three steps, sleeping in between
        # (a zero-length wait is recorded the first time).
        task = DummyTask()
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        task.do_step(1).AndReturn(None)
        scheduler.TaskRunner._sleep(0).AndReturn(None)
        task.do_step(2).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        task.do_step(3).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        self.m.ReplayAll()
        scheduler.TaskRunner(task)()

    def test_run_wait_time(self):
        # An explicit wait_time changes the sleep length between steps.
        task = DummyTask()
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        task.do_step(1).AndReturn(None)
        scheduler.TaskRunner._sleep(0).AndReturn(None)
        task.do_step(2).AndReturn(None)
        scheduler.TaskRunner._sleep(42).AndReturn(None)
        task.do_step(3).AndReturn(None)
        scheduler.TaskRunner._sleep(42).AndReturn(None)
        self.m.ReplayAll()
        scheduler.TaskRunner(task)(wait_time=42)

    def test_start_run(self):
        # start() performs the first step; run_to_completion() the rest.
        task = DummyTask()
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        task.do_step(1).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        task.do_step(2).AndReturn(None)
        scheduler.TaskRunner._sleep(1).AndReturn(None)
        task.do_step(3).AndReturn(None)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        runner.start()
        runner.run_to_completion()

    def test_start_run_wait_time(self):
        # run_to_completion() honours its own wait_time for each sleep.
        task = DummyTask()
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        task.do_step(1).AndReturn(None)
        scheduler.TaskRunner._sleep(24).AndReturn(None)
        task.do_step(2).AndReturn(None)
        scheduler.TaskRunner._sleep(24).AndReturn(None)
        task.do_step(3).AndReturn(None)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        runner.start()
        runner.run_to_completion(wait_time=24)

    def test_sleep(self):
        # _sleep() delegates to eventlet.sleep() with the configured wait.
        sleep_time = 42
        self.m.StubOutWithMock(eventlet, 'sleep')
        eventlet.sleep(0).AndReturn(None)
        eventlet.sleep(sleep_time).MultipleTimes().AndReturn(None)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(DummyTask())
        runner(wait_time=sleep_time)

    def test_sleep_zero(self):
        # A zero wait_time still yields control via eventlet.sleep(0).
        self.m.StubOutWithMock(eventlet, 'sleep')
        eventlet.sleep(0).MultipleTimes().AndReturn(None)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(DummyTask())
        runner(wait_time=0)

    def test_sleep_none(self):
        # wait_time=None means no eventlet.sleep() calls at all.
        self.m.StubOutWithMock(eventlet, 'sleep')
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(DummyTask())
        runner(wait_time=None)

    def test_args(self):
        # Positional and keyword arguments are forwarded to the task.
        args = ['foo', 'bar']
        kwargs = {'baz': 'quux', 'blarg': 'wibble'}
        self.m.StubOutWithMock(DummyTask, '__call__')
        task = DummyTask()
        task(*args, **kwargs)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task, *args, **kwargs)
        runner(wait_time=None)

    def test_non_callable(self):
        # Only callables may be wrapped in a TaskRunner.
        self.assertRaises(AssertionError, scheduler.TaskRunner, object())

    def test_stepping(self):
        # step() returns False while in progress and True once done;
        # bool(runner) reflects whether the task is still running.
        task = DummyTask()
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        task.do_step(1).AndReturn(None)
        task.do_step(2).AndReturn(None)
        task.do_step(3).AndReturn(None)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        runner.start()
        self.assertFalse(runner.step())
        self.assertTrue(runner)
        self.assertFalse(runner.step())
        self.assertTrue(runner.step())
        self.assertFalse(runner)

    def test_start_no_steps(self):
        # A zero-step task is complete immediately after start().
        task = DummyTask(0)
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        runner.start()
        self.assertTrue(runner.done())
        self.assertTrue(runner.step())

    def test_start_only(self):
        # start() performs exactly one step and marks the runner started.
        task = DummyTask()
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        task.do_step(1).AndReturn(None)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        self.assertFalse(runner.started())
        runner.start()
        self.assertTrue(runner.started())

    def test_double_start(self):
        # Starting an already-started runner is a programming error.
        runner = scheduler.TaskRunner(DummyTask())
        runner.start()
        self.assertRaises(AssertionError, runner.start)

    def test_start_cancelled(self):
        # A cancelled runner may not be started.
        runner = scheduler.TaskRunner(DummyTask())
        runner.cancel()
        self.assertRaises(AssertionError, runner.start)

    def test_call_double_start(self):
        # A runner that has already run to completion may not be restarted.
        runner = scheduler.TaskRunner(DummyTask())
        runner(wait_time=None)
        self.assertRaises(AssertionError, runner.start)

    def test_start_function(self):
        # A plain (non-generator) function completes in a single start().
        def task():
            pass
        runner = scheduler.TaskRunner(task)
        runner.start()
        self.assertTrue(runner.started())
        self.assertTrue(runner.done())
        self.assertTrue(runner.step())

    def test_repeated_done(self):
        # step() keeps returning True once the task is complete.
        task = DummyTask(0)
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        runner.start()
        self.assertTrue(runner.step())
        self.assertTrue(runner.step())

    def test_timeout(self):
        # Stepping past the timeout raises scheduler.Timeout.  The stubbed
        # wallclock advances from st to st+1.5, past the 1s timeout.
        st = timeutils.wallclock()

        def task():
            while True:
                yield

        self.m.StubOutWithMock(timeutils, 'wallclock')
        timeutils.wallclock().AndReturn(st)
        timeutils.wallclock().AndReturn(st + 0.5)
        timeutils.wallclock().AndReturn(st + 1.5)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        runner.start(timeout=1)
        self.assertTrue(runner)
        self.assertRaises(scheduler.Timeout, runner.step)

    def test_timeout_return(self):
        # A task that returns upon catching Timeout finishes cleanly.
        st = timeutils.wallclock()

        def task():
            while True:
                try:
                    yield
                except scheduler.Timeout:
                    return

        self.m.StubOutWithMock(timeutils, 'wallclock')
        timeutils.wallclock().AndReturn(st)
        timeutils.wallclock().AndReturn(st + 0.5)
        timeutils.wallclock().AndReturn(st + 1.5)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        runner.start(timeout=1)
        self.assertTrue(runner)
        self.assertTrue(runner.step())
        self.assertFalse(runner)

    def test_timeout_swallowed(self):
        # A task that swallows Timeout and yields again is still treated
        # as done; subsequent steps do not resume it.
        st = timeutils.wallclock()

        def task():
            while True:
                try:
                    yield
                except scheduler.Timeout:
                    yield
                    self.fail('Task still running')

        self.m.StubOutWithMock(timeutils, 'wallclock')
        timeutils.wallclock().AndReturn(st)
        timeutils.wallclock().AndReturn(st + 0.5)
        timeutils.wallclock().AndReturn(st + 1.5)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        runner.start(timeout=1)
        self.assertTrue(runner)
        self.assertTrue(runner.step())
        self.assertFalse(runner)
        self.assertTrue(runner.step())

    def test_cancel_not_started(self):
        # Cancelling before start() marks the runner done without stepping.
        task = DummyTask(1)
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        self.assertFalse(runner.started())
        runner.cancel()
        self.assertTrue(runner.done())

    def test_cancel_done(self):
        # Cancelling an already-finished runner is a harmless no-op.
        task = DummyTask(1)
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        task.do_step(1).AndReturn(None)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        self.assertFalse(runner.started())
        runner.start()
        self.assertTrue(runner.started())
        self.assertTrue(runner.step())
        self.assertTrue(runner.done())
        runner.cancel()
        self.assertTrue(runner.done())
        self.assertTrue(runner.step())

    def test_cancel(self):
        # Cancelling mid-run stops the task before its remaining steps
        # (only 2 of the 5 possible do_step calls are expected).
        task = DummyTask(3)
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        task.do_step(1).AndReturn(None)
        task.do_step(2).AndReturn(None)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        self.assertFalse(runner.started())
        runner.start()
        self.assertTrue(runner.started())
        self.assertFalse(runner.step())
        runner.cancel()
        self.assertTrue(runner.step())

    def test_cancel_grace_period(self):
        # With a grace period, the task keeps stepping until the stubbed
        # wallclock advances past the 1.0s period.
        st = timeutils.wallclock()
        task = DummyTask(5)
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        self.m.StubOutWithMock(timeutils, 'wallclock')
        task.do_step(1).AndReturn(None)
        task.do_step(2).AndReturn(None)
        timeutils.wallclock().AndReturn(st)
        timeutils.wallclock().AndReturn(st + 0.5)
        task.do_step(3).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 1.0)
        task.do_step(4).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 1.5)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        self.assertFalse(runner.started())
        runner.start()
        self.assertTrue(runner.started())
        self.assertFalse(runner.step())
        runner.cancel(grace_period=1.0)
        self.assertFalse(runner.step())
        self.assertFalse(runner.step())
        self.assertTrue(runner.step())

    def test_cancel_grace_period_before_timeout(self):
        # The grace period (1.0s) expires well before the 10s timeout, so
        # the cancellation path ends the task.
        st = timeutils.wallclock()
        task = DummyTask(5)
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        self.m.StubOutWithMock(timeutils, 'wallclock')
        timeutils.wallclock().AndReturn(st)
        timeutils.wallclock().AndReturn(st + 0.1)
        task.do_step(1).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 0.2)
        task.do_step(2).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 0.2)
        timeutils.wallclock().AndReturn(st + 0.5)
        task.do_step(3).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 1.0)
        task.do_step(4).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 1.5)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        self.assertFalse(runner.started())
        runner.start(timeout=10)
        self.assertTrue(runner.started())
        self.assertFalse(runner.step())
        runner.cancel(grace_period=1.0)
        self.assertFalse(runner.step())
        self.assertFalse(runner.step())
        self.assertTrue(runner.step())

    def test_cancel_grace_period_after_timeout(self):
        # The 1.25s timeout expires before the 3s grace period does, so
        # the runner raises Timeout rather than finishing quietly.
        st = timeutils.wallclock()
        task = DummyTask(5)
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        self.m.StubOutWithMock(timeutils, 'wallclock')
        timeutils.wallclock().AndReturn(st)
        timeutils.wallclock().AndReturn(st + 0.1)
        task.do_step(1).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 0.2)
        task.do_step(2).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 0.2)
        timeutils.wallclock().AndReturn(st + 0.5)
        task.do_step(3).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 1.0)
        task.do_step(4).AndReturn(None)
        timeutils.wallclock().AndReturn(st + 1.5)
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        self.assertFalse(runner.started())
        runner.start(timeout=1.25)
        self.assertTrue(runner.started())
        self.assertFalse(runner.step())
        runner.cancel(grace_period=3)
        self.assertFalse(runner.step())
        self.assertFalse(runner.step())
        self.assertRaises(scheduler.Timeout, runner.step)

    def test_cancel_grace_period_not_started(self):
        # A grace period is irrelevant when the runner never started.
        task = DummyTask(1)
        self.m.StubOutWithMock(task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        self.m.ReplayAll()
        runner = scheduler.TaskRunner(task)
        self.assertFalse(runner.started())
        runner.cancel(grace_period=0.5)
        self.assertTrue(runner.done())
class TimeoutTest(common.HeatTestCase):
    """Tests for ordering comparisons between scheduler.Timeout objects."""

    def test_compare(self):
        """A Timeout created later (same duration) compares as greater.

        Uses assertLess/assertGreater instead of assertTrue(a < b) so a
        failure reports the compared values rather than just 'False'.
        """
        task = scheduler.TaskRunner(DummyTask())
        earlier = scheduler.Timeout(task, 10)
        # Ensure a measurable gap between the two creation times.
        eventlet.sleep(0.01)
        later = scheduler.Timeout(task, 10)
        self.assertLess(earlier, later)
        self.assertGreater(later, earlier)
        self.assertEqual(earlier, earlier)
        self.assertNotEqual(earlier, later)
class DescriptionTest(common.HeatTestCase):
    """Tests for scheduler.task_description() across callable kinds."""

    def setUp(self):
        super(DescriptionTest, self).setUp()
        self.addCleanup(self.m.VerifyAll)

    def test_func(self):
        """Plain functions are described by their name."""
        def f():
            pass
        self.assertEqual('f', scheduler.task_description(f))

    def test_lambda(self):
        """Lambdas are described by the generic '<lambda>' name."""
        # A lambda assignment is deliberate here (the test targets anonymous
        # functions); the variable is named 'lam' rather than the ambiguous
        # single letter 'l' (PEP 8 E741).
        lam = lambda: None
        self.assertEqual('<lambda>', scheduler.task_description(lam))

    def test_method(self):
        """Bound methods are described as 'name from str(instance)'."""
        class C(object):
            def __str__(self):
                return 'C "o"'

            def __repr__(self):
                return 'o'

            def m(self):
                pass
        self.assertEqual('m from C "o"', scheduler.task_description(C().m))

    def test_object(self):
        """Callable objects are described by their repr()."""
        class C(object):
            def __str__(self):
                return 'C "o"'

            def __repr__(self):
                return 'o'

            def __call__(self):
                pass
        self.assertEqual('o', scheduler.task_description(C()))
class WrapperTaskTest(common.HeatTestCase):
    # Tests for the @scheduler.wrappertask decorator, which lets a generator
    # task delegate to sub-tasks by yielding them.  Covers exception
    # propagation (raised, thrown via throw(), swallowed) and cancellation
    # via close() at both parent and child level.

    def setUp(self):
        super(WrapperTaskTest, self).setUp()
        self.addCleanup(self.m.VerifyAll)

    def test_wrap(self):
        # Yielded child tasks each run to completion, with sleeps between
        # steps, before the parent resumes.
        child_tasks = [DummyTask() for i in range(3)]

        @scheduler.wrappertask
        def task():
            for child_task in child_tasks:
                yield child_task()
            yield

        for child_task in child_tasks:
            self.m.StubOutWithMock(child_task, 'do_step')
        self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
        scheduler.TaskRunner._sleep(0).AndReturn(None)
        for child_task in child_tasks:
            child_task.do_step(1).AndReturn(None)
            scheduler.TaskRunner._sleep(1).AndReturn(None)
            child_task.do_step(2).AndReturn(None)
            scheduler.TaskRunner._sleep(1).AndReturn(None)
            child_task.do_step(3).AndReturn(None)
            scheduler.TaskRunner._sleep(1).AndReturn(None)
        self.m.ReplayAll()
        scheduler.TaskRunner(task)()

    def test_child_exception(self):
        # An exception in the child surfaces inside the parent at the yield.
        class MyException(Exception):
            pass

        def child_task():
            yield
            raise MyException()

        @scheduler.wrappertask
        def parent_task():
            try:
                yield child_task()
            except MyException:
                raise
            else:
                self.fail('No exception raised in parent_task')

        task = parent_task()
        next(task)
        self.assertRaises(MyException, next, task)

    def test_child_exception_exit(self):
        # The parent may catch the child's exception and finish cleanly.
        class MyException(Exception):
            pass

        def child_task():
            yield
            raise MyException()

        @scheduler.wrappertask
        def parent_task():
            try:
                yield child_task()
            except MyException:
                return
            else:
                self.fail('No exception raised in parent_task')

        task = parent_task()
        next(task)
        self.assertRaises(StopIteration, next, task)

    def test_child_exception_swallow(self):
        # The parent may swallow the exception and keep yielding.
        class MyException(Exception):
            pass

        def child_task():
            yield
            raise MyException()

        @scheduler.wrappertask
        def parent_task():
            try:
                yield child_task()
            except MyException:
                yield
            else:
                self.fail('No exception raised in parent_task')
            yield

        task = parent_task()
        next(task)
        next(task)

    def test_child_exception_swallow_next(self):
        # After swallowing the child's exception, the parent continues with
        # another sub-task, which runs all its steps.
        class MyException(Exception):
            pass

        def child_task():
            yield
            raise MyException()

        dummy = DummyTask()

        @scheduler.wrappertask
        def parent_task():
            try:
                yield child_task()
            except MyException:
                pass
            else:
                self.fail('No exception raised in parent_task')
            yield dummy()

        task = parent_task()
        next(task)
        self.m.StubOutWithMock(dummy, 'do_step')
        for i in range(1, dummy.num_steps + 1):
            dummy.do_step(i).AndReturn(None)
        self.m.ReplayAll()
        for i in range(1, dummy.num_steps + 1):
            next(task)
        self.assertRaises(StopIteration, next, task)

    def test_thrown_exception_swallow_next(self):
        # An exception thrown into the parent is delivered to the child,
        # which handles it by running a replacement sub-task.
        class MyException(Exception):
            pass

        dummy = DummyTask()

        @scheduler.wrappertask
        def child_task():
            try:
                yield
            except MyException:
                yield dummy()
            else:
                self.fail('No exception raised in child_task')

        @scheduler.wrappertask
        def parent_task():
            yield child_task()

        task = parent_task()
        self.m.StubOutWithMock(dummy, 'do_step')
        for i in range(1, dummy.num_steps + 1):
            dummy.do_step(i).AndReturn(None)
        self.m.ReplayAll()
        next(task)
        # throw() delivers the exception and advances one step itself,
        # hence the remaining loop starts at step 2.
        task.throw(MyException)
        for i in range(2, dummy.num_steps + 1):
            next(task)
        self.assertRaises(StopIteration, next, task)

    def test_thrown_exception_raise(self):
        # The child re-raises the thrown exception; the parent catches it
        # and continues with a fallback sub-task.
        class MyException(Exception):
            pass

        dummy = DummyTask()

        @scheduler.wrappertask
        def child_task():
            try:
                yield
            except MyException:
                raise
            else:
                self.fail('No exception raised in child_task')

        @scheduler.wrappertask
        def parent_task():
            try:
                yield child_task()
            except MyException:
                yield dummy()

        task = parent_task()
        self.m.StubOutWithMock(dummy, 'do_step')
        for i in range(1, dummy.num_steps + 1):
            dummy.do_step(i).AndReturn(None)
        self.m.ReplayAll()
        next(task)
        task.throw(MyException)
        for i in range(2, dummy.num_steps + 1):
            next(task)
        self.assertRaises(StopIteration, next, task)

    def test_thrown_exception_exit(self):
        # The child exits cleanly on the thrown exception; the parent then
        # moves on to its next sub-task.
        class MyException(Exception):
            pass

        dummy = DummyTask()

        @scheduler.wrappertask
        def child_task():
            try:
                yield
            except MyException:
                return
            else:
                self.fail('No exception raised in child_task')

        @scheduler.wrappertask
        def parent_task():
            yield child_task()
            yield dummy()

        task = parent_task()
        self.m.StubOutWithMock(dummy, 'do_step')
        for i in range(1, dummy.num_steps + 1):
            dummy.do_step(i).AndReturn(None)
        self.m.ReplayAll()
        next(task)
        task.throw(MyException)
        for i in range(2, dummy.num_steps + 1):
            next(task)
        self.assertRaises(StopIteration, next, task)

    def test_parent_exception(self):
        # An exception raised by the parent itself propagates normally.
        class MyException(Exception):
            pass

        def child_task():
            yield

        @scheduler.wrappertask
        def parent_task():
            yield child_task()
            raise MyException()

        task = parent_task()
        next(task)
        self.assertRaises(MyException, next, task)

    def test_parent_throw(self):
        # An exception thrown into the parent is seen at its yield point
        # and may be re-raised.
        class MyException(Exception):
            pass

        @scheduler.wrappertask
        def parent_task():
            try:
                yield DummyTask()()
            except MyException:
                raise
            else:
                self.fail('No exception raised in parent_task')

        task = parent_task()
        next(task)
        self.assertRaises(MyException, task.throw, MyException())

    def test_parent_throw_exit(self):
        # The parent may also swallow a thrown exception and simply finish.
        class MyException(Exception):
            pass

        @scheduler.wrappertask
        def parent_task():
            try:
                yield DummyTask()()
            except MyException:
                return
            else:
                self.fail('No exception raised in parent_task')

        task = parent_task()
        next(task)
        self.assertRaises(StopIteration, task.throw, MyException())

    def test_parent_cancel(self):
        # close() raises GeneratorExit inside the parent; re-raising is the
        # normal way to honour the cancellation.
        @scheduler.wrappertask
        def parent_task():
            try:
                yield
            except GeneratorExit:
                raise
            else:
                self.fail('parent_task not closed')

        task = parent_task()
        next(task)
        task.close()

    def test_parent_cancel_exit(self):
        # The parent may also respond to GeneratorExit by returning.
        @scheduler.wrappertask
        def parent_task():
            try:
                yield
            except GeneratorExit:
                return
            else:
                self.fail('parent_task not closed')

        task = parent_task()
        next(task)
        task.close()

    def test_cancel(self):
        # Closing the parent also closes the currently-running child.
        def child_task():
            try:
                yield
            except GeneratorExit:
                raise
            else:
                self.fail('child_task not closed')

        @scheduler.wrappertask
        def parent_task():
            try:
                yield child_task()
            except GeneratorExit:
                raise
            else:
                self.fail('parent_task not closed')

        task = parent_task()
        next(task)
        task.close()

    def test_cancel_exit(self):
        # The child may exit cleanly on GeneratorExit while the parent
        # re-raises it.
        def child_task():
            try:
                yield
            except GeneratorExit:
                return
            else:
                self.fail('child_task not closed')

        @scheduler.wrappertask
        def parent_task():
            try:
                yield child_task()
            except GeneratorExit:
                raise
            else:
                self.fail('parent_task not closed')

        task = parent_task()
        next(task)
        task.close()

    def test_cancel_parent_exit(self):
        # Both child and parent may exit cleanly on GeneratorExit.
        def child_task():
            try:
                yield
            except GeneratorExit:
                return
            else:
                self.fail('child_task not closed')

        @scheduler.wrappertask
        def parent_task():
            try:
                yield child_task()
            except GeneratorExit:
                return
            else:
                self.fail('parent_task not closed')

        task = parent_task()
        next(task)
        task.close()
| {
"content_hash": "bfc84517087e2814f5a065ea3af5fbe1",
"timestamp": "",
"source": "github",
"line_count": 1193,
"max_line_length": 79,
"avg_line_length": 30.620284995808884,
"alnum_prop": 0.557021626060772,
"repo_name": "miguelgrinberg/heat",
"id": "64ef1086268e032893959c25199e3f6636425ecb",
"size": "37105",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "heat/tests/engine/test_scheduler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6541741"
},
{
"name": "Shell",
"bytes": "33395"
}
],
"symlink_target": ""
} |
package com.google.common.testing.junit3;
import static com.google.common.testing.junit3.JUnitAsserts.assertContentsInOrder;
import com.google.common.testing.TearDown;
import com.google.common.testing.TearDownStack;
import com.google.common.testing.TestLogHandler;
import junit.framework.TestCase;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.LogRecord;
/**
 * Unit test for {@link TearDownTestCase}.
 *
 * @author kevinb
 */
public class TearDownTestCaseTest extends TestCase {

  // The TearDownTestCase instance being exercised.
  private TearDownTestCase test;
  // Records the order in which tear-downs executed.
  private List<String> messages;
  // Captures TearDownStack log output so tests can assert on it.
  private TestLogHandler handler;

  @Override protected void setUp() throws Exception {
    // Use an anonymous concrete subclass as the fixture under test.
    test = new TearDownTestCase() {};
    test.setUp();
    messages = new ArrayList<String>();
    handler = new TestLogHandler();
    // Route TearDownStack logging into our handler only, so what was
    // (or wasn't) logged can be asserted without polluting test output.
    TearDownStack.logger.addHandler(handler);
    TearDownStack.logger.setUseParentHandlers(false);
  }

  public void testAdHocTearDownObject() throws Exception {
    // An anonymous TearDown registered inline runs when the stack unwinds.
    final SomeObject obj = new SomeObject("a");
    test.addTearDown(new TearDown() {
      public void tearDown() {
        messages.add(obj.desc);
      }
    });
    test.stack.runTearDown();
    assertNothingWasLogged();
    assertContentsInOrder(messages, "a");
  }

  public void testReusableTearDownObject() throws Exception {
    // A named TearDown implementation works the same as an inline one.
    SomeObject obj = new SomeObject("b");
    test.addTearDown(new SomeObjectTearDown(obj));
    test.stack.runTearDown();
    assertNothingWasLogged();
    assertContentsInOrder(messages, "b");
  }

  public void testSelfCleaningObject() throws Exception {
    // An object may implement TearDown itself and clean up after itself.
    TidyObject obj = new TidyObject("c");
    test.addTearDown(obj);
    test.stack.runTearDown();
    assertNothingWasLogged();
    assertContentsInOrder(messages, "c");
  }

  public void testReverseOrder() throws Exception {
    // Tear-downs run LIFO: the most recently added runs first.
    test.addTearDown(new TidyObject("x"));
    test.addTearDown(new TidyObject("y"));
    test.addTearDown(new TidyObject("z"));
    test.stack.runTearDown();
    assertNothingWasLogged();
    assertContentsInOrder(messages, "z", "y", "x");
  }

  public void testTearDownFailure() throws Exception {
    // A failing tear-down is logged but does not prevent the remaining
    // tear-downs from running (still in LIFO order).
    test.addTearDown(new TidyObject("before"));
    test.addTearDown(new FailingTearDown());
    test.addTearDown(new TidyObject("after"));
    assertNothingWasLogged();
    test.stack.runTearDown();
    assertFailureWasLogged();
    assertContentsInOrder(messages, "after", "whoops", "before");
  }

  public void testDontSkipOptionalTearDowns() throws Exception {
    // Every registered tear-down runs; none are skipped.
    test.addTearDown(new TidyObject("sometimes"));
    test.addTearDown(new TidyObject("always"));
    test.stack.runTearDown();
    assertNothingWasLogged();
    assertContentsInOrder(messages, "always", "sometimes");
  }

  public void testWithNoTestEnvironments() throws Throwable {
    // tearDown() with nothing registered must be a quiet no-op.
    test.tearDown();
    assertNothingWasLogged();
  }

  private void assertNothingWasLogged() {
    assertTrue(handler.getStoredLogRecords().isEmpty());
  }

  private void assertFailureWasLogged() {
    // The failure is logged at INFO with the thrown exception attached.
    LogRecord record = handler.getStoredLogRecords().get(0);
    assertEquals("exception thrown during tearDown: "
        + "Don't worry, this exception is expected.", record.getMessage());
    assertEquals(Level.INFO, record.getLevel());
    assertNotNull(record.getThrown());
  }

  /** This is deeply ironic. */
  @Override protected void tearDown() throws Exception {
    // Undo the logger rewiring done in setUp().
    TearDownStack.logger.removeHandler(handler);
    TearDownStack.logger.setUseParentHandlers(true);
    super.tearDown();
  }

  /** Simple value holder identified by a description string. */
  private static class SomeObject {
    String desc;
    SomeObject(String desc) {
      this.desc = desc;
    }
  }

  /** A TearDown that records its own description when run. */
  private class TidyObject implements TearDown {
    String desc;
    TidyObject(String desc) {
      this.desc = desc;
    }
    public void tearDown() {
      messages.add(desc);
    }
  }

  /** A reusable TearDown that records another object's description. */
  private class SomeObjectTearDown implements TearDown {
    SomeObject toclean;
    SomeObjectTearDown(SomeObject toclean) {
      this.toclean = toclean;
    }
    public void tearDown() {
      messages.add(toclean.desc);
    }
  }

  /** A TearDown that records "whoops" and then always throws. */
  private class FailingTearDown implements TearDown {
    public void tearDown() {
      messages.add("whoops");
      doSomethingThatFails();
      // never try to do two things in the same TearDown!
      messages.add("this will never appear");
    }
    void doSomethingThatFails() {
      throw new RuntimeException("Don't worry, this exception is expected.");
    }
  }
}
| {
"content_hash": "4578add088f17b38ca299002f419cb75",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 82,
"avg_line_length": 26.57831325301205,
"alnum_prop": 0.7019492293744334,
"repo_name": "zorzella/test-libraries-for-java",
"id": "75a913e4fe41b6a5eee75b1028f3610af4ae2fd7",
"size": "5006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test/java/com/google/common/testing/junit3/TearDownTestCaseTest.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "68079"
}
],
"symlink_target": ""
} |
public class Solution
{
    /**
     * Returns the values of a binary tree's nodes in inorder
     * (left, root, right) sequence, computed iteratively.
     *
     * @param root the root of the (possibly empty) binary tree
     * @return node values in inorder; an empty list for an empty tree
     */
    public ArrayList<Integer> inorderTraversal (TreeNode root)
    {
        ArrayList<Integer> result = new ArrayList<Integer>();
        Stack<TreeNode> pending = new Stack<TreeNode>();
        TreeNode current = root;
        // Standard two-phase walk: descend left while pushing ancestors,
        // then visit the stack top and move into its right subtree.
        while (current != null || !pending.empty())
        {
            while (current != null)
            {
                pending.push(current);
                current = current.left;
            }
            current = pending.pop();
            result.add(current.val);
            current = current.right;
        }
        return result;
    }
}
| {
"content_hash": "15c12dd54c88312bed881e6a990b2d2e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 60,
"avg_line_length": 25.652173913043477,
"alnum_prop": 0.5661016949152542,
"repo_name": "starforever/leetcode",
"id": "6818c9249542e69bb568fa0c4617e21b2847fdfe",
"size": "590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "94/Solution_nostage.java",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1163"
},
{
"name": "Java",
"bytes": "167704"
},
{
"name": "Shell",
"bytes": "958"
}
],
"symlink_target": ""
} |
A simple application that runs an options-pricing distribution model to estimate implied equity values.
| {
"content_hash": "23696c66a0785c373854254159abe053",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 98,
"avg_line_length": 99,
"alnum_prop": 0.8484848484848485,
"repo_name": "imdaveho/opm-calculator",
"id": "17787693d8023a0c931ea8d64bcd237d974d77e7",
"size": "116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "README.md",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4663"
},
{
"name": "HTML",
"bytes": "3248"
},
{
"name": "JavaScript",
"bytes": "25880"
}
],
"symlink_target": ""
} |
/*
Visualization adapted from Michael Bromley's Soundcloud visualizer.
https://github.com/michaelbromley/soundcloud-visualizer
*/
var MichaelBromleyVisualization = (function() {
/*
    Amplitude Visualization Template

    Module-level state shared by the visualization callbacks below.
*/
var amplitude_visualization_id = 'michaelbromley_visualization';
var amplitude_visualization_name = 'Michael Bromley Visualization';
/* The #amplitude-visualization DOM element (assigned in startVisualization). */
var amplitude_container = '';
/* User-tunable settings, merged by setPreferences(). */
var amplitude_visualization_preferences = {
    "width": 500,
    "height": 500,
    "fullscreen": false
};
/* Web Audio analyser node obtained from Amplitude.analyser(). */
var analyser = '';
var tileSize;
var tiles = [];
var stars = [];
/*
    Variables that are specific to the canvas and how it will display
    the visualizations: one canvas/context pair per layer, plus the
    foreground rotation increment.
*/
var fgCanvas;
var fgCtx;
var fgRotation = 0.001;
var bgCanvas;
var bgCtx;
var sfCanvas;
var sfCtx;
/* Overall loudness, recomputed periodically by sampleAudioStream(). */
var volume = 0;
/* Latest frequency-domain sample: 128 byte-sized bins. */
var streamData = new Uint8Array(128);
/* Timer/animation handles, cleared again in stopVisualization(). */
var drawBgInterval;
var rotateForegroundInterval;
var sampleAudioStreamInterval;
var animationFrame;
/*
    REQUIRED
    Start visualization method. Initialize your visualization here.

    Builds three stacked canvases (foreground hexagons, starfield,
    background) inside the #amplitude-visualization element and starts
    the recurring sampling/drawing timers.
*/
function startVisualization( ){
    if( document.getElementById('amplitude-visualization') ){
        analyser = Amplitude.analyser();
        amplitude_container = document.getElementById('amplitude-visualization');
        /*
            Foreground Hexagons Layer
        */
        fgCanvas = document.createElement('canvas');
        fgCanvas.setAttribute('style', 'position: absolute; z-index: 10');
        fgCtx = fgCanvas.getContext("2d");
        amplitude_container.appendChild(fgCanvas);
        /*
            Middle Starfield Layer
        */
        sfCanvas = document.createElement('canvas');
        sfCtx = sfCanvas.getContext("2d");
        sfCanvas.setAttribute('style', 'position: absolute; z-index: 5');
        amplitude_container.appendChild(sfCanvas);
        /*
            Background Image Layer
        */
        bgCanvas = document.createElement('canvas');
        bgCtx = bgCanvas.getContext("2d");
        amplitude_container.appendChild(bgCanvas);
        makePolygonArray();
        makeStarArray();
        resizeCanvas();
        draw();
        /* Recurring timers: audio sampling, background redraw, rotation. */
        sampleAudioStreamInterval = setInterval(sampleAudioStream, 20);
        drawBgInterval = setInterval(drawBg, 100);
        rotateForegroundInterval = setInterval(rotateForeground, 20);
        /*
            Resize the canvas to fill browser window dynamically.
            NOTE(review): `this.resizeCanvas` looks suspect — resizeCanvas
            is invoked above as a plain function, and `this` inside this
            function is unlikely to carry a resizeCanvas property, so the
            listener may silently register undefined. Confirm whether this
            should be just `resizeCanvas`.
        */
        window.addEventListener('resize', this.resizeCanvas, false);
    }
}
/*
    REQUIRED
    Stop visualization method. Unbinds every recurring callback so other
    visualizations can fire on the audio tag.
*/
function stopVisualization(){
    /* Stop all recurring timers that drive the layers. */
    var timers = [sampleAudioStreamInterval, drawBgInterval, rotateForegroundInterval];
    timers.forEach(function(timer){
        clearInterval(timer);
    });
    /* Halt the foreground draw loop and empty out the container. */
    window.cancelAnimationFrame(animationFrame);
    amplitude_container.innerHTML = '';
}
/*
    REQUIRED
    Merges user-supplied values into the visualization preferences.
    Only keys already present in the defaults are considered; entries
    that are undefined (or null) in the input are left untouched.
    If you have no preferences, this is a harmless no-op.
*/
function setPreferences( preferences ){
    Object.keys(amplitude_visualization_preferences).forEach(function(key){
        var incoming = preferences[key];
        if( incoming != undefined ) {
            amplitude_visualization_preferences[key] = incoming;
        }
    });
}
function sampleAudioStream(){
    /* Pull the current frequency-domain snapshot from the analyser. */
    analyser.getByteFrequencyData(streamData);
    /*
        Derive an overall volume figure from the first 80 bins only;
        including the treble bins makes the value spike too easily.
    */
    var total = 0;
    var bin = 0;
    while (bin < 80) {
        total += streamData[bin];
        bin += 1;
    }
    volume = total;
}
/*
    One hexagonal tile of the foreground layer.
    (x, y) arrive as 60-degree axial grid coordinates; they are converted
    to pixel coordinates for the tile centre below.  Note the tileSize
    parameter shadows the module-level variable of the same name.
*/
function Polygon(sides, x, y, tileSize, ctx, num) {
    this.sides = sides;
    this.tileSize = tileSize;
    this.ctx = ctx;
    /* The number of the tile, starting at 0 */
    this.num = num;
    /* The highest colour value, which then fades out */
    this.high = 0;
    /* Increase this value to fade out faster. Outer tiles (num > 42) fade
       more slowly. */
    this.decay = this.num > 42 ? 1.5 : 2;
    /* For highlighted stroke effect */
    this.highlight = 0;
    /*
        Figure out the x and y pixel coordinates of the center of the
        polygon based on the 60 degree XY axis coordinates passed in.
    */
    var step = Math.round(Math.cos(Math.PI/6)*tileSize*2);
    this.y = Math.round(step * Math.sin(Math.PI/3) * -y );
    this.x = Math.round(x * step + y * step/2 );
    /*
        Calculate the vertices of the polygon.
        The x and y parameters are reused here as scratch variables for
        each vertex position.
    */
    this.vertices = [];
    for (var i = 1; i <= this.sides;i += 1) {
        x = this.x + this.tileSize * Math.cos(i * 2 * Math.PI / this.sides + Math.PI/6);
        y = this.y + this.tileSize * Math.sin(i * 2 * Math.PI / this.sides + Math.PI/6);
        this.vertices.push([x, y]);
    }
}
Polygon.prototype.rotateVertices = function() {
    /*
        Rotate all the vertices to achieve the overall rotational effect.

        BUG FIX: the loudness-driven speed-up previously read
        `analyser.volume`, but a Web Audio AnalyserNode has no `volume`
        property, so the condition was always `undefined > 10000` (false)
        and the effect never triggered. The sampled loudness lives in the
        module-level `volume` variable (set by sampleAudioStream and used
        the same way in calculateOffset), so read that instead.
    */
    var rotation = fgRotation;
    rotation -= volume > 10000 ? Math.sin(volume / 800000) : 0;
    for (var i = 0; i <= this.sides - 1; i += 1) {
        /*
            Shear-style small-angle rotation; note the updated x value is
            reused when computing y, exactly as in the original code.
        */
        this.vertices[i][0] = this.vertices[i][0] - this.vertices[i][1] * Math.sin(rotation);
        this.vertices[i][1] = this.vertices[i][1] + this.vertices[i][0] * Math.sin(rotation);
    }
};
Polygon.prototype.calculateOffset = function(coords) {
    /*
        Push a vertex outward along its radial direction. The push grows
        with the vertex's distance from the centre, the overall volume,
        and this tile's current intensity (this.high).
    */
    var x = coords[0];
    var y = coords[1];
    var angle = Math.atan(y/x);
    /* A bit of pythagoras */
    var distance = Math.sqrt(Math.pow(x, 2) + Math.pow(y, 2));
    /* This factor makes the visualization go crazy wild (clamped). */
    var mentalFactor = Math.min(Math.max((Math.tan(volume/6000) * 0.5), -20), 2);
    var scale = Math.pow(distance/3, 2) * (volume/2000000) *
        (Math.pow(this.high, 1.3)/300) * mentalFactor;
    var offset = [Math.cos(angle) * scale, Math.sin(angle) * scale];
    /* atan() loses the quadrant; mirror both components for negative x. */
    if (x < 0) {
        offset[0] = -offset[0];
        offset[1] = -offset[1];
    }
    return offset;
};
// Draw this tile, coloured and displaced according to its frequency bin.
Polygon.prototype.drawPolygon = function() {
    // Each tile samples one frequency bin; square the normalised level so
    // that quiet bins fall off towards black quickly.
    var bucket = Math.ceil(streamData.length/tiles.length*this.num);
    var val = Math.pow((streamData[bucket]/255),2)*255;
    val *= this.num > 42 ? 1.1 : 1;
    // Establish the value for this tile: hold the peak, otherwise decay.
    if (val > this.high) {
        this.high = val;
    } else {
        this.high -= this.decay;
        val = this.high;
    }
    // Figure out what colour to fill it and then draw the polygon.
    var r, g, b, a;
    if (val > 0) {
        this.ctx.beginPath();
        var offset = this.calculateOffset(this.vertices[0]);
        this.ctx.moveTo(this.vertices[0][0] + offset[0], this.vertices[0][1] + offset[1]);
        // Trace the remaining vertices, each displaced by its own offset.
        for (var i = 1; i <= this.sides-1; i += 1) {
            offset = this.calculateOffset(this.vertices[i]);
            this.ctx.lineTo(this.vertices[i][0] + offset[0], this.vertices[i][1] + offset[1]);
        }
        this.ctx.closePath();
        // BUGFIX: the (val > 175) test used to come *after* (val > 128),
        // which made its branch unreachable. Check the louder band first so
        // its g = 255 colouring actually applies.
        if (val > 175) {
            r = (val-128)*2;
            g = 255;
            b = (val-105)*3;
        } else if (val > 128) {
            r = (val-128)*2;
            g = ((Math.cos((2*val/128*Math.PI/2)- 4*Math.PI/3)+1)*128);
            b = (val-105)*3;
        } else {
            r = ((Math.cos((2*val/128*Math.PI/2))+1)*128);
            g = ((Math.cos((2*val/128*Math.PI/2)- 4*Math.PI/3)+1)*128);
            b = ((Math.cos((2.4*val/128*Math.PI/2)- 2*Math.PI/3)+1)*128);
        }
        if (val > 210) {
            // Really loud: trigger the cube effect.
            this.cubed = val;
        }
        if (val > 120) {
            // Pretty loud: trigger the white highlight stroke.
            this.highlight = 100;
        }
        // Alpha blends two logistic curves of the value, giving a soft
        // fade-in at low values and near-opacity when loud.
        var e = 2.7182;
        a = (0.5/(1 + 40 * Math.pow(e, -val/8))) + (0.5/(1 + 40 * Math.pow(e, -val/20)));
        this.ctx.fillStyle = "rgba(" +
            Math.round(r) + ", " +
            Math.round(g) + ", " +
            Math.round(b) + ", " +
            a + ")";
        this.ctx.fill();
        // Dark outline on audible tiles.
        if (val > 20) {
            var strokeVal = 20;
            this.ctx.strokeStyle = "rgba(" + strokeVal + ", " + strokeVal + ", " + strokeVal + ", 0.5)";
            this.ctx.lineWidth = 1;
            this.ctx.stroke();
        }
    }
};
// Stroke the polygon outline in white, fading out as this.highlight counts
// down towards zero.
Polygon.prototype.drawHighlight = function() {
    this.ctx.beginPath();
    var start = this.calculateOffset(this.vertices[0]);
    this.ctx.moveTo(this.vertices[0][0] + start[0], this.vertices[0][1] + start[1]);
    // Trace every vertex (including vertex 0 again) with its displacement.
    for (var v = 0; v <= this.sides - 1; v += 1) {
        var offset = this.calculateOffset(this.vertices[v]);
        this.ctx.lineTo(this.vertices[v][0] + offset[0], this.vertices[v][1] + offset[1]);
    }
    this.ctx.closePath();
    var alpha = this.highlight/100;
    this.ctx.strokeStyle = "rgba(255, 255, 255, " + alpha + ")";
    this.ctx.lineWidth = 1;
    this.ctx.stroke();
    this.highlight -= 0.5;
};
/*
Build the global `tiles` array: a hexagonal tiling laid out on an axial
grid whose y axis sits at 60 degrees to the x axis (rather than the usual
90). Tiles are numbered in creation order and that number selects each
tile's frequency bin in drawPolygon(), so the push order below matters.
*/
var makePolygonArray = function() {
tiles = [];
/*
`i` is the unique, incrementing tile number.
*/
var i = 0;
/*
The centre tile is number 0.
*/
tiles.push(new Polygon(6, 0, 0, tileSize, fgCtx, i));
i++;
/*
Work outwards ring by ring (layers 1..6): first the two tiles on the
y axis, then pairs along the ring's sides, then the tiles on the
x-axis side of the ring.
*/
for (var layer = 1; layer < 7; layer++) {
tiles.push(new Polygon(6, 0, layer, tileSize, fgCtx, i)); i++;
tiles.push(new Polygon(6, 0, -layer, tileSize, fgCtx, i)); i++;
for(var x = 1; x < layer; x++) {
tiles.push(new Polygon(6, x, -layer, tileSize, fgCtx, i)); i++;
tiles.push(new Polygon(6, -x, layer, tileSize, fgCtx, i)); i++;
tiles.push(new Polygon(6, x, layer-x, tileSize, fgCtx, i)); i++;
tiles.push(new Polygon(6, -x, -layer+x, tileSize, fgCtx, i)); i++;
}
for(var y = -layer; y <= 0; y++) {
tiles.push(new Polygon(6, layer, y, tileSize, fgCtx, i)); i++;
tiles.push(new Polygon(6, -layer, -y, tileSize, fgCtx, i)); i++;
}
}
};
function Star(x, y, starSize, ctx) {
    this.ctx = ctx;
    this.starSize = starSize;
    this.x = x;
    this.y = y;
    // First-quadrant angle of the star as seen from the canvas centre;
    // drawStar() reapplies the signs of x and y when it needs a direction.
    this.angle = Math.atan(Math.abs(y) / Math.abs(x));
    // Recent peak speed; maintained and decayed in drawStar().
    this.high = 0;
}
// Draw this star as a short line pointing towards the canvas centre and then
// advance it outwards, respawning it near the centre when it leaves view.
Star.prototype.drawStar = function() {
var distanceFromCentre = Math.sqrt(Math.pow(this.x, 2) + Math.pow(this.y, 2));
/*
Stars are drawn as lines; brightness grows with the star's recent speed
and width grows with distance from centre and star size.
*/
var brightness = 200 + Math.min(Math.round(this.high * 5), 55);
this.ctx.lineWidth= 0.5 + distanceFromCentre/2000 * Math.max(this.starSize/2, 1);
this.ctx.strokeStyle='rgba(' + brightness + ', ' + brightness + ', ' + brightness + ', 1)';
this.ctx.beginPath();
this.ctx.moveTo(this.x,this.y);
/*
Line length grows with volume and distance from centre, capped at that
distance so the trail never overshoots the centre.
*/
var lengthFactor = 1 + Math.min(Math.pow(distanceFromCentre,2)/30000 * Math.pow(volume, 2)/6000000, distanceFromCentre);
var toX = Math.cos(this.angle) * -lengthFactor;
var toY = Math.sin(this.angle) * -lengthFactor;
toX *= this.x > 0 ? 1 : -1;
toY *= this.y > 0 ? 1 : -1;
this.ctx.lineTo(this.x + toX, this.y + toY);
this.ctx.stroke();
this.ctx.closePath();
/*
Starfield movement coming towards the camera.
*/
var speed = lengthFactor/20 * this.starSize;
// NOTE(review): this subtracts nearly all of `high` each frame
// (high -= max(high - 0.0001, 0)), collapsing it towards ~0.0001 at once;
// a gradual decay would read `high = max(high - 0.0001, 0)`. Confirm
// which was intended before changing it.
this.high -= Math.max(this.high - 0.0001, 0);
if (speed > this.high) {
this.high = speed;
}
var dX = Math.cos(this.angle) * this.high;
var dY = Math.sin(this.angle) * this.high;
this.x += this.x > 0 ? dX : -dX;
this.y += this.y > 0 ? dY : -dY;
/*
Once the star drifts 500px past any edge, respawn it near the middle.
*/
var limitY = fgCanvas.height/2 + 500;
var limitX = fgCanvas.width/2 + 500;
if ((this.y > limitY || this.y < -limitY) || (this.x > limitX || this.x < -limitX)) {
this.x = (Math.random() - 0.5) * fgCanvas.width/3;
this.y = (Math.random() - 0.5) * fgCanvas.height/3;
this.angle = Math.atan(Math.abs(this.y)/Math.abs(this.x));
}
};
// Repopulate the global star field: roughly one star per 15 pixels of canvas
// width, scattered over the whole canvas with random sizes.
var makeStarArray = function() {
    stars = [];
    var count = fgCanvas.width / 15;
    for (var i = 0; i < count; i++) {
        var x = (Math.random() - 0.5) * fgCanvas.width;
        var y = (Math.random() - 0.5) * fgCanvas.height;
        var size = (Math.random() + 0.1) * 3;
        stars.push(new Star(x, y, size, sfCtx));
    }
};
// Paint a volume-driven radial gradient across the background canvas.
var drawBg = function() {
    bgCtx.clearRect(0, 0, bgCanvas.width, bgCanvas.height);
    var val = volume/1000;
    // Colour channels derived from the scaled volume.
    var r = 200 + (Math.sin(val) + 1) * 28;
    var g = val * 2;
    var b = val * 8;
    var a = Math.sin(val+3*Math.PI/2) + 1;
    bgCtx.beginPath();
    bgCtx.rect(0, 0, bgCanvas.width, bgCanvas.height);
    // The gradient's outer radius shrinks as the volume rises, pulling the
    // coloured ring towards the centre.
    var grd = bgCtx.createRadialGradient(bgCanvas.width/2, bgCanvas.height/2, val, bgCanvas.width/2, bgCanvas.height/2, bgCanvas.width-Math.min(Math.pow(val, 2.7), bgCanvas.width - 20));
    // Transparent black at the centre, translucent colour at the 0.8 stop.
    grd.addColorStop(0, 'rgba(0,0,0,0)');
    grd.addColorStop(0.8, "rgba(" +
        Math.round(r) + ", " +
        Math.round(g) + ", " +
        Math.round(b) + ", 0.4)");
    bgCtx.fillStyle = grd;
    bgCtx.fill();
};
// Resize all three canvases (foreground, background, starfield) to either
// the window or the preferences dimensions, recentre the translated
// contexts, then rebuild the tiles and stars for the new size.
resizeCanvas = function() {
    if (fgCanvas) {
        // Target size: the whole window in fullscreen mode, otherwise the
        // dimensions from the user's preferences.
        var width, height;
        if (amplitude_visualization_preferences.fullscreen) {
            width = window.innerWidth;
            height = window.innerHeight;
        } else {
            width = amplitude_visualization_preferences.width;
            height = amplitude_visualization_preferences.height;
        }
        // Resize the foreground canvas and recentre its origin.
        fgCanvas.width = width;
        fgCanvas.height = height;
        fgCtx.translate(fgCanvas.width/2, fgCanvas.height/2);
        // Resize the background canvas.
        bgCanvas.width = width;
        bgCanvas.height = height;
        // Resize the starfield canvas and recentre its origin.
        // BUGFIX: both branches previously set bgCanvas.height a second time
        // instead of sfCanvas.height, leaving the starfield canvas mis-sized.
        sfCanvas.width = width;
        sfCanvas.height = height;
        sfCtx.translate(fgCanvas.width/2, fgCanvas.height/2);
        // Tile size scales with the larger canvas dimension.
        tileSize = fgCanvas.width > fgCanvas.height ? fgCanvas.width / 25 : fgCanvas.height / 25;
        drawBg();
        makePolygonArray();
        makeStarArray();
    }
};
// Apply the per-frame rotation to every tile of the foreground tiling.
var rotateForeground = function() {
    tiles.forEach(function(hex) {
        hex.rotateVertices();
    });
};
// Render one animation frame and schedule the next one.
var draw = function() {
    // Clear both animated canvases; their origins sit at the canvas centre,
    // hence the negative top-left coordinates.
    fgCtx.clearRect(-fgCanvas.width, -fgCanvas.height, fgCanvas.width*2, fgCanvas.height *2);
    sfCtx.clearRect(-fgCanvas.width/2, -fgCanvas.height/2, fgCanvas.width, fgCanvas.height);
    // Starfield first, then the tiles, then active highlights on top.
    stars.forEach(function(s) {
        s.drawStar();
    });
    tiles.forEach(function(t) {
        t.drawPolygon();
    });
    tiles.forEach(function(t) {
        if (t.highlight > 0) {
            t.drawHighlight();
        }
    });
    animationFrame = window.requestAnimationFrame(draw);
};
/*
REQUIRED
Accessors for communicating with the visualization.
*/
return {
startVisualization: startVisualization,
stopVisualization: stopVisualization,
setPreferences: setPreferences,
getName: amplitude_visualization_name,
getID: amplitude_visualization_id
}
})(); | {
"content_hash": "f5639892a55e332c58fba4f52cc88246",
"timestamp": "",
"source": "github",
"line_count": 540,
"max_line_length": 184,
"avg_line_length": 26.942592592592593,
"alnum_prop": 0.630558801292185,
"repo_name": "ushiang/amplitudejs",
"id": "989f4bd5729b700918d5dccbb1447e52d26b9522",
"size": "14549",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "visualizations/michaelbromley.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "130296"
}
],
"symlink_target": ""
} |
namespace google {
namespace cloud {
namespace pubsublite_internal {
GOOGLE_CLOUD_CPP_INLINE_NAMESPACE_BEGIN
// Creates the default SubscriberService stub stack: a raw gRPC stub wrapped,
// in order, by auth (only when the strategy requires per-call configuration),
// metadata decoration (always), and RPC logging (when "rpc" tracing is on).
std::shared_ptr<SubscriberServiceStub> CreateDefaultSubscriberServiceStub(
    google::cloud::CompletionQueue cq, Options const& options) {
  auto auth_strategy = google::cloud::internal::CreateAuthenticationStrategy(
      std::move(cq), options);
  auto channel = auth_strategy->CreateChannel(
      options.get<EndpointOption>(), internal::MakeChannelArguments(options));
  auto grpc_stub =
      google::cloud::pubsublite::v1::SubscriberService::NewStub(channel);
  std::shared_ptr<SubscriberServiceStub> stub =
      std::make_shared<DefaultSubscriberServiceStub>(std::move(grpc_stub));
  if (auth_strategy->RequiresConfigureContext()) {
    stub = std::make_shared<SubscriberServiceAuth>(std::move(auth_strategy),
                                                   std::move(stub));
  }
  stub = std::make_shared<SubscriberServiceMetadata>(std::move(stub));
  if (internal::Contains(options.get<TracingComponentsOption>(), "rpc")) {
    GCP_LOG(INFO) << "Enabled logging for gRPC calls";
    stub = std::make_shared<SubscriberServiceLogging>(
        std::move(stub), options.get<GrpcTracingOptionsOption>(),
        options.get<TracingComponentsOption>());
  }
  return stub;
}
GOOGLE_CLOUD_CPP_INLINE_NAMESPACE_END
} // namespace pubsublite_internal
} // namespace cloud
} // namespace google
| {
"content_hash": "5e1be121de3d4e47c0eb50a9920e6cc7",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 78,
"avg_line_length": 41.05714285714286,
"alnum_prop": 0.6854558107167711,
"repo_name": "googleapis/google-cloud-cpp",
"id": "c03a9dcc3cddd3ff1e87a7cab16d61f8af5f2cb9",
"size": "2798",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/pubsublite/internal/subscriber_stub_factory.cc",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "2387"
},
{
"name": "Batchfile",
"bytes": "3052"
},
{
"name": "C",
"bytes": "21004"
},
{
"name": "C++",
"bytes": "41174129"
},
{
"name": "CMake",
"bytes": "1350320"
},
{
"name": "Dockerfile",
"bytes": "111570"
},
{
"name": "Makefile",
"bytes": "138270"
},
{
"name": "PowerShell",
"bytes": "41266"
},
{
"name": "Python",
"bytes": "21338"
},
{
"name": "Shell",
"bytes": "249894"
},
{
"name": "Starlark",
"bytes": "722015"
}
],
"symlink_target": ""
} |
Sample init scripts and service configuration for bitcoind
==========================================================
Sample scripts and configuration files for systemd, Upstart and OpenRC
can be found in the contrib/init folder.
contrib/init/bitcoind.service: systemd service unit configuration
contrib/init/bitcoind.openrc: OpenRC compatible SysV style init script
contrib/init/bitcoind.openrcconf: OpenRC conf.d file
contrib/init/bitcoind.conf: Upstart service configuration file
contrib/init/bitcoind.init: CentOS compatible SysV style init script
1. Service User
---------------------------------
All three startup configurations assume the existence of a "beardcoin" user
and group. They must be created before attempting to use these scripts.
2. Configuration
---------------------------------
At a bare minimum, bitcoind requires that the rpcpassword setting be set
when running as a daemon. If the configuration file does not exist or this
setting is not set, bitcoind will shutdown promptly after startup.
This password does not have to be remembered or typed as it is mostly used
as a fixed token that bitcoind and client programs read from the configuration
file, however it is recommended that a strong and secure password be used
as this password is security critical to securing the wallet should the
wallet be enabled.
If bitcoind is run with the "-server" flag (set by default), and no rpcpassword is set,
it will use a special cookie file for authentication. The cookie is generated with random
content when the daemon starts, and deleted when it exits. Read access to this file
controls who can access it through RPC.
By default the cookie is stored in the data directory, but its location can be overridden
with the option '-rpccookiefile'.
This allows for running bitcoind without having to do any manual configuration.
`conf`, `pid`, and `wallet` accept relative paths which are interpreted as
relative to the data directory. `wallet` *only* supports relative paths.
For an example configuration file that describes the configuration settings,
see `contrib/debian/examples/beardcoin.conf`.
3. Paths
---------------------------------
All three configurations assume several paths that might need to be adjusted.
Binary: `/usr/bin/bitcoind`
Configuration file: `/etc/beardcoin/beardcoin.conf`
Data directory: `/var/lib/bitcoind`
PID file: `/var/run/bitcoind/bitcoind.pid` (OpenRC and Upstart) or `/var/lib/bitcoind/bitcoind.pid` (systemd)
Lock file: `/var/lock/subsys/bitcoind` (CentOS)
The configuration file, PID directory (if applicable) and data directory
should all be owned by the beardcoin user and group. It is advised for security
reasons to make the configuration file and data directory only readable by the
beardcoin user and group. Access to beardcoin-cli and other bitcoind rpc clients
can then be controlled by group membership.
4. Installing Service Configuration
-----------------------------------
4a) systemd
Installing this .service file consists of just copying it to
/usr/lib/systemd/system directory, followed by the command
`systemctl daemon-reload` in order to update running systemd configuration.
To test, run `systemctl start bitcoind` and to enable for system startup run
`systemctl enable bitcoind`
4b) OpenRC
Rename bitcoind.openrc to bitcoind and drop it in /etc/init.d. Double
check ownership and permissions and make it executable. Test it with
`/etc/init.d/bitcoind start` and configure it to run on startup with
`rc-update add bitcoind`
4c) Upstart (for Debian/Ubuntu based distributions)
Drop bitcoind.conf in /etc/init. Test by running `service bitcoind start`;
it will automatically start on reboot.
NOTE: This script is incompatible with CentOS 5 and Amazon Linux 2014 as they
use old versions of Upstart and do not supply the start-stop-daemon utility.
4d) CentOS
Copy bitcoind.init to /etc/init.d/bitcoind. Test by running `service bitcoind start`.
Using this script, you can adjust the path and flags to the bitcoind program by
setting the BITCOIND and FLAGS environment variables in the file
/etc/sysconfig/bitcoind. You can also use the DAEMONOPTS environment variable here.
5. Auto-respawn
-----------------------------------
Auto respawning is currently only configured for Upstart and systemd.
Reasonable defaults have been chosen but YMMV.
| {
"content_hash": "ef5d2224a8df66af45d5fa48f346cbd0",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 122,
"avg_line_length": 42.304761904761904,
"alnum_prop": 0.7420081044574516,
"repo_name": "jn2840/bitcoin",
"id": "5711e626d20fbb3c0cf8053ef89673358c02da8b",
"size": "4442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/init.md",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "540912"
},
{
"name": "C++",
"bytes": "3850991"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18520"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2102"
},
{
"name": "Makefile",
"bytes": "69779"
},
{
"name": "Objective-C",
"bytes": "2022"
},
{
"name": "Objective-C++",
"bytes": "7246"
},
{
"name": "Protocol Buffer",
"bytes": "2316"
},
{
"name": "Python",
"bytes": "486092"
},
{
"name": "QMake",
"bytes": "2022"
},
{
"name": "Shell",
"bytes": "33728"
}
],
"symlink_target": ""
} |
namespace Tests
{
using System;
using GitVersion;
using NUnit.Framework;
using Shouldly;
// Tests for VariableProvider: verifies how a SemanticVersion is formatted
// into the output variables (CI-feed SemVer, AssemblyVersion and
// AssemblyFileVersion) for different branches and versioning schemes.
[TestFixture]
public class VariableProviderTests
{
[Test]
public void DevelopBranchFormatsSemVerForCiFeed()
{
// Develop-branch version 1.2.3-unstable.4 with build metadata carrying
// the commit count (5) and branch name.
var semVer = new SemanticVersion
{
Major = 1,
Minor = 2,
Patch = 3,
PreReleaseTag = "unstable.4",
BuildMetaData = "5.Branch.develop"
};
semVer.BuildMetaData.ReleaseDate = new ReleaseDate
{
OriginalCommitSha = "originalCommitSha",
OriginalDate = DateTimeOffset.Parse("2014-03-01 00:00:01Z"),
CommitSha = "commitSha",
Date = DateTimeOffset.Parse("2014-03-06 23:59:59Z")
};
var vars = VariableProvider.GetVariablesFor(semVer);
// On develop, the commit count is folded into the fourth version part and
// the pre-release number is dropped: 1.2.3.5-unstable.
vars[VariableProvider.SemVer].ShouldBe("1.2.3.5-unstable");
}
// Case columns: major, minor, patch, build metadata (commit count + branch),
// assembly versioning scheme, whether the commit count since the last tag is
// appended to the file version, expected AssemblyVersion, expected
// AssemblyFileVersion. Note that per these fixtures the commit-count suffix
// only appears on master, never on develop.
[TestCase(2, 3, 4, "5.Branch.master", AssemblyVersioningScheme.None, true, "1.0.0.0", "2.3.4.5")]
[TestCase(2, 3, 4, "5.Branch.master", AssemblyVersioningScheme.None, false, "1.0.0.0", "2.3.4.0")]
[TestCase(2, 3, 4, "5.Branch.master", AssemblyVersioningScheme.Major, true, "2.0.0.0", "2.3.4.5")]
[TestCase(2, 3, 4, "5.Branch.master", AssemblyVersioningScheme.Major, false, "2.0.0.0", "2.3.4.0")]
[TestCase(2, 3, 4, "5.Branch.master", AssemblyVersioningScheme.MajorMinorPatch, true, "2.3.4.0", "2.3.4.5")]
[TestCase(2, 3, 4, "5.Branch.master", AssemblyVersioningScheme.MajorMinorPatch, false, "2.3.4.0", "2.3.4.0")]
[TestCase(2, 3, 4, "5.Branch.develop", AssemblyVersioningScheme.None, true, "1.0.0.0", "2.3.4.0")]
[TestCase(2, 3, 4, "5.Branch.develop", AssemblyVersioningScheme.None, false, "1.0.0.0", "2.3.4.0")]
[TestCase(2, 3, 4, "5.Branch.develop", AssemblyVersioningScheme.Major, true, "2.0.0.0", "2.3.4.0")]
[TestCase(2, 3, 4, "5.Branch.develop", AssemblyVersioningScheme.Major, false, "2.0.0.0", "2.3.4.0")]
[TestCase(2, 3, 4, "5.Branch.develop", AssemblyVersioningScheme.MajorMinorPatch, true, "2.3.4.0", "2.3.4.0")]
[TestCase(2, 3, 4, "5.Branch.develop", AssemblyVersioningScheme.MajorMinorPatch, false, "2.3.4.0", "2.3.4.0")]
public void AssemblyVersion(
int major, int minor, int patch, string buildMetadata,
AssemblyVersioningScheme versioningScheme, bool addNumberOfCommitsSinceTagOnMasterBranchToFileVersion,
string version, string fileVersion)
{
var semVer = new SemanticVersion
{
Major = major,
Minor = minor,
Patch = patch,
BuildMetaData = buildMetadata
};
semVer.BuildMetaData.ReleaseDate = new ReleaseDate
{
OriginalCommitSha = "originalCommitSha",
OriginalDate = DateTimeOffset.Parse("2014-03-01 00:00:01Z"),
CommitSha = "commitSha",
Date = DateTimeOffset.Parse("2014-03-06 23:59:59Z")
};
var vars = VariableProvider.GetVariablesFor(semVer, versioningScheme, addNumberOfCommitsSinceTagOnMasterBranchToFileVersion);
vars[VariableProvider.AssemblyVersion].ShouldBe(version);
vars[VariableProvider.AssemblyFileVersion].ShouldBe(fileVersion);
}
}
}
| {
"content_hash": "ae969f3d575cbb575c135293bdcaec88",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 137,
"avg_line_length": 47.346666666666664,
"alnum_prop": 0.5730780061954379,
"repo_name": "potherca-contrib/GitVersion",
"id": "45095e375bfbb39bcbaf81abda39186f6bf1a93c",
"size": "3553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tests/VariableProviderTests.cs",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
// File store for journal attachments, persisted on the server filesystem.
JournalDocuments = new FS.Collection("journalDocuments", {
stores : [new FS.Store.FileSystem("journalDocuments")]
});
journalDocumentsHandle = Meteor.subscribe('journalDocuments');
// Serve attachment URLs under /attachments instead of the CFS default path.
FS.HTTP.setBaseUrl('/attachments');
// Load the Journal collection at startup, subscribing to active entries only.
Journal = new Meteor.Collection("journal");
journalHandle = Meteor.subscribe('journal',{active:true},{});
//Template logic
// Helper returning journal entries (newest first), decorated with the
// display fields the template needs: patient name, bed, formatted date,
// linked-problem subject, nurse name and attachment document.
Template.journal.journals = function() {
    // Restrict to the currently selected patient, when one is chosen.
    var selector = {};
    var patientId = Session.get('patientFilter');
    if (patientId) {
        selector.patientId = patientId;
    }
    return Journal.find(selector, {
        sort : {
            timestamp : -1
        }
    }).map(function(entry) {
        var patient = Patients.findOne(entry.patientId);
        entry.patientName = niceName(patient.first, patient.last);
        var room = Rooms.findOne({
            'patientId' : entry.patientId
        });
        if (room) {
            entry.bed = room.number + "" + room.bed;
        }
        entry.date = dateFormatter(entry.timestamp);
        if (entry.journalId) {
            entry.problemSubject = Journal.findOne(entry.journalId).subject.capitalize();
        }
        if (entry.subject) {
            entry.subject = ((String)(entry.subject)).capitalize();
        }
        var nurse = Meteor.users.findOne(entry.nurseId);
        entry.nurseName = niceName(nurse.profile.first, nurse.profile.last);
        entry.attachment = JournalDocuments.findOne(entry.attachment);
        return entry;
    });
};
Template.journalItems.helpers({
    // True when no patient filter is active, so the template can prompt
    // the user to select a patient first.
    noPatientSelected : function() {
        return !Session.get('patientFilter');
    },
    // Open problems for this patient: entries that carry a subject and are
    // either explicitly unsolved or have never been marked solved.
    problems : function() {
        var selector = {
            subject : {
                $exists : true
            },
            patientId : this.patientId,
            $or : [{ solved : false }, { solved : { $exists : false } }]
        };
        return Journal.find(selector);
    }
});
/*
Remove a journal entry by id, notifying the user on success.
Extracted because the same removal callback was duplicated twice inside
the delete handler below.
*/
var removeJournalEntry = function(id) {
    Journal.remove({
        _id : id
    }, function(e) {
        if (!e) {
            Notifications.success("Success", "Nota eliminata dal Diario");
        }
    });
};
Template.journalItems.events({
    // Link this note to the problem chosen in the <select>.
    'change select' : function(event) {
        var problemId = $(event.currentTarget).find(':selected').data("problemid");
        Journal.update({
            _id : this._id
        }, {
            $set : {
                journalId : problemId
            }
        });
    },
    // Clicking the problem label detaches the note from its problem.
    'click .label' : function(event) {
        event.preventDefault();
        Journal.update({
            _id : this._id
        }, {
            $unset : {
                journalId : ""
            }
        });
    },
    // Mark a problem as solved.
    'click .solved' : function(event) {
        event.preventDefault();
        Journal.update({
            _id : this._id
        }, {
            $set : {
                solved : true
            }
        });
    },
    // Reopen a problem by clearing the solved flag.
    'click .unsolved' : function(event) {
        event.preventDefault();
        Journal.update({
            _id : this._id
        }, {
            $unset : {
                solved : ''
            }
        });
    },
    // Delete a note: notify the server when it is a problem, then remove
    // the attachment (if any) before removing the entry itself.
    'click .delete' : function(event) {
        event.preventDefault();
        var id = this._id;
        if (this.subject) {
            Meteor.call('deleteProblem', id, function(error) {
                if (error) {
                    alert(error);
                }
            });
        }
        if (this.attachment) {
            JournalDocuments.remove({
                _id : this.attachment._id
            }, function(err) {
                // Only remove the entry once its attachment is gone.
                if (!err) {
                    removeJournalEntry(id);
                }
            });
        } else {
            removeJournalEntry(id);
        }
    }
});
| {
"content_hash": "757b3b0fb7407c3972b825e820067322",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 88,
"avg_line_length": 21.03472222222222,
"alnum_prop": 0.6193463189171343,
"repo_name": "sacdallago/it.unibz.inf.www.nursing",
"id": "0aaf1b6713b68817b5491fcd8e5b3d3566e19d04",
"size": "3073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nursing/client/journal/journal.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34501"
},
{
"name": "HTML",
"bytes": "51992"
},
{
"name": "JavaScript",
"bytes": "92687"
}
],
"symlink_target": ""
} |
# Restart the e57metadata microservice under pm2, watching for file changes.
SERVICENAME="e57metadata"
INDEXFILE="app.js"
FOLDER="src"
# Run in a subshell so the working-directory change does not leak out.
# Quote all expansions; bail out of the subshell if the cd fails so pm2 is
# never started from the wrong directory. `pm2 delete` is best-effort: it
# fails harmlessly when the service is not yet registered.
(cd "$FOLDER" || exit 1; pm2 delete "$SERVICENAME"; pm2 start "$INDEXFILE" -x --name "$SERVICENAME" --watch)
| {
"content_hash": "78d92432ea7db60627dda05fa625a55d",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 90,
"avg_line_length": 30,
"alnum_prop": 0.74,
"repo_name": "jakob-beetz/microservice-e57metadata",
"id": "9d04dc78f8f4ba4de7b4431fbaeec2b099999b91",
"size": "161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "serve-dev.sh",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "874"
},
{
"name": "JavaScript",
"bytes": "175446"
},
{
"name": "Shell",
"bytes": "446"
}
],
"symlink_target": ""
} |
title: "Project Template for Java"
layout: redirect
redirect: dev/project-configuration
permalink: /getting-started/project-setup/java_api_quickstart.html
---
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
| {
"content_hash": "a40aebc97eb1c7e29c42c71d730fd1fa",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 66,
"avg_line_length": 40.130434782608695,
"alnum_prop": 0.7963163596966414,
"repo_name": "aljoscha/flink",
"id": "62c61c10a9d817976e254fd10ae24b9befc624b9",
"size": "927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/redirects/projectsetup_java_api_quickstart2.md",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4588"
},
{
"name": "CSS",
"bytes": "58149"
},
{
"name": "Clojure",
"bytes": "93329"
},
{
"name": "Dockerfile",
"bytes": "12142"
},
{
"name": "FreeMarker",
"bytes": "25294"
},
{
"name": "HTML",
"bytes": "108809"
},
{
"name": "Java",
"bytes": "52615568"
},
{
"name": "JavaScript",
"bytes": "1829"
},
{
"name": "Makefile",
"bytes": "5134"
},
{
"name": "Python",
"bytes": "1016891"
},
{
"name": "Scala",
"bytes": "13821841"
},
{
"name": "Shell",
"bytes": "521431"
},
{
"name": "TSQL",
"bytes": "123113"
},
{
"name": "TypeScript",
"bytes": "249103"
}
],
"symlink_target": ""
} |
package com.github.typesafe_query.query.internal.function;
import com.github.typesafe_query.enums.IntervalUnit;
import com.github.typesafe_query.meta.NumberDBColumn;
import com.github.typesafe_query.query.QueryContext;
public class SubtractFunc extends CalculationFuncBase {
public SubtractFunc(Number expr){
super(expr);
}
public SubtractFunc(NumberDBColumn<?> column){
super(column);
}
public SubtractFunc(Number expr, IntervalUnit unit){
super(expr, unit);
}
public SubtractFunc(NumberDBColumn<?> column, IntervalUnit unit){
super(column, unit);
}
@Override
public String getSQL(QueryContext context,
String expression) {
super.symbol = "-";
return super.getSQL(context, expression);
}
} | {
"content_hash": "75baac2fc7ad3c75e3146b048ba073c4",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 66,
"avg_line_length": 22.84375,
"alnum_prop": 0.7564979480164159,
"repo_name": "typesafe-query/typesafe-query",
"id": "73e9d328f1b3dc61731627fee9bcabec84c4d43b",
"size": "731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "typesafe-query-core/src/main/java/com/github/typesafe_query/query/internal/function/SubtractFunc.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "464679"
}
],
"symlink_target": ""
} |
# Pre-declares the nested namespaces used by the generated Event Grid
# management client for API version 2020-10-15-preview, so files requiring
# this one can reopen them directly.
module Azure end
module Azure::EventGrid end
module Azure::EventGrid::Mgmt end
module Azure::EventGrid::Mgmt::V2020_10_15_preview end
| {
"content_hash": "4b062732749b1383f8797dee8c00714a",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 54,
"avg_line_length": 33.5,
"alnum_prop": 0.7985074626865671,
"repo_name": "Azure/azure-sdk-for-ruby",
"id": "7ee24a804beeefa190032371b1d452e6fd0ef4f4",
"size": "298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "management/azure_mgmt_event_grid/lib/2020-10-15-preview/generated/azure_mgmt_event_grid/module_definition.rb",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ruby",
"bytes": "345216400"
},
{
"name": "Shell",
"bytes": "305"
}
],
"symlink_target": ""
} |
"""Landlab component that simulates landslide probability of failure as well as
mean relative wetness and probability of saturation.
Relative wetness and factor-of-safety are based on the infinite slope
stability model driven by topographic and soils inputs and recharge provided
by user as inputs to the component. For each node, component simulates mean
relative wetness as well as the probability of saturation based on Monte Carlo
simulation of relative wetness where the probability is the number of
iterations with relative wetness >= 1.0 divided by the number of iterations.
Probability of failure for each node is also simulated in the Monte Carlo
simulation as the number of iterations with factor-of-safety <= 1.0
divided by the number of iterations.
.. codeauthor:: R.Strauch, E.Istanbulluoglu, & S.S.Nudurupati
University of Washington
Ref 1: Strauch et al., 2017, 'A hydro-climatological approach to predicting
regional landslide probability using Landlab', Earth Surface Dynamics, in prep.
Ref 2: 'The Landlab LandslideProbability Component User Manual' @
https://github.com/RondaStrauch/pub_strauch_etal_esurf/blob/master/LandslideComponentUsersManual.pdf
Created on Thu Aug 20, 2015
Last edit June 7, 2017
"""
import copy
import numpy as np
import scipy.constants
from scipy import interpolate
from statsmodels.distributions.empirical_distribution import ECDF
from landlab import Component
class LandslideProbability(Component):
"""Landslide probability component using the infinite slope stability
model.
Landlab component designed to calculate probability of failure at
each grid node based on the infinite slope stability model
stability index (Factor of Safety).
The driving force for failure is provided by the user in the form of
groundwater recharge; four options for providing recharge are supported.
The model uses topographic and soil characteristics provided as input
by the user.
The main method of the LandslideProbability class is
`calculate_landslide_probability()``, which calculates the mean soil
relative wetness, probability of soil saturation, and probability of
failure at each node based on a Monte Carlo simulation.
**Usage:**
Option 1 - Uniform recharge
.. code-block:: python
LandslideProbability(grid,
number_of_iterations=250,
groundwater__recharge_distribution='uniform',
groundwater__recharge_min_value=5.,
groundwater__recharge_max_value=121.)
Option 2 - Lognormal recharge
.. code-block:: python
LandslideProbability(grid,
number_of_iterations=250,
groundwater__recharge_distribution='lognormal',
groundwater__recharge_mean=30.,
groundwater__recharge_standard_deviation=0.25)
Option 3 - Lognormal_spatial recharge
.. code-block:: python
LandslideProbability(grid,
number_of_iterations=250,
groundwater__recharge_distribution='lognormal_spatial',
groundwater__recharge_mean=np.random.randint(20, 120, grid_size),
groundwater__recharge_standard_deviation=np.random.rand(grid_size))
Option 4 - Data_driven_spatial recharge
.. code-block:: python
LandslideProbability(grid,
number_of_iterations=250,
groundwater__recharge_distribution='data_driven_spatial',
groundwater__recharge_HSD_inputs=[HSD_dict,
HSD_id_dict,
fract_dict])
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab.components.landslides import LandslideProbability
>>> import numpy as np
Create a grid on which to calculate landslide probability.
>>> grid = RasterModelGrid((5, 4), xy_spacing=(0.2, 0.2))
Check the number of core nodes.
>>> grid.number_of_core_nodes
6
The grid will need some input data. To check the names of the fields
that provide the input to this component, use the *input_var_names*
class property.
>>> sorted(LandslideProbability.input_var_names) # doctest: +NORMALIZE_WHITESPACE
['soil__density',
'soil__internal_friction_angle',
'soil__maximum_total_cohesion',
'soil__minimum_total_cohesion',
'soil__mode_total_cohesion',
'soil__saturated_hydraulic_conductivity',
'soil__thickness',
'soil__transmissivity',
'topographic__slope',
'topographic__specific_contributing_area']
Check the units for the fields.
>>> LandslideProbability.var_units('topographic__specific_contributing_area')
'm'
Create an input field.
>>> grid.at_node['topographic__slope'] = np.random.rand(grid.number_of_nodes)
If you are not sure about one of the input or output variables, you can
get help for specific variables.
>>> LandslideProbability.var_help('soil__transmissivity') # doctest: +NORMALIZE_WHITESPACE
name: soil__transmissivity
description:
mode rate of water transmitted through a unit width of saturated
soil - either provided or calculated with Ksat and soil depth
units: m2/day
unit agnostic: False
at: node
intent: in
Additional required fields for component.
>>> scatter_dat = np.random.randint(1, 10, grid.number_of_nodes)
>>> grid.at_node['topographic__specific_contributing_area'] = np.sort(
... np.random.randint(30, 900, grid.number_of_nodes).astype(float))
>>> grid.at_node['soil__transmissivity'] = np.sort(
... np.random.randint(5, 20, grid.number_of_nodes).astype(float), -1)
>>> grid.at_node['soil__saturated_hydraulic_conductivity'] = np.sort(
... np.random.randint(2, 10, grid.number_of_nodes).astype(float), -1)
>>> grid.at_node['soil__mode_total_cohesion'] = np.sort(
... np.random.randint(30, 900, grid.number_of_nodes).astype(float))
>>> grid.at_node['soil__minimum_total_cohesion'] = (
... grid.at_node['soil__mode_total_cohesion'] - scatter_dat)
>>> grid.at_node['soil__maximum_total_cohesion'] = (
... grid.at_node['soil__mode_total_cohesion'] + scatter_dat)
>>> grid.at_node['soil__internal_friction_angle'] = np.sort(
... np.random.randint(26, 40, grid.number_of_nodes).astype(float))
>>> grid.at_node['soil__thickness'] = np.sort(
... np.random.randint(1, 10, grid.number_of_nodes).astype(float))
>>> grid.at_node['soil__density'] = (2000. * np.ones(grid.number_of_nodes))
Instantiate the 'LandslideProbability' component to work on this grid,
and run it.
>>> ls_prob = LandslideProbability(grid)
>>> np.allclose(grid.at_node['landslide__probability_of_failure'], 0.)
True
Run the *calculate_landslide_probability* method to update output
variables with grid
>>> ls_prob.calculate_landslide_probability()
Check the output variable names.
>>> sorted(ls_prob.output_var_names) # doctest: +NORMALIZE_WHITESPACE
['landslide__probability_of_failure',
'soil__mean_relative_wetness',
'soil__probability_of_saturation']
Check the output from the component, including array at one node.
>>> np.allclose(grid.at_node['landslide__probability_of_failure'], 0.)
False
>>> core_nodes = ls_prob.grid.core_nodes
References
----------
**Required Software Citation(s) Specific to this Component**
Strauch, R., Istanbulluoglu, E., Nudurupati, S., Bandaragoda, C.,
Gasparini, N., Tucker, G. (2018). A hydroclimatological approach to
predicting regional landslide probability using Landlab. Earth Surface
Dynamics, 6(1), 49-75. https://dx.doi.org/10.5194/esurf-6-49-2018
**Additional References**
None Listed
"""
# component name
_name = "Landslide Probability"
_unit_agnostic = False
__version__ = "1.0"
_cite_as = """
@article{strauch2018hydroclimatological,
author = {Strauch, Ronda and Istanbulluoglu, Erkan and Nudurupati,
Sai Siddhartha and Bandaragoda, Christina and Gasparini, Nicole M and
Tucker, Gregory E},
title = {{A hydroclimatological approach to predicting regional landslide
probability using Landlab}},
issn = {2196-6311},
doi = {10.5194/esurf-6-49-2018},
pages = {49--75},
number = {1},
volume = {6},
journal = {Earth Surface Dynamics},
year = {2018}
}
"""
_info = {
"landslide__probability_of_failure": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "None",
"mapping": "node",
"doc": "number of times FS is <=1 out of number of iterations user selected",
},
"soil__density": {
"dtype": float,
"intent": "in",
"optional": False,
"units": "kg/m3",
"mapping": "node",
"doc": "wet bulk density of soil",
},
"soil__internal_friction_angle": {
"dtype": float,
"intent": "in",
"optional": False,
"units": "degrees",
"mapping": "node",
"doc": "critical angle just before failure due to friction between particles",
},
"soil__maximum_total_cohesion": {
"dtype": float,
"intent": "in",
"optional": False,
"units": "Pa or kg/m-s2",
"mapping": "node",
"doc": "maximum of combined root and soil cohesion at node",
},
"soil__mean_relative_wetness": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "None",
"mapping": "node",
"doc": "Indicator of soil wetness; relative depth perched water table within the soil layer",
},
"soil__minimum_total_cohesion": {
"dtype": float,
"intent": "in",
"optional": False,
"units": "Pa or kg/m-s2",
"mapping": "node",
"doc": "minimum of combined root and soil cohesion at node",
},
"soil__mode_total_cohesion": {
"dtype": float,
"intent": "in",
"optional": False,
"units": "Pa or kg/m-s2",
"mapping": "node",
"doc": "mode of combined root and soil cohesion at node",
},
"soil__probability_of_saturation": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "None",
"mapping": "node",
"doc": "number of times relative wetness is >=1 out of number of iterations user selected",
},
"soil__saturated_hydraulic_conductivity": {
"dtype": float,
"intent": "in",
"optional": False,
"units": "m/day",
"mapping": "node",
"doc": "mode rate of water transmitted through soil - provided if transmissivity is NOT provided to calculate tranmissivity with soil depth",
},
"soil__thickness": {
"dtype": float,
"intent": "in",
"optional": False,
"units": "m",
"mapping": "node",
"doc": "soil depth to restrictive layer",
},
"soil__transmissivity": {
"dtype": float,
"intent": "in",
"optional": False,
"units": "m2/day",
"mapping": "node",
"doc": "mode rate of water transmitted through a unit width of saturated soil - either provided or calculated with Ksat and soil depth",
},
"topographic__slope": {
"dtype": float,
"intent": "in",
"optional": False,
"units": "tan theta",
"mapping": "node",
"doc": "gradient of the ground surface",
},
"topographic__specific_contributing_area": {
"dtype": float,
"intent": "in",
"optional": False,
"units": "m",
"mapping": "node",
"doc": "specific contributing (upslope area/cell face ) that drains to node",
},
}
def __init__(
self,
grid,
number_of_iterations=250,
g=scipy.constants.g,
groundwater__recharge_distribution="uniform",
groundwater__recharge_min_value=20.0,
groundwater__recharge_max_value=120.0,
groundwater__recharge_mean=None,
groundwater__recharge_standard_deviation=None,
groundwater__recharge_HSD_inputs=[],
seed=0,
):
"""
Parameters
----------
grid: RasterModelGrid
A raster grid.
number_of_iterations: int, optional
Number of iterations to run Monte Carlo simulation (default=250).
groundwater__recharge_distribution: str, optional
single word indicating recharge distribution, either 'uniform',
'lognormal', 'lognormal_spatial,' or 'data_driven_spatial'.
(default='uniform')
groundwater__recharge_min_value: float, optional (mm/d)
minium groundwater recharge for 'uniform' (default=20.)
groundwater__recharge_max_value: float, optional (mm/d)
maximum groundwater recharge for 'uniform' (default=120.)
groundwater__recharge_mean: float, optional (mm/d)
mean grounwater recharge for 'lognormal'
and 'lognormal_spatial' (default=None)
groundwater__recharge_standard_deviation: float, optional (mm/d)
standard deviation of grounwater recharge for 'lognormal'
and 'lognormal_spatial' (default=None)
groundwater__recharge_HSD_inputs: list, optional
list of 3 dictionaries in order (default=[]) - HSD_dict
{Hydrologic Source Domain (HSD) keys: recharge numpy array values},
{node IDs keys: list of HSD_Id values}, HSD_fractions {node IDS
keys: list of HSD fractions values} (none)
Note: this input method is a very specific one, and to use this method,
one has to refer Ref 1 & Ref 2 mentioned above, as this set of
inputs require rigorous pre-processing of data.
g: float, optional (m/sec^2)
acceleration due to gravity.
seed: int, optional
seed for random number generation. if seed is assigned any value
other than the default value of zero, it will create different
sequence. To create a certain sequence repititively, use the same
value as input for seed.
"""
# Initialize seeded random number generation
self._seed_generator(seed)
super().__init__(grid)
# Store parameters and do unit conversions
self._n = int(number_of_iterations)
self._g = g
self._groundwater__recharge_distribution = groundwater__recharge_distribution
# Following code will deal with the input distribution and associated
# parameters
# Uniform distribution
if self._groundwater__recharge_distribution == "uniform":
self._recharge_min = groundwater__recharge_min_value
self._recharge_max = groundwater__recharge_max_value
self._Re = np.random.uniform(
self._recharge_min, self._recharge_max, size=self._n
)
self._Re /= 1000.0 # Convert mm to m
# Lognormal Distribution - Uniform in space
elif self._groundwater__recharge_distribution == "lognormal":
assert (
groundwater__recharge_mean is not None
), "Input mean of the distribution!"
assert (
groundwater__recharge_standard_deviation is not None
), "Input standard deviation of the distribution!"
self._recharge_mean = groundwater__recharge_mean
self._recharge_stdev = groundwater__recharge_standard_deviation
self._mu_lognormal = np.log(
(self._recharge_mean**2)
/ np.sqrt(self._recharge_stdev**2 + self._recharge_mean**2)
)
self._sigma_lognormal = np.sqrt(
np.log((self._recharge_stdev**2) / (self._recharge_mean**2) + 1)
)
self._Re = np.random.lognormal(
self._mu_lognormal, self._sigma_lognormal, self._n
)
self._Re /= 1000.0 # Convert mm to m
# Lognormal Distribution - Variable in space
elif self._groundwater__recharge_distribution == "lognormal_spatial":
assert groundwater__recharge_mean.shape[0] == (
self._grid.number_of_nodes
), "Input array should be of the length of grid.number_of_nodes!"
assert groundwater__recharge_standard_deviation.shape[0] == (
self._grid.number_of_nodes
), "Input array should be of the length of grid.number_of_nodes!"
self._recharge_mean = groundwater__recharge_mean
self._recharge_stdev = groundwater__recharge_standard_deviation
# Custom HSD inputs - Hydrologic Source Domain -> Model Domain
elif self._groundwater__recharge_distribution == "data_driven_spatial":
self._HSD_dict = groundwater__recharge_HSD_inputs[0]
self._HSD_id_dict = groundwater__recharge_HSD_inputs[1]
self._fract_dict = groundwater__recharge_HSD_inputs[2]
self._interpolate_HSD_dict()
# Check if all output fields are initialized
self.initialize_output_fields()
# Create a switch to imply whether Ksat is provided.
if np.all(self._grid.at_node["soil__saturated_hydraulic_conductivity"] == 0):
self._Ksat_provided = 0 # False
else:
self._Ksat_provided = 1 # True
self._nodal_values = self._grid.at_node
def calculate_factor_of_safety(self, i):
"""Method to calculate factor of safety.
Method calculates factor-of-safety stability index by using
node specific parameters, creating distributions of these parameters,
and calculating the index by sampling these distributions 'n' times.
The index is calculated from the 'infinite slope stabilty
factor-of-safety equation' in the format of Pack RT, Tarboton DG,
and Goodwin CN (1998),The SINMAP approach to terrain stability mapping.
Parameters
----------
i: int
index of core node ID.
"""
# generate distributions to sample from to provide input parameters
# currently triangle distribution using mode, min, & max
self._a = np.float32(
self._grid.at_node["topographic__specific_contributing_area"][i]
)
self._theta = np.float32(self._grid.at_node["topographic__slope"][i])
self._Tmode = np.float32(self._grid.at_node["soil__transmissivity"][i])
self._Ksatmode = np.float32(
self._grid.at_node["soil__saturated_hydraulic_conductivity"][i]
)
self._Cmode = np.float32(self._grid.at_node["soil__mode_total_cohesion"][i])
self._Cmin = np.float32(self._grid.at_node["soil__minimum_total_cohesion"][i])
self._Cmax = np.float32(self._grid.at_node["soil__maximum_total_cohesion"][i])
self._phi_mode = np.float32(
self._grid.at_node["soil__internal_friction_angle"][i]
)
self._rho = np.float32(self._grid.at_node["soil__density"][i])
self._hs_mode = np.float32(self._grid.at_node["soil__thickness"][i])
# recharge distribution based on distribution type
if self._groundwater__recharge_distribution == "data_driven_spatial":
self._calculate_HSD_recharge(i)
self._Re /= 1000.0 # mm->m
elif self._groundwater__recharge_distribution == "lognormal_spatial":
mu_lognormal = np.log(
(self._recharge_mean[i] ** 2)
/ np.sqrt(self._recharge_stdev[i] ** 2 + self._recharge_mean[i] ** 2)
)
sigma_lognormal = np.sqrt(
np.log(
(self._recharge_stdev[i] ** 2) / (self._recharge_mean[i] ** 2) + 1
)
)
self._Re = np.random.lognormal(mu_lognormal, sigma_lognormal, self._n)
self._Re /= 1000.0 # Convert mm to m
# Cohesion
# if don't provide fields of min and max C, uncomment 2 lines below
# Cmin = self._Cmode-0.3*self._Cmode
# Cmax = self._Cmode+0.3*self._Cmode
self._C = np.random.triangular(
self._Cmin, self._Cmode, self._Cmax, size=self._n
)
# phi - internal angle of friction provided in degrees
phi_min = self._phi_mode - 0.18 * self._phi_mode
phi_max = self._phi_mode + 0.32 * self._phi_mode
self._phi = np.random.triangular(phi_min, self._phi_mode, phi_max, size=self._n)
# soil thickness
# hs_min = min(0.005, self._hs_mode-0.3*self._hs_mode) # Alternative
hs_min = self._hs_mode - 0.3 * self._hs_mode
hs_max = self._hs_mode + 0.1 * self._hs_mode
self._hs = np.random.triangular(hs_min, self._hs_mode, hs_max, size=self._n)
self._hs[self._hs <= 0.0] = 0.005
if self._Ksat_provided:
# Hydraulic conductivity (Ksat)
Ksatmin = self._Ksatmode - (0.3 * self._Ksatmode)
Ksatmax = self._Ksatmode + (0.1 * self._Ksatmode)
self._Ksat = np.random.triangular(
Ksatmin, self._Ksatmode, Ksatmax, size=self._n
)
self._T = self._Ksat * self._hs
else:
# Transmissivity (T)
Tmin = self._Tmode - (0.3 * self._Tmode)
Tmax = self._Tmode + (0.1 * self._Tmode)
self._T = np.random.triangular(Tmin, self._Tmode, Tmax, size=self._n)
# calculate Factor of Safety for n number of times
# calculate components of FS equation
self._C_dim = self._C / (
self._hs * self._rho * self._g
) # dimensionless cohesion
self._rel_wetness = ((self._Re) / self._T) * (
self._a / np.sin(np.arctan(self._theta))
) # relative wetness
# calculate probability of saturation
countr = 0
for val in self._rel_wetness: # find how many RW values >= 1
if val >= 1.0:
countr = countr + 1 # number with RW values (>=1)
# probability: No. high RW values/total No. of values (n)
self._soil__probability_of_saturation = np.float32(countr) / self._n
# Maximum Rel_wetness = 1.0
np.place(self._rel_wetness, self._rel_wetness > 1, 1.0)
self._soil__mean_relative_wetness = np.mean(self._rel_wetness)
Y = np.tan(np.radians(self._phi)) * (1 - (self._rel_wetness * 0.5))
# convert from degrees; 0.5 = water to soil density ratio
# calculate Factor-of-safety
self._FS = (self._C_dim / np.sin(np.arctan(self._theta))) + (
np.cos(np.arctan(self._theta)) * (Y / np.sin(np.arctan(self._theta)))
)
count = 0
for val in self._FS: # find how many FS values <= 1
if val <= 1.0:
count = count + 1 # number with unstable FS values (<=1)
# probability: No. unstable values/total No. of values (n)
self._landslide__probability_of_failure = np.float32(count) / self._n
def calculate_landslide_probability(self):
"""Main method of Landslide Probability class.
Method creates arrays for output variables then loops through
all the core nodes to run the method
'calculate_factor_of_safety.' Output parameters probability of
failure, mean relative wetness, and probability of saturation
are assigned as fields to nodes.
"""
# Create arrays for data with -9999 as default to store output
self._mean_Relative_Wetness = np.full(self._grid.number_of_nodes, -9999.0)
self._prob_fail = np.full(self._grid.number_of_nodes, -9999.0)
self._prob_sat = np.full(self._grid.number_of_nodes, -9999.0)
# Run factor of safety Monte Carlo for all core nodes in domain
# i refers to each core node id
for i in self._grid.core_nodes:
self.calculate_factor_of_safety(i)
# Populate storage arrays with calculated values
self._mean_Relative_Wetness[i] = self._soil__mean_relative_wetness
self._prob_fail[i] = self._landslide__probability_of_failure
self._prob_sat[i] = self._soil__probability_of_saturation
# Values can't be negative
self._mean_Relative_Wetness[self._mean_Relative_Wetness < 0.0] = 0.0
self._prob_fail[self._prob_fail < 0.0] = 0.0
# assign output fields to nodes
self._grid.at_node["soil__mean_relative_wetness"] = self._mean_Relative_Wetness
self._grid.at_node["landslide__probability_of_failure"] = self._prob_fail
self._grid.at_node["soil__probability_of_saturation"] = self._prob_sat
def _seed_generator(self, seed=0):
"""Method to initiate random seed.
Seed the random-number generator. This method will create the
same sequence again by re-seeding with the same value (default
value is zero). To create a sequence other than the default,
assign non-zero value for seed.
"""
np.random.seed(seed)
def _interpolate_HSD_dict(self):
"""Method to extrapolate input data.
This method uses a non-parametric approach to expand the input
recharge array to the length of number of iterations. Output is
a new dictionary of interpolated recharge for each HSD id.
"""
HSD_dict = copy.deepcopy(self._HSD_dict)
# First generate interpolated Re for each HSD grid
Yrand = np.sort(np.random.rand(self._n))
# n random numbers (0 to 1) in a column
for vkey in HSD_dict.keys():
if isinstance(HSD_dict[vkey], int):
continue # loop back up if value is integer (e.g. -9999)
Re_temp = HSD_dict[vkey] # an array of annual Re for 1 HSD grid
Fx = ECDF(Re_temp) # instantiate to get probabilities with Re
Fx_ = Fx(Re_temp) # probability array associated with Re data
# interpolate function based on recharge data & probability
f = interpolate.interp1d(
Fx_, Re_temp, bounds_error=False, fill_value=min(Re_temp)
)
# array of Re interpolated from Yrand probabilities (n count)
Re_interpolated = f(Yrand)
# replace values in HSD_dict with interpolated Re
HSD_dict[vkey] = Re_interpolated
self._interpolated_HSD_dict = HSD_dict
def _calculate_HSD_recharge(self, i):
"""Method to calculate recharge based on upstream fractions.
This method calculates the resultant recharge at node i of the
model domain, using recharge of contributing HSD ids and the
areal fractions of upstream contributing HSD ids. Output is a
numpy array of recharge at node i.
"""
store_Re = np.zeros(self._n)
HSD_id_list = self._HSD_id_dict[i]
fract_list = self._fract_dict[i]
for j in range(0, len(HSD_id_list)):
Re_temp = self._interpolated_HSD_dict[HSD_id_list[j]]
fract_temp = fract_list[j]
Re_adj = Re_temp * fract_temp
store_Re = np.vstack((store_Re, np.array(Re_adj)))
self._Re = np.sum(store_Re, 0)
| {
"content_hash": "c973d84a40464140e54e3cb4720ff3f2",
"timestamp": "",
"source": "github",
"line_count": 659,
"max_line_length": 154,
"avg_line_length": 42.53566009104704,
"alnum_prop": 0.5982662052727338,
"repo_name": "landlab/landlab",
"id": "3ebdb167884aac75917d170e2c0d4ecd00e4fcc5",
"size": "28049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "landlab/components/landslides/landslide_probability.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "762"
},
{
"name": "Cython",
"bytes": "265735"
},
{
"name": "Gherkin",
"bytes": "1601"
},
{
"name": "Jupyter Notebook",
"bytes": "1373117"
},
{
"name": "Makefile",
"bytes": "2250"
},
{
"name": "Python",
"bytes": "4497175"
},
{
"name": "Roff",
"bytes": "445"
},
{
"name": "Shell",
"bytes": "1073"
},
{
"name": "TeX",
"bytes": "42252"
}
],
"symlink_target": ""
} |
#include <errno.h>
#include <priv/alt_file.h>
#include <alt_types.h>
#include "altera_up_avalon_audio_and_video_config.h"
#include "altera_up_avalon_audio_and_video_config_regs.h"
alt_up_av_config_dev* alt_up_av_config_open_dev(const char* name)
{
// find the device from the device list
// (see altera_hal/HAL/inc/priv/alt_file.h
// and altera_hal/HAL/src/alt_find_dev.c
// for details)
alt_up_av_config_dev *dev = (alt_up_av_config_dev*)alt_find_dev(name, &alt_dev_list);
dev->type = (IORD_ALT_UP_AV_CONFIG_STATUS(dev->base) & ALT_UP_AV_CONFIG_STATUS_CFG_MSK) >> ALT_UP_AV_CONFIG_STATUS_CFG_OFST;
return dev;
}
/**
* @brief Send data to the device
*
* @param av_config -- the device data structure
* @param addr -- the address of the destination device register
* @param data -- the data to be sent
*
* @return 0 for success
**/
int alt_up_av_config_write_data(alt_up_av_config_dev *av_config, alt_u32 addr, alt_u32 data)
{
// set the register address in the Address register
IOWR_ALT_UP_AV_CONFIG_ADDRESS(av_config->base, addr & ALT_UP_AV_CONFIG_ADDRESS_VALID_MSK);
// write data to the device Data register
IOWR_ALT_UP_AV_CONFIG_DATA(av_config->base, data & ALT_UP_AV_CONFIG_DATA_VALID_MSK);
return 0;
}
/**
* @brief Reads data from the device
*
* @param av_config -- the device data structure
* @param addr -- the address of the source device register
* @param data -- a pointer to the location where the read data should be stored
*
* @return 0 for success or -1 for failure
**/
int alt_up_av_config_read_data(alt_up_av_config_dev *av_config, alt_u32 addr, alt_u32 * data)
{
// set the register address in the Address register
IOWR_ALT_UP_AV_CONFIG_ADDRESS(av_config->base, addr & ALT_UP_AV_CONFIG_ADDRESS_VALID_MSK);
// write data to the device Data register
*(data) = IORD_ALT_UP_AV_CONFIG_DATA(av_config->base);
// check acknowledge is 0
if (alt_up_av_config_read_acknowledge(av_config))
return -1;
return 0;
}
int alt_up_av_config_reset(alt_up_av_config_dev *av_config)
{
IOWR_ALT_UP_AV_CONFIG_CONTROL_RESET(av_config->base);
return 0;
}
int alt_up_av_config_enable_interrupt(alt_up_av_config_dev *av_config)
{
IOWR_ALT_UP_AV_CONFIG_CONTROL_RIE_ENABLE(av_config->base);
return 0;
}
int alt_up_av_config_disable_interrupt(alt_up_av_config_dev *av_config)
{
IOWR_ALT_UP_AV_CONFIG_CONTROL_RIE_DISABLE(av_config->base);
return 0;
}
int alt_up_av_config_read_acknowledge(alt_up_av_config_dev *av_config)
{
if ((IORD_ALT_UP_AV_CONFIG_STATUS(av_config->base) & ALT_UP_AV_CONFIG_STATUS_RDY_MSK) >> ALT_UP_AV_CONFIG_STATUS_RDY_OFST)
return ((IORD_ALT_UP_AV_CONFIG_STATUS(av_config->base) & ALT_UP_AV_CONFIG_STATUS_ACK_MSK) >> ALT_UP_AV_CONFIG_STATUS_ACK_OFST);
return -1;
}
int alt_up_av_config_read_ready(alt_up_av_config_dev *av_config)
{
return ((IORD_ALT_UP_AV_CONFIG_STATUS(av_config->base) & ALT_UP_AV_CONFIG_STATUS_RDY_MSK) >> ALT_UP_AV_CONFIG_STATUS_RDY_OFST);
}
int alt_up_av_config_write_audio_cfg_register(alt_up_av_config_dev *av_config, alt_u32 addr, alt_u32 data)
{
// check the device is indeed audio configuration
if (av_config->type != ON_BOARD_AUDIO_ONLY_CONFIG && av_config->type != ON_BOARD_DE2_CONFIG && av_config->type != ON_BOARD_DE2_70_CONFIG && av_config->type != ON_BOARD_DE2_115_CONFIG)
return -EINVAL;
// set the audio device to be configured in the Control register
SELECTED_ON_BOARD_DEVICE device = AUDIO_DEVICE;
IOWR_ALT_UP_AV_CONFIG_CONTROL_DEVICE(av_config->base, device);
return alt_up_av_config_write_data(av_config, addr, data);
}
int alt_up_av_config_read_video_cfg_register(alt_up_av_config_dev *av_config, alt_u32 addr, alt_u32 *data, SELECTED_ON_BOARD_DEVICE video_port)
{
// check the device is indeed audio configuration
if (av_config->type != ON_BOARD_DE2_CONFIG && av_config->type != ON_BOARD_DE2_70_CONFIG && av_config->type != ON_BOARD_DE2_115_CONFIG)
return -EINVAL;
if (av_config->type == ON_BOARD_DE2_CONFIG || av_config->type == ON_BOARD_DE2_115_CONFIG)
{
if (video_port != FIRST_VIDEO_DEVICE)
return -EINVAL;
} else {
if ((video_port != FIRST_VIDEO_DEVICE) && (video_port != SECOND_VIDEO_DEVICE))
return -EINVAL;
}
// set the video device to be configured in the Control register
IOWR_ALT_UP_AV_CONFIG_CONTROL_DEVICE(av_config->base, video_port);
return alt_up_av_config_read_data(av_config, addr, data);
}
int alt_up_av_config_write_video_cfg_register(alt_up_av_config_dev *av_config, alt_u32 addr, alt_u32 data, SELECTED_ON_BOARD_DEVICE video_port)
{
// check the device is indeed audio configuration
if (av_config->type != ON_BOARD_DE2_CONFIG && av_config->type != ON_BOARD_DE2_70_CONFIG && av_config->type != ON_BOARD_DE2_115_CONFIG)
return -EINVAL;
if (av_config->type == ON_BOARD_DE2_CONFIG || av_config->type == ON_BOARD_DE2_115_CONFIG)
{
if (video_port != FIRST_VIDEO_DEVICE)
return -EINVAL;
} else {
if ((video_port != FIRST_VIDEO_DEVICE) && (video_port != SECOND_VIDEO_DEVICE))
return -EINVAL;
}
// set the video device to be configured in the Control register
IOWR_ALT_UP_AV_CONFIG_CONTROL_DEVICE(av_config->base, video_port);
return alt_up_av_config_write_data(av_config, addr, data);
}
int alt_up_av_config_read_DC2_cfg_register(alt_up_av_config_dev *av_config, alt_u32 addr, alt_u32 *data)
{
if (av_config->type != TRDB_DC2_CONFIG)
return -EINVAL;
return alt_up_av_config_read_data(av_config, addr, data);
}
int alt_up_av_config_write_DC2_cfg_register(alt_up_av_config_dev *av_config, alt_u32 addr, alt_u32 data)
{
if (av_config->type != TRDB_DC2_CONFIG)
return -EINVAL;
return alt_up_av_config_write_data(av_config, addr, data);
}
int alt_up_av_config_read_D5M_cfg_register(alt_up_av_config_dev *av_config, alt_u32 addr, alt_u32 *data)
{
if (av_config->type != TRDB_D5M_CONFIG)
return -EINVAL;
return alt_up_av_config_read_data(av_config, addr, data);
}
int alt_up_av_config_write_D5M_cfg_register(alt_up_av_config_dev *av_config, alt_u32 addr, alt_u32 data)
{
if (av_config->type != TRDB_D5M_CONFIG)
return -EINVAL;
return alt_up_av_config_write_data(av_config, addr, data);
}
int alt_up_av_config_read_LTM_cfg_register(alt_up_av_config_dev *av_config, alt_u32 addr, alt_u32 *data)
{
if (av_config->type != TRDB_LTM_CONFIG)
return -EINVAL;
return alt_up_av_config_read_data(av_config, addr, data);
}
int alt_up_av_config_write_LTM_cfg_register(alt_up_av_config_dev *av_config, alt_u32 addr, alt_u32 data)
{
if (av_config->type != TRDB_LTM_CONFIG)
return -EINVAL;
return alt_up_av_config_write_data(av_config, addr, data);
}
| {
"content_hash": "a4614851dbb2482ce6e3c6a54be863c1",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 184,
"avg_line_length": 34.050251256281406,
"alnum_prop": 0.6806375442739079,
"repo_name": "Saucyz/explode",
"id": "217891e11c5404b1a9b3104bbdf371e79563c033",
"size": "9123",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Hardware/Mod2/software/Mod2_DE2_program_bsp/drivers/src/altera_up_avalon_audio_and_video_config.c",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "72479"
},
{
"name": "C",
"bytes": "758259"
},
{
"name": "C++",
"bytes": "108303"
},
{
"name": "HTML",
"bytes": "263059"
},
{
"name": "Logos",
"bytes": "13573"
},
{
"name": "Makefile",
"bytes": "191151"
},
{
"name": "Mathematica",
"bytes": "346"
},
{
"name": "Python",
"bytes": "87135"
},
{
"name": "Shell",
"bytes": "4813"
},
{
"name": "Standard ML",
"bytes": "32"
},
{
"name": "SystemVerilog",
"bytes": "379091"
},
{
"name": "VHDL",
"bytes": "16752591"
},
{
"name": "Verilog",
"bytes": "1247774"
}
],
"symlink_target": ""
} |
<?php
namespace Phlexible\Component\MediaCache\Worker;
use Phlexible\Component\MediaCache\Domain\CacheItem;
use Phlexible\Component\MediaCache\Model\CacheManagerInterface;
use Phlexible\Component\MediaTemplate\Model\TemplateInterface;
use Phlexible\Component\MediaType\Model\MediaType;
use Psr\Log\LoggerInterface;
/**
* Null cache worker.
*
* @author Stephan Wentz <sw@brainbits.net>
*/
class NullWorker implements WorkerInterface
{
/**
* @var CacheManagerInterface
*/
private $cacheManager;
/**
* @var LoggerInterface
*/
private $logger;
/**
* @param CacheManagerInterface $cacheManager
* @param LoggerInterface $logger
*/
public function __construct(
CacheManagerInterface $cacheManager,
LoggerInterface $logger
) {
$this->cacheManager = $cacheManager;
$this->logger = $logger;
}
/**
* {@inheritdoc}
*/
public function getLogger()
{
return $this->logger;
}
/**
* {@inheritdoc}
*/
public function accept(TemplateInterface $template, InputDescriptor $input, MediaType $mediaType)
{
return true;
}
/**
* {@inheritdoc}
*/
public function process(CacheItem $cacheItem, TemplateInterface $template, InputDescriptor $input, MediaType $mediaType)
{
if ($cacheItem->getCacheStatus() !== CacheItem::STATUS_OK && $cacheItem->getCacheStatus() !== CacheItem::STATUS_DELEGATE) {
$this->cacheManager->deleteCacheItem($cacheItem);
$cacheItem->setCacheStatus(CacheItem::STATUS_DELETED);
}
$cacheItem->setQueueStatus(CacheItem::QUEUE_NOT_APPLICABLE);
}
}
| {
"content_hash": "1648db1fdc68446bb65cd7ef61f2a072",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 131,
"avg_line_length": 24.73913043478261,
"alnum_prop": 0.6479203280609256,
"repo_name": "phlexible/phlexible",
"id": "05cd67aa7761d22621915c880b1e55e2818f90c0",
"size": "1933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Phlexible/Component/MediaCache/Worker/NullWorker.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "340698"
},
{
"name": "HTML",
"bytes": "18550"
},
{
"name": "JavaScript",
"bytes": "5384154"
},
{
"name": "PHP",
"bytes": "2551770"
}
],
"symlink_target": ""
} |
@interface NSString (NSStringExtensions) <LuaCoding>
- (id) string;
- (unsigned short)unsignedShortValue;
- (NSComparisonResult) numericCompare:(NSString *)string;
- (BOOL)isEqualToCaseInsensitiveString:(NSString *)string;
@end
| {
"content_hash": "005b90e820d405f3c010ae356abc6b48",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 58,
"avg_line_length": 38,
"alnum_prop": 0.7894736842105263,
"repo_name": "gamefreak/Athena",
"id": "391f01add0e1e9257cb98b243d6c4394a2bce2b4",
"size": "437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Wrappers/NSStringExtensions.h",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2883412"
},
{
"name": "Objective-C",
"bytes": "600879"
},
{
"name": "Perl",
"bytes": "31459"
},
{
"name": "Shell",
"bytes": "1439446"
}
],
"symlink_target": ""
} |
#pragma once
#include "il2cpp-config.h"
#ifndef _MSC_VER
# include <alloca.h>
#else
# include <malloc.h>
#endif
#include <stdint.h>
// System.Array
struct Il2CppArray;
#include "mscorlib_System_ValueType1744280289.h"
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winvalid-offsetof"
#pragma clang diagnostic ignored "-Wunused-variable"
#endif
// System.Array/InternalEnumerator`1<System.Reflection.Emit.ILTokenInfo>
struct InternalEnumerator_1_t136423630
{
public:
// System.Array System.Array/InternalEnumerator`1::array
Il2CppArray * ___array_0;
// System.Int32 System.Array/InternalEnumerator`1::idx
int32_t ___idx_1;
public:
inline static int32_t get_offset_of_array_0() { return static_cast<int32_t>(offsetof(InternalEnumerator_1_t136423630, ___array_0)); }
inline Il2CppArray * get_array_0() const { return ___array_0; }
inline Il2CppArray ** get_address_of_array_0() { return &___array_0; }
inline void set_array_0(Il2CppArray * value)
{
___array_0 = value;
Il2CppCodeGenWriteBarrier(&___array_0, value);
}
inline static int32_t get_offset_of_idx_1() { return static_cast<int32_t>(offsetof(InternalEnumerator_1_t136423630, ___idx_1)); }
inline int32_t get_idx_1() const { return ___idx_1; }
inline int32_t* get_address_of_idx_1() { return &___idx_1; }
inline void set_idx_1(int32_t value)
{
___idx_1 = value;
}
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
| {
"content_hash": "833abc5f236356f5f87aa901c90517d9",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 134,
"avg_line_length": 26.74074074074074,
"alnum_prop": 0.7132963988919667,
"repo_name": "Bersaelor/D2",
"id": "3441d7acbfe63ca2885d7f140bf780e5f47ff3aa",
"size": "1446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iOS_app/Classes/Native/mscorlib_System_Array_InternalEnumerator_1_gen136423630.h",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "8761907"
},
{
"name": "C#",
"bytes": "466911"
},
{
"name": "C++",
"bytes": "44327143"
},
{
"name": "GLSL",
"bytes": "308612"
},
{
"name": "Java",
"bytes": "15922"
},
{
"name": "Objective-C",
"bytes": "61244"
},
{
"name": "Objective-C++",
"bytes": "264430"
},
{
"name": "Shell",
"bytes": "1345"
}
],
"symlink_target": ""
} |
package org.apache.camel.component.cxf;
import org.apache.camel.CamelContext;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.spring.SpringCamelContext;
import org.junit.After;
import org.junit.Before;
import org.springframework.context.support.AbstractXmlApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;
public class CxfSpringRouterTest extends CxfSimpleRouterTest {

    /** Classpath location of the Spring configuration defining the CXF endpoints. */
    private static final String SPRING_CONFIG =
            "org/apache/camel/component/cxf/CxfSpringRouterBeans.xml";

    /** Router endpoint URI resolved from the Spring configuration. */
    private static final String ROUTER_ENDPOINT_URI = "cxf:bean:routerEndpoint";

    /** Service endpoint URI resolved from the Spring configuration. */
    private static final String SERVICE_ENDPOINT_URI = "cxf:bean:serviceEndpoint";

    protected AbstractXmlApplicationContext applicationContext;

    @Before
    @Override
    public void setUp() throws Exception {
        // Reserve the test port before the Spring context is created.
        CXFTestSupport.getPort1();
        applicationContext = createApplicationContext();
        super.setUp();
        assertNotNull("Should have created a valid spring context", applicationContext);
    }

    @After
    @Override
    public void tearDown() throws Exception {
        // Deliberately do NOT close the application context here: closing it
        // causes trouble during the bus shutdown.
        super.tearDown();
    }

    @Override
    protected RouteBuilder createRouteBuilder() {
        return new RouteBuilder() {
            public void configure() {
                from(ROUTER_ENDPOINT_URI).to(SERVICE_ENDPOINT_URI);
            }
        };
    }

    @Override
    protected CamelContext createCamelContext() throws Exception {
        return SpringCamelContext.springCamelContext(applicationContext, true);
    }

    protected ClassPathXmlApplicationContext createApplicationContext() {
        return new ClassPathXmlApplicationContext(SPRING_CONFIG);
    }
}
| {
"content_hash": "77520dcc0b84fc20a93fb743abdd9cb5",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 109,
"avg_line_length": 30.358490566037737,
"alnum_prop": 0.7215661901802362,
"repo_name": "ullgren/camel",
"id": "3a0f0a92fba525a260aa1c5889460c8b7b75692a",
"size": "2411",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "components/camel-cxf/src/test/java/org/apache/camel/component/cxf/CxfSpringRouterTest.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Apex",
"bytes": "6519"
},
{
"name": "Batchfile",
"bytes": "1518"
},
{
"name": "CSS",
"bytes": "16394"
},
{
"name": "Elm",
"bytes": "10852"
},
{
"name": "FreeMarker",
"bytes": "11410"
},
{
"name": "Groovy",
"bytes": "14490"
},
{
"name": "HTML",
"bytes": "896075"
},
{
"name": "Java",
"bytes": "69929414"
},
{
"name": "JavaScript",
"bytes": "90399"
},
{
"name": "Makefile",
"bytes": "513"
},
{
"name": "Shell",
"bytes": "17108"
},
{
"name": "Tcl",
"bytes": "4974"
},
{
"name": "Thrift",
"bytes": "6979"
},
{
"name": "XQuery",
"bytes": "546"
},
{
"name": "XSLT",
"bytes": "270186"
}
],
"symlink_target": ""
} |
<?php
namespace Diet\CoreBundle\Entity;
use Doctrine\Common\Collections\ArrayCollection;
use Doctrine\ORM\Mapping as ORM;
use Symfony\Component\Security\Core\User\UserInterface;
/**
* Admin
*/
class Admin implements UserInterface, \Serializable
{
    /** @var integer */
    private $id;

    /** @var string */
    private $username;

    /** @var string */
    private $salt;

    /** @var string */
    private $password;

    /** @var boolean */
    private $isActive;

    /** @var string */
    private $email;

    /** @var \Doctrine\Common\Collections\Collection */
    private $adminRoles;

    /**
     * Initialises a new admin: empty role collection, active account,
     * freshly generated salt.
     */
    public function __construct()
    {
        $this->adminRoles = new ArrayCollection();
        $this->isActive = true;
        $this->salt = md5(uniqid(null, true));
    }

    /**
     * Returns the entity identifier.
     *
     * @return integer
     */
    public function getId()
    {
        return $this->id;
    }

    /**
     * Sets the login name.
     *
     * @param string $username
     * @return Admin
     */
    public function setUsername($username)
    {
        $this->username = $username;
        return $this;
    }

    /**
     * Returns the login name.
     *
     * @return string
     */
    public function getUsername()
    {
        return $this->username;
    }

    /**
     * Sets the password salt.
     *
     * @param string $salt
     * @return Admin
     */
    public function setSalt($salt)
    {
        $this->salt = $salt;
        return $this;
    }

    /**
     * Returns the password salt.
     *
     * @return string
     */
    public function getSalt()
    {
        return $this->salt;
    }

    /**
     * Sets the (encoded) password.
     *
     * @param string $password
     * @return Admin
     */
    public function setPassword($password)
    {
        $this->password = $password;
        return $this;
    }

    /**
     * Returns the (encoded) password.
     *
     * @return string
     */
    public function getPassword()
    {
        return $this->password;
    }

    /**
     * Activates or deactivates the account.
     *
     * @param boolean $isActive
     * @return Admin
     */
    public function setIsActive($isActive)
    {
        $this->isActive = $isActive;
        return $this;
    }

    /**
     * Tells whether the account is active.
     *
     * @return boolean
     */
    public function getIsActive()
    {
        return $this->isActive;
    }

    /**
     * Sets the contact email address.
     *
     * @param string $email
     * @return Admin
     */
    public function setEmail($email)
    {
        $this->email = $email;
        return $this;
    }

    /**
     * Returns the contact email address.
     *
     * @return string
     */
    public function getEmail()
    {
        return $this->email;
    }

    /**
     * Grants a role to this admin.
     *
     * @param \Diet\CoreBundle\Entity\Role $adminRoles
     * @return Admin
     */
    public function addAdminRole(\Diet\CoreBundle\Entity\Role $adminRoles)
    {
        $this->adminRoles[] = $adminRoles;
        return $this;
    }

    /**
     * Revokes a role from this admin.
     *
     * @param \Diet\CoreBundle\Entity\Role $adminRoles
     */
    public function removeAdminRole(\Diet\CoreBundle\Entity\Role $adminRoles)
    {
        $this->adminRoles->removeElement($adminRoles);
    }

    /**
     * Returns the role collection.
     *
     * @return \Doctrine\Common\Collections\Collection
     */
    public function getAdminRoles()
    {
        return $this->adminRoles;
    }

    /**
     * Returns the granted roles as a plain array, as required by UserInterface.
     *
     * @return array
     */
    public function getRoles()
    {
        return $this->getAdminRoles()->toArray();
    }

    /**
     * No transient credentials are kept on this object, so there is
     * nothing to wipe here.
     */
    public function eraseCredentials()
    {
    }

    /**
     * Only the identifier is serialized into the session; the rest of the
     * entity is reloaded from storage.
     *
     * @see \Serializable::serialize()
     */
    public function serialize()
    {
        return serialize(array(
            $this->id,
        ));
    }

    /**
     * Restores the identifier written by {@see serialize()}.
     *
     * @see \Serializable::unserialize()
     */
    public function unserialize($serialized)
    {
        list($this->id, ) = unserialize($serialized);
    }
}
| {
"content_hash": "f80bf42bfa0a3a9f17f84ce2e7a7bf5e",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 77,
"avg_line_length": 16.358024691358025,
"alnum_prop": 0.5033962264150943,
"repo_name": "tomosooon/jp.co.3gweight",
"id": "7b53ba6e9c9839973ae7797a40de2cdae75fae7a",
"size": "3975",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/Diet/CoreBundle/Entity/Admin.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "255220"
},
{
"name": "JavaScript",
"bytes": "809453"
},
{
"name": "PHP",
"bytes": "101575"
},
{
"name": "Shell",
"bytes": "2532"
}
],
"symlink_target": ""
} |
module Magaz
  # View helper for the engine's dashboard pages.
  # Currently an empty placeholder; Rails resolves it for dashboard views.
  module DashboardHelper
  end
end
| {
"content_hash": "00fd0db762fe9eef17c7caa43827ef07",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 24,
"avg_line_length": 12,
"alnum_prop": 0.7916666666666666,
"repo_name": "nmix/magaz",
"id": "9b09009684ae26702e028a085e0be1f1eed50daf",
"size": "48",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/helpers/magaz/dashboard_helper.rb",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5355"
},
{
"name": "HTML",
"bytes": "75385"
},
{
"name": "JavaScript",
"bytes": "5179"
},
{
"name": "Ruby",
"bytes": "312595"
}
],
"symlink_target": ""
} |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_25) on Thu Jul 30 20:27:38 EDT 2015 -->
<title>DifferentialControlLoopCoefficients</title>
<meta name="date" content="2015-07-30">
<link rel="stylesheet" type="text/css" href="../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="DifferentialControlLoopCoefficients";
}
}
catch(err) {
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../index-all.html">Index</a></li>
<li><a href="../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev Class</li>
<li><a href="../../../../com/qualcomm/robotcore/util/ElapsedTime.html" title="class in com.qualcomm.robotcore.util"><span class="typeNameLink">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../index.html?com/qualcomm/robotcore/util/DifferentialControlLoopCoefficients.html" target="_top">Frames</a></li>
<li><a href="DifferentialControlLoopCoefficients.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li><a href="#field.summary">Field</a> | </li>
<li><a href="#constructor.summary">Constr</a> | </li>
<li><a href="#methods.inherited.from.class.java.lang.Object">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li><a href="#field.detail">Field</a> | </li>
<li><a href="#constructor.detail">Constr</a> | </li>
<li>Method</li>
</ul>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<!-- ======== START OF CLASS DATA ======== -->
<div class="header">
<div class="subTitle">com.qualcomm.robotcore.util</div>
<h2 title="Class DifferentialControlLoopCoefficients" class="title">Class DifferentialControlLoopCoefficients</h2>
</div>
<div class="contentContainer">
<ul class="inheritance">
<li>java.lang.Object</li>
<li>
<ul class="inheritance">
<li>com.qualcomm.robotcore.util.DifferentialControlLoopCoefficients</li>
</ul>
</li>
</ul>
<div class="description">
<ul class="blockList">
<li class="blockList">
<hr>
<br>
<pre>public class <span class="typeNameLabel">DifferentialControlLoopCoefficients</span>
extends java.lang.Object</pre>
<div class="block">Contains p, i, and d coefficients for control loops</div>
</li>
</ul>
</div>
<div class="summary">
<ul class="blockList">
<li class="blockList">
<!-- =========== FIELD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="field.summary">
<!-- -->
</a>
<h3>Field Summary</h3>
<table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Field Summary table, listing fields, and an explanation">
<caption><span>Fields</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Field and Description</th>
</tr>
<tr class="altColor">
<td class="colFirst"><code>double</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../com/qualcomm/robotcore/util/DifferentialControlLoopCoefficients.html#d">d</a></span></code>
<div class="block">d coefficient</div>
</td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>double</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../com/qualcomm/robotcore/util/DifferentialControlLoopCoefficients.html#i">i</a></span></code>
<div class="block">i coefficient</div>
</td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>double</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../com/qualcomm/robotcore/util/DifferentialControlLoopCoefficients.html#p">p</a></span></code>
<div class="block">p coefficient</div>
</td>
</tr>
</table>
</li>
</ul>
<!-- ======== CONSTRUCTOR SUMMARY ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor.summary">
<!-- -->
</a>
<h3>Constructor Summary</h3>
<table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Constructor Summary table, listing constructors, and an explanation">
<caption><span>Constructors</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colOne" scope="col">Constructor and Description</th>
</tr>
<tr class="altColor">
<td class="colOne"><code><span class="memberNameLink"><a href="../../../../com/qualcomm/robotcore/util/DifferentialControlLoopCoefficients.html#DifferentialControlLoopCoefficients--">DifferentialControlLoopCoefficients</a></span>()</code>
<div class="block">Constructor with coefficients set to 0.0</div>
</td>
</tr>
<tr class="rowColor">
<td class="colOne"><code><span class="memberNameLink"><a href="../../../../com/qualcomm/robotcore/util/DifferentialControlLoopCoefficients.html#DifferentialControlLoopCoefficients-double-double-double-">DifferentialControlLoopCoefficients</a></span>(double p,
double i,
double d)</code>
<div class="block">Constructor with coefficients supplied</div>
</td>
</tr>
</table>
</li>
</ul>
<!-- ========== METHOD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="method.summary">
<!-- -->
</a>
<h3>Method Summary</h3>
<ul class="blockList">
<li class="blockList"><a name="methods.inherited.from.class.java.lang.Object">
<!-- -->
</a>
<h3>Methods inherited from class java.lang.Object</h3>
<code>clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait</code></li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
<div class="details">
<ul class="blockList">
<li class="blockList">
<!-- ============ FIELD DETAIL =========== -->
<ul class="blockList">
<li class="blockList"><a name="field.detail">
<!-- -->
</a>
<h3>Field Detail</h3>
<a name="p">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>p</h4>
<pre>public double p</pre>
<div class="block">p coefficient</div>
</li>
</ul>
<a name="i">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>i</h4>
<pre>public double i</pre>
<div class="block">i coefficient</div>
</li>
</ul>
<a name="d">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>d</h4>
<pre>public double d</pre>
<div class="block">d coefficient</div>
</li>
</ul>
</li>
</ul>
<!-- ========= CONSTRUCTOR DETAIL ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor.detail">
<!-- -->
</a>
<h3>Constructor Detail</h3>
<a name="DifferentialControlLoopCoefficients--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>DifferentialControlLoopCoefficients</h4>
<pre>public DifferentialControlLoopCoefficients()</pre>
<div class="block">Constructor with coefficients set to 0.0</div>
</li>
</ul>
<a name="DifferentialControlLoopCoefficients-double-double-double-">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>DifferentialControlLoopCoefficients</h4>
<pre>public DifferentialControlLoopCoefficients(double p,
double i,
double d)</pre>
<div class="block">Constructor with coefficients supplied</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>p</code> - </dd>
<dd><code>i</code> - </dd>
<dd><code>d</code> - </dd>
</dl>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
</div>
<!-- ========= END OF CLASS DATA ========= -->
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../index-all.html">Index</a></li>
<li><a href="../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev Class</li>
<li><a href="../../../../com/qualcomm/robotcore/util/ElapsedTime.html" title="class in com.qualcomm.robotcore.util"><span class="typeNameLink">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../index.html?com/qualcomm/robotcore/util/DifferentialControlLoopCoefficients.html" target="_top">Frames</a></li>
<li><a href="DifferentialControlLoopCoefficients.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li><a href="#field.summary">Field</a> | </li>
<li><a href="#constructor.summary">Constr</a> | </li>
<li><a href="#methods.inherited.from.class.java.lang.Object">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li><a href="#field.detail">Field</a> | </li>
<li><a href="#constructor.detail">Constr</a> | </li>
<li>Method</li>
</ul>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
| {
"content_hash": "fc9a1fe52cf74bbcd30dbd3a768cf129",
"timestamp": "",
"source": "github",
"line_count": 335,
"max_line_length": 264,
"avg_line_length": 32.88955223880597,
"alnum_prop": 0.6404973679433654,
"repo_name": "pranavburugula/test",
"id": "8bf928f899d39b7b947a2b6a277249dafbe9bb2e",
"size": "11018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/javadoc/com/qualcomm/robotcore/util/DifferentialControlLoopCoefficients.html",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "105223"
}
],
"symlink_target": ""
} |
namespace Upstream {
RingHashLoadBalancer::RingHashLoadBalancer(HostSet& host_set, ClusterStats& stats,
                                           Runtime::Loader& runtime,
                                           Runtime::RandomGenerator& random)
    : host_set_(host_set), stats_(stats), runtime_(runtime), random_(random) {
  // Rebuild both rings whenever cluster membership changes. The added/removed
  // host lists are ignored; refresh() regenerates from the full host set.
  host_set_.addMemberUpdateCb([this](const std::vector<HostSharedPtr>&,
                                     const std::vector<HostSharedPtr>&) -> void { refresh(); });
  // Build the initial rings so the balancer is usable immediately.
  refresh();
}
HostConstSharedPtr RingHashLoadBalancer::chooseHost(const LoadBalancerContext* context) {
  // During global panic we hash over every host (healthy or not); otherwise
  // only the healthy ring is consulted.
  Ring& ring = LoadBalancerUtility::isGlobalPanic(host_set_, stats_, runtime_)
                   ? all_hosts_ring_
                   : healthy_hosts_ring_;
  return ring.chooseHost(context, random_);
}
HostConstSharedPtr RingHashLoadBalancer::Ring::chooseHost(const LoadBalancerContext* context,
                                                          Runtime::RandomGenerator& random) {
  // An empty ring means there are no hosts at all to choose from.
  if (ring_.empty()) {
    return nullptr;
  }

  // If there is no hash in the context, just choose a random value (this effectively becomes
  // the random LB but it won't crash if someone configures it this way).
  uint64_t h;
  if (!context || !context->hashKey().valid()) {
    h = random.random();
  } else {
    h = context->hashKey().value();
  }

  // Ported from https://github.com/RJ/ketama/blob/master/libketama/ketama.c (ketama_get_server)
  // I've generally kept the variable names to make the code easier to compare.
  //
  // Binary search over the sorted ring for the first entry whose hash is >= h,
  // wrapping around to ring_[0] when h is larger than every entry.
  //
  // NOTE: The algorithm depends on using signed integers for lowp, midp, and highp. Do not
  //       change them!
  int64_t lowp = 0;
  int64_t highp = ring_.size();
  while (true) {
    int64_t midp = (lowp + highp) / 2;
    // Ran off the high end of the ring: wrap to the first entry.
    if (midp == static_cast<int64_t>(ring_.size())) {
      return ring_[0].host_;
    }
    uint64_t midval = ring_[midp].hash_;
    uint64_t midval1 = midp == 0 ? 0 : ring_[midp - 1].hash_;
    // h falls in (previous entry, this entry]: this entry owns the hash.
    if (h <= midval && h > midval1) {
      return ring_[midp].host_;
    }
    if (midval < h) {
      lowp = midp + 1;
    } else {
      highp = midp - 1;
    }
    // Search exhausted with no owner found: wrap to the first entry.
    if (lowp > highp) {
      return ring_[0].host_;
    }
  }
}
/**
 * Rebuilds the ring from the supplied host list. Each host is inserted
 * hashes_per_host times (hashed on "address_i") and the ring is sorted by hash
 * so chooseHost() can binary search it.
 */
void RingHashLoadBalancer::Ring::create(Runtime::Loader& runtime,
                                        const std::vector<HostSharedPtr>& hosts) {
  log_trace("ring hash: building ring");
  ring_.clear();
  if (hosts.empty()) {
    return;
  }

  // Currently we specify the minimum size of the ring, and determine the replication factor
  // based on the number of hosts. It's possible we might want to support more sophisticated
  // configuration in the future.
  // NOTE: Currently we keep a ring for healthy hosts and unhealthy hosts, and this is done per
  //       thread. This is the simplest implementation, but it's expensive from a memory
  //       standpoint and duplicates the regeneration computation. In the future we might want
  //       to generate the rings centrally and then just RCU them out to each thread. This is
  //       sufficient for getting started.
  uint64_t min_ring_size = runtime.snapshot().getInteger("upstream.ring_hash.min_ring_size", 1024);
  uint64_t hashes_per_host = 1;
  if (hosts.size() < min_ring_size) {
    // Round up so that hosts.size() * hashes_per_host >= min_ring_size.
    hashes_per_host = min_ring_size / hosts.size();
    if ((min_ring_size % hosts.size()) != 0) {
      hashes_per_host++;
    }
  }

  log_trace("ring hash: min_ring_size={} hashes_per_host={}", min_ring_size, hashes_per_host);
  ring_.reserve(hosts.size() * hashes_per_host);
  // Iterate by const reference: copying the shared_ptr each iteration (as the
  // original `for (auto host : hosts)` did) needlessly bumps the refcount.
  for (const HostSharedPtr& host : hosts) {
    for (uint64_t i = 0; i < hashes_per_host; i++) {
      // NOTE(review): std::hash<std::string> is implementation defined, so the ring layout is
      // not stable across standard libraries — fine for per-process balancing, but confirm no
      // cross-process consistency is expected.
      std::string hash_key(host->address()->asString() + "_" + std::to_string(i));
      uint64_t hash = std::hash<std::string>()(hash_key);
      log_trace("ring hash: hash_key={} hash={}", hash_key, hash);
      ring_.push_back({hash, host});
    }
  }

  std::sort(ring_.begin(), ring_.end(), [](const RingEntry& lhs, const RingEntry& rhs)
                                            -> bool { return lhs.hash_ < rhs.hash_; });
#ifndef NDEBUG
  for (const RingEntry& entry : ring_) {
    log_trace("ring hash: host={} hash={}", entry.host_->address()->asString(), entry.hash_);
  }
#endif
}
// Regenerates both rings from the current host set. Invoked once at
// construction and again on every membership-update callback.
void RingHashLoadBalancer::refresh() {
  all_hosts_ring_.create(runtime_, host_set_.hosts());
  healthy_hosts_ring_.create(runtime_, host_set_.healthyHosts());
}
} // Upstream
| {
"content_hash": "561401d17a528e2e6af42b06a6f0a2d0",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 99,
"avg_line_length": 36.65546218487395,
"alnum_prop": 0.6130215497478221,
"repo_name": "timperrett/envoy",
"id": "6c5fa1344823d8627e33c72e39f46468830a73bc",
"size": "4488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/common/upstream/ring_hash_lb.cc",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "226"
},
{
"name": "C++",
"bytes": "2662234"
},
{
"name": "CMake",
"bytes": "20781"
},
{
"name": "Makefile",
"bytes": "3887"
},
{
"name": "Protocol Buffer",
"bytes": "6293"
},
{
"name": "Python",
"bytes": "187935"
},
{
"name": "Shell",
"bytes": "30686"
}
],
"symlink_target": ""
} |
package android.test;
import com.google.android.collect.Sets;
import android.content.Context;
import android.content.ContextWrapper;
import android.content.ContentProvider;
import android.database.DatabaseErrorHandler;
import android.database.sqlite.SQLiteDatabase;
import android.os.FileUtils;
import android.util.Log;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.util.Set;
/**
* This is a class which delegates to the given context, but performs database
* and file operations with a renamed database/file name (prefixes default
* names with a given prefix).
*/
public class RenamingDelegatingContext extends ContextWrapper {

    /** Context to which file and database operations are delegated. */
    private Context mFileContext;
    /** Prefix prepended to every database/file name on disk. */
    private String mFilePrefix;
    private File mCacheDir;
    private final Object mSync = new Object();

    /** Public (unprefixed) names of databases visible through this context. */
    private Set<String> mDatabaseNames = Sets.newHashSet();
    /** Public (unprefixed) names of files visible through this context. */
    private Set<String> mFileNames = Sets.newHashSet();

    /**
     * Instantiates the given provider class and attaches it to a renaming context.
     * Pre-existing files and databases are not made accessible.
     */
    public static <T extends ContentProvider> T providerWithRenamedContext(
            Class<T> contentProvider, Context c, String filePrefix)
            throws IllegalAccessException, InstantiationException {
        return providerWithRenamedContext(contentProvider, c, filePrefix, false);
    }

    /**
     * Instantiates the given provider class and attaches it to a renaming context.
     *
     * @param allowAccessToExistingFilesAndDbs whether files/databases that already
     *        exist with the prefix should be visible through the new context
     */
    public static <T extends ContentProvider> T providerWithRenamedContext(
            Class<T> contentProvider, Context c, String filePrefix,
            boolean allowAccessToExistingFilesAndDbs)
            throws IllegalAccessException, InstantiationException {
        // Locals renamed from the original m-prefixed names (mProvider, mContext):
        // the m-prefix is reserved for member fields by the Android style guide.
        T provider = contentProvider.newInstance();
        RenamingDelegatingContext context = new RenamingDelegatingContext(c, filePrefix);
        if (allowAccessToExistingFilesAndDbs) {
            context.makeExistingFilesAndDbsAccessible();
        }
        provider.attachInfo(context, null);
        return provider;
    }

    /**
     * Makes accessible all files and databases whose names match the filePrefix that was passed to
     * the constructor. Normally only files and databases that were created through this context are
     * accessible.
     */
    public void makeExistingFilesAndDbsAccessible() {
        String[] databaseList = mFileContext.databaseList();
        for (String diskName : databaseList) {
            if (shouldDiskNameBeVisible(diskName)) {
                mDatabaseNames.add(publicNameFromDiskName(diskName));
            }
        }
        String[] fileList = mFileContext.fileList();
        for (String diskName : fileList) {
            if (shouldDiskNameBeVisible(diskName)) {
                mFileNames.add(publicNameFromDiskName(diskName));
            }
        }
    }

    /**
     * Returns if the given diskName starts with the given prefix or not.
     * @param diskName name of the database/file.
     */
    boolean shouldDiskNameBeVisible(String diskName) {
        return diskName.startsWith(mFilePrefix);
    }

    /**
     * Returns the public name (everything following the prefix) of the given diskName.
     * @param diskName name of the database/file.
     */
    String publicNameFromDiskName(String diskName) {
        if (!shouldDiskNameBeVisible(diskName)) {
            throw new IllegalArgumentException("disk file should not be visible: " + diskName);
        }
        return diskName.substring(mFilePrefix.length(), diskName.length());
    }

    /**
     * @param context : the context that will be delegated.
     * @param filePrefix : a prefix with which database and file names will be
     * prefixed.
     */
    public RenamingDelegatingContext(Context context, String filePrefix) {
        super(context);
        mFileContext = context;
        mFilePrefix = filePrefix;
    }

    /**
     * @param context : the context that will be delegated.
     * @param fileContext : the context that file and db methods will be delegated to
     * @param filePrefix : a prefix with which database and file names will be
     * prefixed.
     */
    public RenamingDelegatingContext(Context context, Context fileContext, String filePrefix) {
        super(context);
        mFileContext = fileContext;
        mFilePrefix = filePrefix;
    }

    /** Returns the prefix prepended to database/file names on disk. */
    public String getDatabasePrefix() {
        return mFilePrefix;
    }

    /** Maps a public name to the prefixed name actually used on disk. */
    private String renamedFileName(String name) {
        return mFilePrefix + name;
    }

    @Override
    public SQLiteDatabase openOrCreateDatabase(String name,
            int mode, SQLiteDatabase.CursorFactory factory) {
        final String internalName = renamedFileName(name);
        if (!mDatabaseNames.contains(name)) {
            mDatabaseNames.add(name);
            // First open through this context starts from a clean slate: remove any
            // renamed database left over from a previous run.
            mFileContext.deleteDatabase(internalName);
        }
        return mFileContext.openOrCreateDatabase(internalName, mode, factory);
    }

    @Override
    public SQLiteDatabase openOrCreateDatabase(String name,
            int mode, SQLiteDatabase.CursorFactory factory, DatabaseErrorHandler errorHandler) {
        final String internalName = renamedFileName(name);
        if (!mDatabaseNames.contains(name)) {
            mDatabaseNames.add(name);
            // See the two-argument overload: first open wipes any stale renamed database.
            mFileContext.deleteDatabase(internalName);
        }
        return mFileContext.openOrCreateDatabase(internalName, mode, factory, errorHandler);
    }

    @Override
    public boolean deleteDatabase(String name) {
        if (mDatabaseNames.contains(name)) {
            mDatabaseNames.remove(name);
            return mFileContext.deleteDatabase(renamedFileName(name));
        } else {
            // Databases not created/made accessible through this context are invisible.
            return false;
        }
    }

    @Override
    public File getDatabasePath(String name) {
        return mFileContext.getDatabasePath(renamedFileName(name));
    }

    @Override
    public String[] databaseList() {
        // Reports public (unprefixed) names, not the names actually on disk.
        return mDatabaseNames.toArray(new String[]{});
    }

    @Override
    public FileInputStream openFileInput(String name)
            throws FileNotFoundException {
        final String internalName = renamedFileName(name);
        if (mFileNames.contains(name)) {
            return mFileContext.openFileInput(internalName);
        } else {
            // Hide files that were not created/made accessible through this context.
            throw new FileNotFoundException(internalName);
        }
    }

    @Override
    public FileOutputStream openFileOutput(String name, int mode)
            throws FileNotFoundException {
        mFileNames.add(name);
        return mFileContext.openFileOutput(renamedFileName(name), mode);
    }

    @Override
    public File getFileStreamPath(String name) {
        return mFileContext.getFileStreamPath(renamedFileName(name));
    }

    @Override
    public boolean deleteFile(String name) {
        if (mFileNames.contains(name)) {
            mFileNames.remove(name);
            return mFileContext.deleteFile(renamedFileName(name));
        } else {
            return false;
        }
    }

    @Override
    public String[] fileList() {
        // Reports public (unprefixed) names, not the names actually on disk.
        return mFileNames.toArray(new String[]{});
    }

    /**
     * In order to support calls to getCacheDir(), we create a temp cache dir (inside the real
     * one) and return it instead. This code is basically getCacheDir(), except it uses the real
     * cache dir as the parent directory and creates a test cache dir inside that.
     */
    @Override
    public File getCacheDir() {
        synchronized (mSync) {
            if (mCacheDir == null) {
                mCacheDir = new File(mFileContext.getCacheDir(), renamedFileName("cache"));
            }
            if (!mCacheDir.exists()) {
                if (!mCacheDir.mkdirs()) {
                    Log.w("RenamingDelegatingContext", "Unable to create cache directory");
                    return null;
                }
                FileUtils.setPermissions(
                        mCacheDir.getPath(),
                        FileUtils.S_IRWXU|FileUtils.S_IRWXG|FileUtils.S_IXOTH,
                        -1, -1);
            }
        }
        return mCacheDir;
    }
}
| {
"content_hash": "c3fda05d8d56a47e8e66cbee59b7c61d",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 100,
"avg_line_length": 34.4331983805668,
"alnum_prop": 0.6486772486772486,
"repo_name": "haikuowuya/android_system_code",
"id": "eee3ad7418220c6784f6a466c0120bfa8004dc62",
"size": "9124",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/android/test/RenamingDelegatingContext.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "182432"
},
{
"name": "Java",
"bytes": "124952631"
}
],
"symlink_target": ""
} |
Pristine
========
Pristine takes git repositories and generates beautiful, purely static HTML
summaries of the contents therein.
As opposed to hosting one's code on a third-party service, pristine generates
stand-alone project summaries that can be served from the user's own webservers,
emailed around, or whatever else. It's just static content - nothing fancy, ya'
know?
Install
-------
Pristine isn't on cabal yet, so for now, do the following (make sure you already
have the [haskell-platform](http://www.haskell.org/platform)):
$ git clone https://github.com/chrisdotcode/pristine
$ cd pristine
$ cabal sandbox init
$ cabal install -j --haddock-hyperlink-source
The pristine binary should be installed somewhere in `dist/`.
Documentation
-------------
Pristine - The git repository static summary generator
Usage:
pristine [--version] [DIRECTORY] [-n|--name ARG] [-l|--license LICENSE]
[-o|--output OUTPUT] [-m|--maintainer MAINTAINER]
[-e|--clone-link CLONELINK] [-d|--download-link DOWNLOADLINK]
Generates static summaries of git repositories.
### -h,--help
Show the help text.
### --version
Print version information.
### DIRECTORY
The directory containing the project's git repo.
### -n,--name ARG
The project's name (defaults to the repository's directory name).
### -l,--license LICENSE
The license for the project. This is one of:
* `none`
* `gpl2`
* `gpl3`
* `lgpl2`
* `lgpl3`
* `agpl3`
* `bsd2`
* `bsd3`
* `mit`
* `mpl2`
* `apache2`
* `public`
* `reserved`
### -o,--output OUTPUT
The directory to output the generated file.
### -m,--maintainer MAINTAINER
The maintainer of the project.
### -e,--clone-link CLONELINK
The project's git clone location.
### -d,--download-link DOWNLOADLINK
The project's download/home page, if one is offered.
| {
"content_hash": "cb82afaa85b11c94d8e2b949400b61e4",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 80,
"avg_line_length": 25,
"alnum_prop": 0.6969863013698631,
"repo_name": "chrisdotcode/pristine",
"id": "1c40502334d205ff0a31f3358a797c7eda351890",
"size": "1825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "README.md",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "84611"
},
{
"name": "Haskell",
"bytes": "17621"
},
{
"name": "JavaScript",
"bytes": "14"
}
],
"symlink_target": ""
} |
using Aurora.EffectsEngine;
using Aurora.EffectsEngine.Animations;
using Aurora.Profiles.Minecraft.GSI;
using Aurora.Settings.Layers;
using Aurora.Utils;
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Controls;
namespace Aurora.Profiles.Minecraft.Layers {
// Fix: "obselete" -> "obsolete" in the user-visible deprecation message.
[Obsolete("This layer is obsolete and has been replaced by the Overrides system.")]
public class MinecraftBurnLayerHandler : LayerHandler<LayerHandlerProperties> {

    // Live fire particles; expired ones are pruned at the end of each Render().
    private List<FireParticle> particles = new List<FireParticle>();
    private Random rnd = new Random();

    protected override UserControl CreateControl() {
        return new Control_MinecraftBurnLayer();
    }

    /// <summary>
    /// Spawns one particle whose animation rises from just below the bottom of
    /// the canvas to just above the top, drifting sideways by a small random
    /// offset and fading out (alpha 255 -> 0) as it goes.
    /// </summary>
    private void CreateFireParticle() {
        float randomX = (float)rnd.NextDouble() * Effects.canvas_width;
        float randomOffset = ((float)rnd.NextDouble() * 15) - 7.5f;
        particles.Add(new FireParticle() {
            mix = new AnimationMix(new[] {
                new AnimationTrack("particle", 0)
                    .SetFrame(0, new AnimationFilledCircle(randomX, Effects.canvas_height + 5, 5, Color.FromArgb(255, 230, 0)))
                    .SetFrame(1, new AnimationFilledCircle(randomX + randomOffset, -6, 6, Color.FromArgb(0, 255, 230, 0)))
            }),
            time = 0
        });
    }

    /// <summary>
    /// Renders a red background with rising fire particles while the player is
    /// burning; renders an empty layer otherwise.
    /// </summary>
    public override EffectLayer Render(IGameState gamestate) {
        EffectLayer layer = new EffectLayer("Minecraft Burning Layer");

        // Render nothing if invalid gamestate or player isn't on fire
        if (!(gamestate is GameState_Minecraft) || !(gamestate as GameState_Minecraft).Player.IsBurning)
            return layer;

        // Set the background to red
        layer.Fill(Color.Red);

        // Add 3 particles every frame
        for (int i = 0; i < 3; i++)
            CreateFireParticle();

        // Render all particles and advance their normalized lifetime.
        foreach (var particle in particles) {
            particle.mix.Draw(layer.GetGraphics(), particle.time);
            particle.time += .1f;
        }

        // Remove any expired particles
        particles.RemoveAll(particle => particle.time >= 1);

        return layer;
    }
}
// Per-particle state: the animation to draw and its normalized progress
// (0 = freshly spawned, >= 1 = expired and eligible for removal).
internal class FireParticle {
    internal AnimationMix mix;
    internal float time;
}
}
| {
"content_hash": "8e30c7b0c0538489886aa7213a54cd80",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 131,
"avg_line_length": 35.927536231884055,
"alnum_prop": 0.6111335215812828,
"repo_name": "antonpup/Aurora",
"id": "7cfa248f05704f57624d7d31865c70f4aeaa138d",
"size": "2481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Project-Aurora/Project-Aurora/Profiles/Minecraft/Layers/MinecraftBurnLayerHandler.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "38358"
},
{
"name": "C#",
"bytes": "4076103"
},
{
"name": "C++",
"bytes": "206975"
},
{
"name": "Inno Setup",
"bytes": "7502"
},
{
"name": "JavaScript",
"bytes": "14557"
},
{
"name": "Python",
"bytes": "3754"
}
],
"symlink_target": ""
} |
package de.zib.scalaris;
import com.ericsson.otp.erlang.OtpErlangException;
import com.ericsson.otp.erlang.OtpErlangObject;
/**
* Exception that is thrown if a test_and_set operation on a scalaris ring
* fails because the old value did not match the expected value.
*
* Contains the old value stored in scalaris.
*
* @author Nico Kruber, kruber@zib.de
* @version 2.7
* @since 2.7
*/
public class KeyChangedException extends OtpErlangException {
    /**
     * class version for serialisation
     */
    private static final long serialVersionUID = 1L;

    /**
     * The value stored in scalaris.
     */
    private final OtpErlangObject oldValue;

    /**
     * Creates the exception with the given old value.
     *
     * @param old_value
     *            the old value stored in scalaris
     */
    public KeyChangedException(final OtpErlangObject old_value) {
        super();
        this.oldValue = old_value;
    }

    /**
     * Creates the exception with the given old value taking the message of the
     * given throwable.
     *
     * @param e
     *            the exception to "re-throw"
     * @param old_value
     *            the old value stored in scalaris
     */
    public KeyChangedException(final Throwable e, final OtpErlangObject old_value) {
        super(e.getMessage());
        this.oldValue = old_value;
        setStackTrace(e.getStackTrace());
        // Preserve the original exception as the cause so the full chain is
        // available via getCause(), not just the copied message/stack trace.
        initCause(e);
    }

    /**
     * Returns the (old) value stored in scalaris.
     *
     * @return the value
     */
    public OtpErlangObject getOldValue() {
        return oldValue;
    }
}
| {
"content_hash": "4e7579b74c707a1f6fc1cd709242679d",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 84,
"avg_line_length": 25.5,
"alnum_prop": 0.6299810246679317,
"repo_name": "Eonblast/Scalaxis",
"id": "6f855d4e96ff5fc2b813898f1946624dfb764918",
"size": "2210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "java-api/src/de/zib/scalaris/KeyChangedException.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "D",
"bytes": "829"
},
{
"name": "Erlang",
"bytes": "3288933"
},
{
"name": "Java",
"bytes": "1108596"
},
{
"name": "JavaScript",
"bytes": "3302"
},
{
"name": "Perl",
"bytes": "3911"
},
{
"name": "Python",
"bytes": "125449"
},
{
"name": "Ruby",
"bytes": "146035"
},
{
"name": "Shell",
"bytes": "216492"
}
],
"symlink_target": ""
} |
#ifndef RELATION_H
#define RELATION_H
#include "access/sdir.h"
#include "nodes/bitmapset.h"
#include "nodes/params.h"
#include "nodes/parsenodes.h"
#include "storage/block.h"
/*
 * Relids
 *		Set of relation identifiers (indexes into the rangetable).
 *
 * Implemented as a Bitmapset pointer; NULL represents the empty set.
 */
typedef Bitmapset *Relids;
/*
 * When looking for a "cheapest path", this enum specifies whether we want
 * cheapest startup cost or cheapest total cost.
 */
typedef enum CostSelector
{
	STARTUP_COST, TOTAL_COST	/* minimize startup cost vs. total cost */
} CostSelector;
/*
 * The cost estimate produced by cost_qual_eval() includes both a one-time
 * (startup) cost, and a per-tuple cost.  The total charge for evaluating a
 * qual over N tuples is therefore startup + N * per_tuple.
 */
typedef struct QualCost
{
	Cost		startup;		/* one-time cost */
	Cost		per_tuple;		/* per-evaluation cost */
} QualCost;
/*----------
* PlannerGlobal
* Global information for planning/optimization
*
* PlannerGlobal holds state for an entire planner invocation; this state
* is shared across all levels of sub-Queries that exist in the command being
* planned.
*----------
*/
typedef struct PlannerGlobal
{
	NodeTag		type;			/* identifies this node as a PlannerGlobal */

	ParamListInfo boundParams;	/* Param values provided to planner() */

	List	   *paramlist;		/* unused, will be removed in 9.3 */

	List	   *subplans;		/* Plans for SubPlan nodes */

	List	   *subrtables;		/* Rangetables for SubPlan nodes */

	List	   *subrowmarks;	/* PlanRowMarks for SubPlan nodes */

	Bitmapset  *rewindPlanIDs;	/* indices of subplans that require REWIND */

	List	   *finalrtable;	/* "flat" rangetable for executor */

	List	   *finalrowmarks;	/* "flat" list of PlanRowMarks */

	List	   *relationOids;	/* OIDs of relations the plan depends on */

	List	   *invalItems;		/* other dependencies, as PlanInvalItems */

	Index		lastPHId;		/* highest PlaceHolderVar ID assigned */

	Index		lastRowMarkId;	/* highest PlanRowMark ID assigned */

	bool		transientPlan;	/* redo plan when TransactionXmin changes? */

	/* Added post-release, will be in a saner place in 9.3: */
	int			nParamExec;		/* number of PARAM_EXEC Params used */
} PlannerGlobal;
/* macro for fetching the Plan associated with a SubPlan node */
#define planner_subplan_get_plan(root, subplan) \
((Plan *) list_nth((root)->glob->subplans, (subplan)->plan_id - 1))
/*----------
* PlannerInfo
* Per-query information for planning/optimization
*
* This struct is conventionally called "root" in all the planner routines.
* It holds links to all of the planner's working state, in addition to the
* original Query. Note that at present the planner extensively modifies
* the passed-in Query data structure; someday that should stop.
*----------
*/
typedef struct PlannerInfo
{
	NodeTag		type;			/* identifies this node as a PlannerInfo */

	Query	   *parse;			/* the Query being planned */

	PlannerGlobal *glob;		/* global info for current planner run */

	Index		query_level;	/* 1 at the outermost Query */

	struct PlannerInfo *parent_root;	/* NULL at outermost Query */

	/*
	 * simple_rel_array holds pointers to "base rels" and "other rels" (see
	 * comments for RelOptInfo for more info).  It is indexed by rangetable
	 * index (so entry 0 is always wasted).  Entries can be NULL when an RTE
	 * does not correspond to a base relation, such as a join RTE or an
	 * unreferenced view RTE; or if the RelOptInfo hasn't been made yet.
	 */
	struct RelOptInfo **simple_rel_array;	/* All 1-rel RelOptInfos */
	int			simple_rel_array_size;	/* allocated size of array */

	/*
	 * simple_rte_array is the same length as simple_rel_array and holds
	 * pointers to the associated rangetable entries.  This lets us avoid
	 * rt_fetch(), which can be a bit slow once large inheritance sets have
	 * been expanded.
	 */
	RangeTblEntry **simple_rte_array;	/* rangetable as an array */

	/*
	 * join_rel_list is a list of all join-relation RelOptInfos we have
	 * considered in this planning run.  For small problems we just scan the
	 * list to do lookups, but when there are many join relations we build a
	 * hash table for faster lookups.  The hash table is present and valid
	 * when join_rel_hash is not NULL.  Note that we still maintain the list
	 * even when using the hash table for lookups; this simplifies life for
	 * GEQO.
	 */
	List	   *join_rel_list;	/* list of join-relation RelOptInfos */
	struct HTAB *join_rel_hash; /* optional hashtable for join relations */

	/*
	 * When doing a dynamic-programming-style join search, join_rel_level[k]
	 * is a list of all join-relation RelOptInfos of level k, and
	 * join_cur_level is the current level.  New join-relation RelOptInfos are
	 * automatically added to the join_rel_level[join_cur_level] list.
	 * join_rel_level is NULL if not in use.
	 */
	List	  **join_rel_level; /* lists of join-relation RelOptInfos */
	int			join_cur_level; /* index of list being extended */

	List	   *resultRelations;	/* integer list of RT indexes, or NIL */

	List	   *init_plans;		/* init SubPlans for query */

	List	   *cte_plan_ids;	/* per-CTE-item list of subplan IDs */

	List	   *eq_classes;		/* list of active EquivalenceClasses */

	List	   *canon_pathkeys; /* list of "canonical" PathKeys */

	List	   *left_join_clauses;	/* list of RestrictInfos for
									 * mergejoinable outer join clauses
									 * w/nonnullable var on left */

	List	   *right_join_clauses; /* list of RestrictInfos for
									 * mergejoinable outer join clauses
									 * w/nonnullable var on right */

	List	   *full_join_clauses;	/* list of RestrictInfos for
									 * mergejoinable full join clauses */

	List	   *join_info_list; /* list of SpecialJoinInfos */

	List	   *append_rel_list;	/* list of AppendRelInfos */

	List	   *rowMarks;		/* list of PlanRowMarks */

	List	   *placeholder_list;	/* list of PlaceHolderInfos */

	List	   *query_pathkeys; /* desired pathkeys for query_planner(), and
								 * actual pathkeys afterwards */

	List	   *group_pathkeys; /* groupClause pathkeys, if any */
	List	   *window_pathkeys;	/* pathkeys of bottom window, if any */
	List	   *distinct_pathkeys;	/* distinctClause pathkeys, if any */
	List	   *sort_pathkeys;	/* sortClause pathkeys, if any */

	List	   *initial_rels;	/* RelOptInfos we are now trying to join */

	MemoryContext planner_cxt;	/* context holding PlannerInfo */

	double		total_table_pages;	/* # of pages in all tables of query */

	double		tuple_fraction; /* tuple_fraction passed to query_planner */

	bool		hasInheritedTarget; /* true if parse->resultRelation is an
									 * inheritance child rel */
	bool		hasJoinRTEs;	/* true if any RTEs are RTE_JOIN kind */
	bool		hasHavingQual;	/* true if havingQual was non-null */
	bool		hasPseudoConstantQuals; /* true if any RestrictInfo has
										 * pseudoconstant = true */
	bool		hasRecursion;	/* true if planning a recursive WITH item */

	/* These fields are used only when hasRecursion is true: */
	int			wt_param_id;	/* PARAM_EXEC ID for the work table */
	struct Plan *non_recursive_plan;	/* plan for non-recursive term */

	/* optional private data for join_search_hook, e.g., GEQO */
	void	   *join_search_private;

	/* Added post-release, will be in a saner place in 9.3: */
	List	   *plan_params;	/* list of PlannerParamItems, see below */
} PlannerInfo;
/*
 * In places where it's known that simple_rte_array[] must have been prepared
 * already, we just index into it to fetch RTEs.  In code that might be
 * executed before or after entering query_planner(), use this macro, which
 * falls back to scanning the parse tree's rangetable when the array is not
 * yet built.
 */
#define planner_rt_fetch(rti, root) \
	((root)->simple_rte_array ? (root)->simple_rte_array[rti] : \
	 rt_fetch(rti, (root)->parse->rtable))
/*----------
* RelOptInfo
* Per-relation information for planning/optimization
*
* For planning purposes, a "base rel" is either a plain relation (a table)
* or the output of a sub-SELECT or function that appears in the range table.
* In either case it is uniquely identified by an RT index. A "joinrel"
* is the joining of two or more base rels. A joinrel is identified by
* the set of RT indexes for its component baserels. We create RelOptInfo
* nodes for each baserel and joinrel, and store them in the PlannerInfo's
* simple_rel_array and join_rel_list respectively.
*
* Note that there is only one joinrel for any given set of component
* baserels, no matter what order we assemble them in; so an unordered
* set is the right datatype to identify it with.
*
* We also have "other rels", which are like base rels in that they refer to
* single RT indexes; but they are not part of the join tree, and are given
* a different RelOptKind to identify them. Lastly, there is a RelOptKind
* for "dead" relations, which are base rels that we have proven we don't
* need to join after all.
*
* Currently the only kind of otherrels are those made for member relations
* of an "append relation", that is an inheritance set or UNION ALL subquery.
* An append relation has a parent RTE that is a base rel, which represents
* the entire append relation. The member RTEs are otherrels. The parent
* is present in the query join tree but the members are not. The member
* RTEs and otherrels are used to plan the scans of the individual tables or
* subqueries of the append set; then the parent baserel is given an Append
* plan comprising the best plans for the individual member rels. (See
* comments for AppendRelInfo for more information.)
*
* At one time we also made otherrels to represent join RTEs, for use in
* handling join alias Vars. Currently this is not needed because all join
* alias Vars are expanded to non-aliased form during preprocess_expression.
*
* Parts of this data structure are specific to various scan and join
* mechanisms. It didn't seem worth creating new node types for them.
*
* relids - Set of base-relation identifiers; it is a base relation
* if there is just one, a join relation if more than one
* rows - estimated number of tuples in the relation after restriction
* clauses have been applied (ie, output rows of a plan for it)
* width - avg. number of bytes per tuple in the relation after the
* appropriate projections have been done (ie, output width)
* reltargetlist - List of Var and PlaceHolderVar nodes for the values
* we need to output from this relation.
* List is in no particular order, but all rels of an
* appendrel set must use corresponding orders.
* NOTE: in a child relation, may contain RowExpr or
* ConvertRowtypeExpr representing a whole-row Var.
* pathlist - List of Path nodes, one for each potentially useful
* method of generating the relation
* cheapest_startup_path - the pathlist member with lowest startup cost
* (regardless of its ordering)
* cheapest_total_path - the pathlist member with lowest total cost
* (regardless of its ordering)
* cheapest_unique_path - for caching cheapest path to produce unique
* (no duplicates) output from relation
*
* If the relation is a base relation it will have these fields set:
*
* relid - RTE index (this is redundant with the relids field, but
* is provided for convenience of access)
* rtekind - distinguishes plain relation, subquery, or function RTE
* min_attr, max_attr - range of valid AttrNumbers for rel
* attr_needed - array of bitmapsets indicating the highest joinrel
* in which each attribute is needed; if bit 0 is set then
* the attribute is needed as part of final targetlist
* attr_widths - cache space for per-attribute width estimates;
* zero means not computed yet
* indexlist - list of IndexOptInfo nodes for relation's indexes
* (always NIL if it's not a table)
* pages - number of disk pages in relation (zero if not a table)
* tuples - number of tuples in relation (not considering restrictions)
* subplan - plan for subquery (NULL if it's not a subquery)
* subrtable - rangetable for subquery (NIL if it's not a subquery)
* subrowmark - rowmarks for subquery (NIL if it's not a subquery)
*
* Note: for a subquery, tuples and subplan are not set immediately
* upon creation of the RelOptInfo object; they are filled in when
* set_base_rel_pathlist processes the object.
*
* For otherrels that are appendrel members, these fields are filled
* in just as for a baserel.
*
* The presence of the remaining fields depends on the restrictions
* and joins that the relation participates in:
*
* baserestrictinfo - List of RestrictInfo nodes, containing info about
* each non-join qualification clause in which this relation
* participates (only used for base rels)
* baserestrictcost - Estimated cost of evaluating the baserestrictinfo
* clauses at a single tuple (only used for base rels)
* joininfo - List of RestrictInfo nodes, containing info about each
* join clause in which this relation participates (but
* note this excludes clauses that might be derivable from
* EquivalenceClasses)
* has_eclass_joins - flag that EquivalenceClass joins are possible
* index_outer_relids - only used for base rels; set of outer relids
* that participate in indexable joinclauses for this rel
* index_inner_paths - only used for base rels; list of InnerIndexscanInfo
* nodes showing best indexpaths for various subsets of
* index_outer_relids.
*
* Note: Keeping a restrictinfo list in the RelOptInfo is useful only for
* base rels, because for a join rel the set of clauses that are treated as
* restrict clauses varies depending on which sub-relations we choose to join.
* (For example, in a 3-base-rel join, a clause relating rels 1 and 2 must be
* treated as a restrictclause if we join {1} and {2 3} to make {1 2 3}; but
* if we join {1 2} and {3} then that clause will be a restrictclause in {1 2}
* and should not be processed again at the level of {1 2 3}.) Therefore,
* the restrictinfo list in the join case appears in individual JoinPaths
* (field joinrestrictinfo), not in the parent relation. But it's OK for
* the RelOptInfo to store the joininfo list, because that is the same
* for a given rel no matter how we form it.
*
* We store baserestrictcost in the RelOptInfo (for base relations) because
* we know we will need it at least once (to price the sequential scan)
* and may need it multiple times to price index scans.
*----------
*/
typedef enum RelOptKind
{
	RELOPT_BASEREL,				/* plain base relation (single RT index) */
	RELOPT_JOINREL,				/* join of two or more base rels */
	RELOPT_OTHER_MEMBER_REL,	/* appendrel member; not in the join tree */
	RELOPT_DEADREL				/* base rel proven unnecessary to join */
} RelOptKind;
typedef struct RelOptInfo
{
	NodeTag		type;			/* identifies this node as a RelOptInfo */

	RelOptKind	reloptkind;		/* base rel, join rel, other rel, or dead */

	/* all relations included in this RelOptInfo */
	Relids		relids;			/* set of base relids (rangetable indexes) */

	/* size estimates generated by planner */
	double		rows;			/* estimated number of result tuples */
	int			width;			/* estimated avg width of result tuples */

	/* materialization information */
	List	   *reltargetlist;	/* Vars to be output by scan of relation */
	List	   *pathlist;		/* Path structures */
	struct Path *cheapest_startup_path;
	struct Path *cheapest_total_path;
	struct Path *cheapest_unique_path;

	/* information about a base rel (not set for join rels!) */
	Index		relid;			/* RTE index of this base rel */
	Oid			reltablespace;	/* containing tablespace */
	RTEKind		rtekind;		/* RELATION, SUBQUERY, or FUNCTION */
	AttrNumber	min_attr;		/* smallest attrno of rel (often <0) */
	AttrNumber	max_attr;		/* largest attrno of rel */
	Relids	   *attr_needed;	/* array indexed [min_attr .. max_attr] */
	int32	   *attr_widths;	/* array indexed [min_attr .. max_attr] */
	List	   *indexlist;		/* list of IndexOptInfo */
	BlockNumber pages;			/* # of disk pages (zero if not a table) */
	double		tuples;			/* # of tuples, ignoring restrictions */
	struct Plan *subplan;		/* if subquery */
	List	   *subrtable;		/* if subquery */
	List	   *subrowmark;		/* if subquery */

	/* used by various scans and joins: */
	List	   *baserestrictinfo;	/* RestrictInfo structures (if base
									 * rel) */
	QualCost	baserestrictcost;	/* cost of evaluating the above */
	List	   *joininfo;		/* RestrictInfo structures for join clauses
								 * involving this rel */
	bool		has_eclass_joins;	/* T means joininfo is incomplete */

	/* cached info about inner indexscan paths for relation: */
	Relids		index_outer_relids; /* other relids in indexable join
									 * clauses */
	List	   *index_inner_paths;	/* InnerIndexscanInfo nodes */

	/*
	 * Inner indexscans are not in the main pathlist because they are not
	 * usable except in specific join contexts.  We use the index_inner_paths
	 * list just to avoid recomputing the best inner indexscan repeatedly for
	 * similar outer relations.  See comments for InnerIndexscanInfo.
	 */
} RelOptInfo;
/*
* IndexOptInfo
* Per-index information for planning/optimization
*
* Prior to Postgres 7.0, RelOptInfo was used to describe both relations
* and indexes, but that created confusion without actually doing anything
* useful. So now we have a separate IndexOptInfo struct for indexes.
*
* opfamily[], indexkeys[], opcintype[], fwdsortop[], revsortop[],
* and nulls_first[] each have ncolumns entries.
* Note: for historical reasons, the opfamily array has an extra entry
* that is always zero. Some code scans until it sees a zero entry,
* rather than looking at ncolumns.
*
* Zeroes in the indexkeys[] array indicate index columns that are
* expressions; there is one element in indexprs for each such column.
*
* For an unordered index, the sortop arrays contains zeroes. Note that
* fwdsortop[] and nulls_first[] describe the sort ordering of a forward
* indexscan; we can also consider a backward indexscan, which will
* generate sort order described by revsortop/!nulls_first.
*
* The indexprs and indpred expressions have been run through
* prepqual.c and eval_const_expressions() for ease of matching to
* WHERE clauses. indpred is in implicit-AND form.
*/
typedef struct IndexOptInfo
{
	NodeTag		type;			/* identifies this node as an IndexOptInfo */

	Oid			indexoid;		/* OID of the index relation */
	Oid			reltablespace;	/* tablespace of index (not table) */
	RelOptInfo *rel;			/* back-link to index's table */

	/* statistics from pg_class */
	BlockNumber pages;			/* number of disk pages in index */
	double		tuples;			/* number of index tuples in index */

	/* index descriptor information; the per-column arrays below each have
	 * ncolumns entries (opfamily has one extra, always-zero entry) */
	int			ncolumns;		/* number of columns in index */
	Oid		   *opfamily;		/* OIDs of operator families for columns */
	int		   *indexkeys;		/* column numbers of index's keys, or 0 */
	Oid		   *opcintype;		/* OIDs of opclass declared input data types */
	Oid		   *fwdsortop;		/* OIDs of sort operators for each column */
	Oid		   *revsortop;		/* OIDs of sort operators for backward scan */
	bool	   *nulls_first;	/* do NULLs come first in the sort order? */

	Oid			relam;			/* OID of the access method (in pg_am) */

	RegProcedure amcostestimate;	/* OID of the access method's cost fcn */

	List	   *indexprs;		/* expressions for non-simple index columns */
	List	   *indpred;		/* predicate if a partial index, else NIL */

	bool		predOK;			/* true if predicate matches query */
	bool		unique;			/* true if a unique index */
	bool		amoptionalkey;	/* can query omit key for the first column? */
	bool		amsearchnulls;	/* can AM search for NULL/NOT NULL entries? */
	bool		amhasgettuple;	/* does AM have amgettuple interface? */
	bool		amhasgetbitmap; /* does AM have amgetbitmap interface? */
	/* added in 9.0.4: */
	bool		hypothetical;	/* true if index doesn't really exist */
	/* added in 9.0.6: */
	bool		immediate;		/* is uniqueness enforced immediately? */
} IndexOptInfo;
/*
* EquivalenceClasses
*
* Whenever we can determine that a mergejoinable equality clause A = B is
* not delayed by any outer join, we create an EquivalenceClass containing
* the expressions A and B to record this knowledge. If we later find another
* equivalence B = C, we add C to the existing EquivalenceClass; this may
* require merging two existing EquivalenceClasses. At the end of the qual
* distribution process, we have sets of values that are known all transitively
* equal to each other, where "equal" is according to the rules of the btree
* operator family(s) shown in ec_opfamilies. (We restrict an EC to contain
* only equalities whose operators belong to the same set of opfamilies. This
* could probably be relaxed, but for now it's not worth the trouble, since
* nearly all equality operators belong to only one btree opclass anyway.)
*
* We also use EquivalenceClasses as the base structure for PathKeys, letting
* us represent knowledge about different sort orderings being equivalent.
* Since every PathKey must reference an EquivalenceClass, we will end up
* with single-member EquivalenceClasses whenever a sort key expression has
* not been equivalenced to anything else. It is also possible that such an
* EquivalenceClass will contain a volatile expression ("ORDER BY random()"),
* which is a case that can't arise otherwise since clauses containing
* volatile functions are never considered mergejoinable. We mark such
* EquivalenceClasses specially to prevent them from being merged with
* ordinary EquivalenceClasses. Also, for volatile expressions we have
* to be careful to match the EquivalenceClass to the correct targetlist
* entry: consider SELECT random() AS a, random() AS b ... ORDER BY b,a.
* So we record the SortGroupRef of the originating sort clause.
*
* We allow equality clauses appearing below the nullable side of an outer join
* to form EquivalenceClasses, but these have a slightly different meaning:
* the included values might be all NULL rather than all the same non-null
* values. See src/backend/optimizer/README for more on that point.
*
* NB: if ec_merged isn't NULL, this class has been merged into another, and
* should be ignored in favor of using the pointed-to class.
*/
typedef struct EquivalenceClass
{
	NodeTag		type;			/* identifies this node as an EquivalenceClass */

	List	   *ec_opfamilies;	/* btree operator family OIDs */
	List	   *ec_members;		/* list of EquivalenceMembers */
	List	   *ec_sources;		/* list of generating RestrictInfos */
	List	   *ec_derives;		/* list of derived RestrictInfos */
	Relids		ec_relids;		/* all relids appearing in ec_members */
	bool		ec_has_const;	/* any pseudoconstants in ec_members? */
	bool		ec_has_volatile;	/* the (sole) member is a volatile expr */
	bool		ec_below_outer_join;	/* equivalence applies below an OJ */
	bool		ec_broken;		/* failed to generate needed clauses? */
	Index		ec_sortref;		/* originating sortclause label, or 0 */
	struct EquivalenceClass *ec_merged; /* set if merged into another EC;
										 * if non-NULL, use that EC instead */
} EquivalenceClass;
/*
 * If an EC contains a const and isn't below-outer-join, any PathKey depending
 * on it must be redundant, since there's only one possible value of the key.
 */
#define EC_MUST_BE_REDUNDANT(eclass)  \
	((eclass)->ec_has_const && !(eclass)->ec_below_outer_join)
/*
* EquivalenceMember - one member expression of an EquivalenceClass
*
* em_is_child signifies that this element was built by transposing a member
* for an inheritance parent relation to represent the corresponding expression
* on an inheritance child. The element should be ignored for all purposes
* except constructing inner-indexscan paths for the child relation. (Other
* types of join are driven from transposed joininfo-list entries.) Note
* that the EC's ec_relids field does NOT include the child relation.
*
* em_datatype is usually the same as exprType(em_expr), but can be
* different when dealing with a binary-compatible opfamily; in particular
* anyarray_ops would never work without this. Use em_datatype when
* looking up a specific btree operator to work with this expression.
*/
typedef struct EquivalenceMember
{
	NodeTag		type;			/* identifies this node as an EquivalenceMember */

	Expr	   *em_expr;		/* the expression represented */
	Relids		em_relids;		/* all relids appearing in em_expr */
	Relids		em_nullable_relids; /* nullable by lower outer joins */
	bool		em_is_const;	/* expression is pseudoconstant? */
	bool		em_is_child;	/* derived version for a child relation? */
	Oid			em_datatype;	/* the "nominal type" used by the opfamily;
								 * may differ from exprType(em_expr) for
								 * binary-compatible opfamilies */
} EquivalenceMember;
/*
* PathKeys
*
* The sort ordering of a path is represented by a list of PathKey nodes.
* An empty list implies no known ordering. Otherwise the first item
* represents the primary sort key, the second the first secondary sort key,
* etc. The value being sorted is represented by linking to an
* EquivalenceClass containing that value and including pk_opfamily among its
* ec_opfamilies. This is a convenient method because it makes it trivial
* to detect equivalent and closely-related orderings. (See optimizer/README
* for more information.)
*
* Note: pk_strategy is either BTLessStrategyNumber (for ASC) or
* BTGreaterStrategyNumber (for DESC). We assume that all ordering-capable
* index types will use btree-compatible strategy numbers.
*/
typedef struct PathKey
{
	NodeTag		type;			/* identifies this node as a PathKey */

	EquivalenceClass *pk_eclass;	/* the value that is ordered */
	Oid			pk_opfamily;	/* btree opfamily defining the ordering */
	int			pk_strategy;	/* sort direction (ASC or DESC), expressed as
								 * a btree strategy number */
	bool		pk_nulls_first; /* do NULLs come before normal values? */
} PathKey;
/*
* Type "Path" is used as-is for sequential-scan paths, as well as some other
* simple plan types that we don't need any extra information in the path for.
* For other path types it is the first component of a larger struct.
*
* Note: "pathtype" is the NodeTag of the Plan node we could build from this
* Path. It is partially redundant with the Path's NodeTag, but allows us
* to use the same Path type for multiple Plan types where there is no need
* to distinguish the Plan type during path processing.
*/
/* Common supertype of all path nodes; used as-is for seqscan-like paths. */
typedef struct Path
{
	NodeTag		type;
	NodeTag		pathtype;		/* tag identifying scan/join method */
	RelOptInfo *parent;			/* the relation this path can build */
	/* estimated execution costs for path (see costsize.c for more info) */
	Cost		startup_cost;	/* cost expended before fetching any tuples */
	Cost		total_cost;		/* total cost (assuming all tuples fetched) */
	List	   *pathkeys;		/* sort ordering of path's output */
	/* pathkeys is a List of PathKey nodes; see above */
} Path;
/*----------
* IndexPath represents an index scan over a single index.
*
* 'indexinfo' is the index to be scanned.
*
* 'indexclauses' is a list of index qualification clauses, with implicit
* AND semantics across the list. Each clause is a RestrictInfo node from
* the query's WHERE or JOIN conditions.
*
* 'indexquals' has the same structure as 'indexclauses', but it contains
* the actual indexqual conditions that can be used with the index.
* In simple cases this is identical to 'indexclauses', but when special
* indexable operators appear in 'indexclauses', they are replaced by the
* derived indexscannable conditions in 'indexquals'.
*
* 'isjoininner' is TRUE if the path is a nestloop inner scan (that is,
* some of the index conditions are join rather than restriction clauses).
* Note that the path costs will be calculated differently from a plain
* indexscan in this case, and in addition there's a special 'rows' value
* different from the parent RelOptInfo's (see below).
*
* 'indexscandir' is one of:
* ForwardScanDirection: forward scan of an ordered index
* BackwardScanDirection: backward scan of an ordered index
* NoMovementScanDirection: scan of an unordered index, or don't care
* (The executor doesn't care whether it gets ForwardScanDirection or
* NoMovementScanDirection for an indexscan, but the planner wants to
* distinguish ordered from unordered indexes for building pathkeys.)
*
* 'indextotalcost' and 'indexselectivity' are saved in the IndexPath so that
* we need not recompute them when considering using the same index in a
* bitmap index/heap scan (see BitmapHeapPath). The costs of the IndexPath
* itself represent the costs of an IndexScan plan type.
*
* 'rows' is the estimated result tuple count for the indexscan. This
* is the same as path.parent->rows for a simple indexscan, but it is
* different for a nestloop inner scan, because the additional indexquals
* coming from join clauses make the scan more selective than the parent
* rel's restrict clauses alone would do.
*----------
*/
/* An index scan over a single index (see detailed field notes above). */
typedef struct IndexPath
{
	Path		path;
	IndexOptInfo *indexinfo;	/* the index to be scanned */
	List	   *indexclauses;	/* index qual clauses (RestrictInfo nodes) */
	List	   *indexquals;		/* derived indexscannable conditions */
	bool		isjoininner;	/* TRUE if this is a nestloop inner scan */
	ScanDirection indexscandir; /* forward, backward, or no-movement scan */
	Cost		indextotalcost; /* saved index cost, for bitmap-scan reuse */
	Selectivity indexselectivity;	/* saved index selectivity (ditto) */
	double		rows;			/* estimated number of result tuples */
} IndexPath;
/*
* BitmapHeapPath represents one or more indexscans that generate TID bitmaps
* instead of directly accessing the heap, followed by AND/OR combinations
* to produce a single bitmap, followed by a heap scan that uses the bitmap.
* Note that the output is always considered unordered, since it will come
* out in physical heap order no matter what the underlying indexes did.
*
* The individual indexscans are represented by IndexPath nodes, and any
* logic on top of them is represented by a tree of BitmapAndPath and
* BitmapOrPath nodes. Notice that we can use the same IndexPath node both
* to represent a regular IndexScan plan, and as the child of a BitmapHeapPath
* that represents scanning the same index using a BitmapIndexScan. The
* startup_cost and total_cost figures of an IndexPath always represent the
* costs to use it as a regular IndexScan. The costs of a BitmapIndexScan
* can be computed using the IndexPath's indextotalcost and indexselectivity.
*
* BitmapHeapPaths can be nestloop inner indexscans. The isjoininner and
* rows fields serve the same purpose as for plain IndexPaths.
*/
/* A heap scan driven by a TID bitmap (see discussion above). */
typedef struct BitmapHeapPath
{
	Path		path;
	Path	   *bitmapqual;		/* IndexPath, BitmapAndPath, BitmapOrPath */
	bool		isjoininner;	/* T if it's a nestloop inner scan */
	double		rows;			/* estimated number of result tuples */
} BitmapHeapPath;
/*
* BitmapAndPath represents a BitmapAnd plan node; it can only appear as
* part of the substructure of a BitmapHeapPath. The Path structure is
* a bit more heavyweight than we really need for this, but for simplicity
* we make it a derivative of Path anyway.
*/
/* A BitmapAnd plan node; appears only below a BitmapHeapPath. */
typedef struct BitmapAndPath
{
	Path		path;
	List	   *bitmapquals;	/* IndexPaths and BitmapOrPaths */
	Selectivity bitmapselectivity;	/* estimated selectivity of the ANDed quals */
} BitmapAndPath;
/*
* BitmapOrPath represents a BitmapOr plan node; it can only appear as
* part of the substructure of a BitmapHeapPath. The Path structure is
* a bit more heavyweight than we really need for this, but for simplicity
* we make it a derivative of Path anyway.
*/
/* A BitmapOr plan node; appears only below a BitmapHeapPath. */
typedef struct BitmapOrPath
{
	Path		path;
	List	   *bitmapquals;	/* IndexPaths and BitmapAndPaths */
	Selectivity bitmapselectivity;	/* estimated selectivity of the ORed quals */
} BitmapOrPath;
/*
* TidPath represents a scan by TID
*
* tidquals is an implicitly OR'ed list of qual expressions of the form
* "CTID = pseudoconstant" or "CTID = ANY(pseudoconstant_array)".
* Note they are bare expressions, not RestrictInfos.
*/
/* A scan by TID (see notes above; tidquals are bare exprs, not RestrictInfos). */
typedef struct TidPath
{
	Path		path;
	List	   *tidquals;		/* qual(s) involving CTID = something */
} TidPath;
/*
* AppendPath represents an Append plan, ie, successive execution of
* several member plans.
*
* Note: it is possible for "subpaths" to contain only one, or even no,
* elements. These cases are optimized during create_append_plan.
* In particular, an AppendPath with no subpaths is a "dummy" path that
* is created to represent the case that a relation is provably empty.
*/
/* An Append plan: successive execution of several member paths. */
typedef struct AppendPath
{
	Path		path;
	List	   *subpaths;		/* list of component Paths */
} AppendPath;
/*
 * A "dummy" path is an AppendPath with no subpaths, created to represent the
 * case that a relation is provably empty (see AppendPath comments above).
 */
#define IS_DUMMY_PATH(p) \
	(IsA((p), AppendPath) && ((AppendPath *) (p))->subpaths == NIL)
/*
* ResultPath represents use of a Result plan node to compute a variable-free
* targetlist with no underlying tables (a "SELECT expressions" query).
* The query could have a WHERE clause, too, represented by "quals".
*
* Note that quals is a list of bare clauses, not RestrictInfos.
*/
/* Use of a Result node to compute a variable-free targetlist (see above). */
typedef struct ResultPath
{
	Path		path;
	List	   *quals;			/* WHERE quals (bare clauses, not RestrictInfos) */
} ResultPath;
/*
* MaterialPath represents use of a Material plan node, i.e., caching of
* the output of its subpath. This is used when the subpath is expensive
* and needs to be scanned repeatedly, or when we need mark/restore ability
* and the subpath doesn't have it.
*/
/* Use of a Material node to cache the output of its subpath (see above). */
typedef struct MaterialPath
{
	Path		path;
	Path	   *subpath;		/* path whose output is to be materialized */
} MaterialPath;
/*
* UniquePath represents elimination of distinct rows from the output of
* its subpath.
*
* This is unlike the other Path nodes in that it can actually generate
* different plans: either hash-based or sort-based implementation, or a
* no-op if the input path can be proven distinct already. The decision
* is sufficiently localized that it's not worth having separate Path node
* types. (Note: in the no-op case, we could eliminate the UniquePath node
* entirely and just return the subpath; but it's convenient to have a
* UniquePath in the path tree to signal upper-level routines that the input
* is known distinct.)
*/
/* How a UniquePath will implement elimination of duplicate rows. */
typedef enum
{
	UNIQUE_PATH_NOOP,			/* input is known unique already */
	UNIQUE_PATH_HASH,			/* use hashing */
	UNIQUE_PATH_SORT			/* use sorting */
} UniquePathMethod;
/* Elimination of distinct rows from the output of its subpath (see above). */
typedef struct UniquePath
{
	Path		path;
	Path	   *subpath;		/* path whose output is to be made unique */
	UniquePathMethod umethod;	/* how to implement the uniqueness */
	List	   *in_operators;	/* equality operators of the IN clause */
	List	   *uniq_exprs;		/* expressions to be made unique */
	double		rows;			/* estimated number of result tuples */
} UniquePath;
/*
* All join-type paths share these fields.
*/
/* Fields shared by all join-type paths. */
typedef struct JoinPath
{
	Path		path;
	JoinType	jointype;		/* kind of join (INNER, LEFT, etc.) */
	Path	   *outerjoinpath;	/* path for the outer side of the join */
	Path	   *innerjoinpath;	/* path for the inner side of the join */
	List	   *joinrestrictinfo;	/* RestrictInfos to apply to join */
	/*
	 * See the notes for RelOptInfo to understand why joinrestrictinfo is
	 * needed in JoinPath, and can't be merged into the parent RelOptInfo.
	 */
} JoinPath;
/*
* A nested-loop path needs no special fields.
*/
typedef JoinPath NestPath;
/*
* A mergejoin path has these fields.
*
* Unlike other path types, a MergePath node doesn't represent just a single
* run-time plan node: it can represent up to four. Aside from the MergeJoin
* node itself, there can be a Sort node for the outer input, a Sort node
* for the inner input, and/or a Material node for the inner input. We could
* represent these nodes by separate path nodes, but considering how many
* different merge paths are investigated during a complex join problem,
* it seems better to avoid unnecessary palloc overhead.
*
* path_mergeclauses lists the clauses (in the form of RestrictInfos)
* that will be used in the merge.
*
* Note that the mergeclauses are a subset of the parent relation's
* restriction-clause list. Any join clauses that are not mergejoinable
* appear only in the parent's restrict list, and must be checked by a
* qpqual at execution time.
*
* outersortkeys (resp. innersortkeys) is NIL if the outer path
* (resp. inner path) is already ordered appropriately for the
* mergejoin. If it is not NIL then it is a PathKeys list describing
* the ordering that must be created by an explicit Sort node.
*
* materialize_inner is TRUE if a Material node should be placed atop the
* inner input. This may appear with or without an inner Sort step.
*/
/* A mergejoin path; may imply Sort and/or Material nodes too (see above). */
typedef struct MergePath
{
	JoinPath	jpath;
	List	   *path_mergeclauses;	/* join clauses to be used for merge */
	List	   *outersortkeys;	/* keys for explicit sort, if any */
	List	   *innersortkeys;	/* keys for explicit sort, if any */
	bool		materialize_inner;	/* add Materialize to inner? */
} MergePath;
/*
* A hashjoin path has these fields.
*
* The remarks above for mergeclauses apply for hashclauses as well.
*
* Hashjoin does not care what order its inputs appear in, so we have
* no need for sortkeys.
*/
/* A hashjoin path; input ordering is irrelevant, so no sortkeys needed. */
typedef struct HashPath
{
	JoinPath	jpath;
	List	   *path_hashclauses;	/* join clauses used for hashing */
	int			num_batches;	/* number of batches expected */
} HashPath;
/*
* Restriction clause info.
*
* We create one of these for each AND sub-clause of a restriction condition
* (WHERE or JOIN/ON clause). Since the restriction clauses are logically
* ANDed, we can use any one of them or any subset of them to filter out
* tuples, without having to evaluate the rest. The RestrictInfo node itself
* stores data used by the optimizer while choosing the best query plan.
*
* If a restriction clause references a single base relation, it will appear
* in the baserestrictinfo list of the RelOptInfo for that base rel.
*
* If a restriction clause references more than one base rel, it will
* appear in the joininfo list of every RelOptInfo that describes a strict
* subset of the base rels mentioned in the clause. The joininfo lists are
* used to drive join tree building by selecting plausible join candidates.
* The clause cannot actually be applied until we have built a join rel
* containing all the base rels it references, however.
*
* When we construct a join rel that includes all the base rels referenced
* in a multi-relation restriction clause, we place that clause into the
* joinrestrictinfo lists of paths for the join rel, if neither left nor
* right sub-path includes all base rels referenced in the clause. The clause
* will be applied at that join level, and will not propagate any further up
* the join tree. (Note: the "predicate migration" code was once intended to
* push restriction clauses up and down the plan tree based on evaluation
* costs, but it's dead code and is unlikely to be resurrected in the
* foreseeable future.)
*
* Note that in the presence of more than two rels, a multi-rel restriction
* might reach different heights in the join tree depending on the join
* sequence we use. So, these clauses cannot be associated directly with
* the join RelOptInfo, but must be kept track of on a per-join-path basis.
*
* RestrictInfos that represent equivalence conditions (i.e., mergejoinable
* equalities that are not outerjoin-delayed) are handled a bit differently.
* Initially we attach them to the EquivalenceClasses that are derived from
* them. When we construct a scan or join path, we look through all the
* EquivalenceClasses and generate derived RestrictInfos representing the
* minimal set of conditions that need to be checked for this particular scan
* or join to enforce that all members of each EquivalenceClass are in fact
* equal in all rows emitted by the scan or join.
*
* When dealing with outer joins we have to be very careful about pushing qual
* clauses up and down the tree. An outer join's own JOIN/ON conditions must
* be evaluated exactly at that join node, unless they are "degenerate"
* conditions that reference only Vars from the nullable side of the join.
* Quals appearing in WHERE or in a JOIN above the outer join cannot be pushed
* down below the outer join, if they reference any nullable Vars.
* RestrictInfo nodes contain a flag to indicate whether a qual has been
* pushed down to a lower level than its original syntactic placement in the
* join tree would suggest. If an outer join prevents us from pushing a qual
* down to its "natural" semantic level (the level associated with just the
* base rels used in the qual) then we mark the qual with a "required_relids"
* value including more than just the base rels it actually uses. By
* pretending that the qual references all the rels required to form the outer
* join, we prevent it from being evaluated below the outer join's joinrel.
* When we do form the outer join's joinrel, we still need to distinguish
* those quals that are actually in that join's JOIN/ON condition from those
* that appeared elsewhere in the tree and were pushed down to the join rel
* because they used no other rels. That's what the is_pushed_down flag is
* for; it tells us that a qual is not an OUTER JOIN qual for the set of base
* rels listed in required_relids. A clause that originally came from WHERE
* or an INNER JOIN condition will *always* have its is_pushed_down flag set.
* It's possible for an OUTER JOIN clause to be marked is_pushed_down too,
* if we decide that it can be pushed down into the nullable side of the join.
* In that case it acts as a plain filter qual for wherever it gets evaluated.
* (In short, is_pushed_down is only false for non-degenerate outer join
* conditions. Possibly we should rename it to reflect that meaning?)
*
* RestrictInfo nodes also contain an outerjoin_delayed flag, which is true
* if the clause's applicability must be delayed due to any outer joins
* appearing below it (ie, it has to be postponed to some join level higher
* than the set of relations it actually references). There is also a
* nullable_relids field, which is the set of rels it references that can be
* forced null by some outer join below the clause. outerjoin_delayed = true
* is subtly different from nullable_relids != NULL: a clause might reference
* some nullable rels and yet not be outerjoin_delayed because it also
* references all the other rels of the outer join(s). A clause that is not
* outerjoin_delayed can be enforced anywhere it is computable.
*
* In general, the referenced clause might be arbitrarily complex. The
* kinds of clauses we can handle as indexscan quals, mergejoin clauses,
* or hashjoin clauses are limited (e.g., no volatile functions). The code
* for each kind of path is responsible for identifying the restrict clauses
* it can use and ignoring the rest. Clauses not implemented by an indexscan,
* mergejoin, or hashjoin will be placed in the plan qual or joinqual field
* of the finished Plan node, where they will be enforced by general-purpose
* qual-expression-evaluation code. (But we are still entitled to count
* their selectivity when estimating the result tuple count, if we
* can guess what it is...)
*
* When the referenced clause is an OR clause, we generate a modified copy
* in which additional RestrictInfo nodes are inserted below the top-level
* OR/AND structure. This is a convenience for OR indexscan processing:
* indexquals taken from either the top level or an OR subclause will have
* associated RestrictInfo nodes.
*
* The can_join flag is set true if the clause looks potentially useful as
* a merge or hash join clause, that is if it is a binary opclause with
* nonoverlapping sets of relids referenced in the left and right sides.
* (Whether the operator is actually merge or hash joinable isn't checked,
* however.)
*
* The pseudoconstant flag is set true if the clause contains no Vars of
* the current query level and no volatile functions. Such a clause can be
* pulled out and used as a one-time qual in a gating Result node. We keep
* pseudoconstant clauses in the same lists as other RestrictInfos so that
* the regular clause-pushing machinery can assign them to the correct join
* level, but they need to be treated specially for cost and selectivity
* estimates. Note that a pseudoconstant clause can never be an indexqual
* or merge or hash join clause, so it's of no interest to large parts of
* the planner.
*
* When join clauses are generated from EquivalenceClasses, there may be
* several equally valid ways to enforce join equivalence, of which we need
* apply only one. We mark clauses of this kind by setting parent_ec to
* point to the generating EquivalenceClass. Multiple clauses with the same
* parent_ec in the same join are redundant.
*/
/* One restriction or join clause, with planner bookkeeping (see above). */
typedef struct RestrictInfo
{
	NodeTag		type;
	Expr	   *clause;			/* the represented clause of WHERE or JOIN */
	bool		is_pushed_down; /* TRUE if clause was pushed down in level */
	bool		outerjoin_delayed;	/* TRUE if delayed by lower outer join */
	bool		can_join;		/* see comment above */
	bool		pseudoconstant; /* see comment above */
	/* The set of relids (varnos) actually referenced in the clause: */
	Relids		clause_relids;
	/* The set of relids required to evaluate the clause: */
	Relids		required_relids;
	/* The relids used in the clause that are nullable by lower outer joins: */
	Relids		nullable_relids;
	/* These fields are set for any binary opclause: */
	Relids		left_relids;	/* relids in left side of clause */
	Relids		right_relids;	/* relids in right side of clause */
	/* This field is NULL unless clause is an OR clause: */
	Expr	   *orclause;		/* modified clause with RestrictInfos */
	/* This field is NULL unless clause is potentially redundant: */
	EquivalenceClass *parent_ec;	/* generating EquivalenceClass */
	/* cache space for cost and selectivity */
	QualCost	eval_cost;		/* eval cost of clause; -1 if not yet set */
	Selectivity norm_selec;		/* selectivity for "normal" (JOIN_INNER)
								 * semantics; -1 if not yet set; >1 means a
								 * redundant clause */
	Selectivity outer_selec;	/* selectivity for outer join semantics; -1 if
								 * not yet set */
	/* valid if clause is mergejoinable, else NIL */
	List	   *mergeopfamilies;	/* opfamilies containing clause operator */
	/* cache space for mergeclause processing; NULL if not yet set */
	EquivalenceClass *left_ec;	/* EquivalenceClass containing lefthand */
	EquivalenceClass *right_ec; /* EquivalenceClass containing righthand */
	EquivalenceMember *left_em; /* EquivalenceMember for lefthand */
	EquivalenceMember *right_em;	/* EquivalenceMember for righthand */
	List	   *scansel_cache;	/* list of MergeScanSelCache structs */
	/* transient workspace for use while considering a specific join path */
	bool		outer_is_left;	/* T = outer var on left, F = on right */
	/* valid if clause is hashjoinable, else InvalidOid: */
	Oid			hashjoinoperator;	/* copy of clause operator */
	/* cache space for hashclause processing; -1 if not yet set */
	Selectivity left_bucketsize;	/* avg bucketsize of left side */
	Selectivity right_bucketsize;	/* avg bucketsize of right side */
} RestrictInfo;
/*
* Since mergejoinscansel() is a relatively expensive function, and would
* otherwise be invoked many times while planning a large join tree,
* we go out of our way to cache its results. Each mergejoinable
* RestrictInfo carries a list of the specific sort orderings that have
* been considered for use with it, and the resulting selectivities.
*/
/* Cached mergejoinscansel() results for one sort ordering (see above). */
typedef struct MergeScanSelCache
{
	/* Ordering details (cache lookup key) */
	Oid			opfamily;		/* btree opfamily defining the ordering */
	int			strategy;		/* sort direction (ASC or DESC) */
	bool		nulls_first;	/* do NULLs come before normal values? */
	/* Results */
	Selectivity leftstartsel;	/* first-join fraction for clause left side */
	Selectivity leftendsel;		/* last-join fraction for clause left side */
	Selectivity rightstartsel;	/* first-join fraction for clause right side */
	Selectivity rightendsel;	/* last-join fraction for clause right side */
} MergeScanSelCache;
/*
* Inner indexscan info.
*
* An inner indexscan is one that uses one or more joinclauses as index
* conditions (perhaps in addition to plain restriction clauses). So it
* can only be used as the inner path of a nestloop join where the outer
* relation includes all other relids appearing in those joinclauses.
* The set of usable joinclauses, and thus the best inner indexscan,
* thus varies depending on which outer relation we consider; so we have
* to recompute the best such paths for every join. To avoid lots of
* redundant computation, we cache the results of such searches. For
* each relation we compute the set of possible otherrelids (all relids
* appearing in joinquals that could become indexquals for this table).
* Two outer relations whose relids have the same intersection with this
* set will have the same set of available joinclauses and thus the same
* best inner indexscans for the inner relation. By taking the intersection
* before scanning the cache, we avoid recomputing when considering
* join rels that differ only by the inclusion of irrelevant other rels.
*
* The search key also includes a bool showing whether the join being
* considered is an outer join. Since we constrain the join order for
* outer joins, I believe that this bool can only have one possible value
* for any particular lookup key; but store it anyway to avoid confusion.
*/
/* Cached best inner-indexscan paths for one lookup key (see above). */
typedef struct InnerIndexscanInfo
{
	NodeTag		type;
	/* The lookup key: */
	Relids		other_relids;	/* a set of relevant other relids */
	bool		isouterjoin;	/* true if join is outer */
	/* Best paths for this lookup key (NULL if no available indexscans): */
	Path	   *cheapest_startup_innerpath; /* cheapest startup cost */
	Path	   *cheapest_total_innerpath;	/* cheapest total cost */
} InnerIndexscanInfo;
/*
* Placeholder node for an expression to be evaluated below the top level
* of a plan tree. This is used during planning to represent the contained
* expression. At the end of the planning process it is replaced by either
* the contained expression or a Var referring to a lower-level evaluation of
* the contained expression. Typically the evaluation occurs below an outer
* join, and Var references above the outer join might thereby yield NULL
* instead of the expression value.
*
* Although the planner treats this as an expression node type, it is not
* recognized by the parser or executor, so we declare it here rather than
* in primnodes.h.
*/
/* Placeholder for an expression evaluated below the top plan level (above). */
typedef struct PlaceHolderVar
{
	Expr		xpr;
	Expr	   *phexpr;			/* the represented expression */
	Relids		phrels;			/* base relids syntactically within expr src */
	Index		phid;			/* ID for PHV (unique within planner run) */
	Index		phlevelsup;		/* > 0 if PHV belongs to outer query */
} PlaceHolderVar;
/*
* "Special join" info.
*
* One-sided outer joins constrain the order of joining partially but not
* completely. We flatten such joins into the planner's top-level list of
* relations to join, but record information about each outer join in a
* SpecialJoinInfo struct. These structs are kept in the PlannerInfo node's
* join_info_list.
*
* Similarly, semijoins and antijoins created by flattening IN (subselect)
* and EXISTS(subselect) clauses create partial constraints on join order.
* These are likewise recorded in SpecialJoinInfo structs.
*
* We make SpecialJoinInfos for FULL JOINs even though there is no flexibility
* of planning for them, because this simplifies make_join_rel()'s API.
*
* min_lefthand and min_righthand are the sets of base relids that must be
* available on each side when performing the special join. lhs_strict is
* true if the special join's condition cannot succeed when the LHS variables
* are all NULL (this means that an outer join can commute with upper-level
* outer joins even if it appears in their RHS). We don't bother to set
* lhs_strict for FULL JOINs, however.
*
* It is not valid for either min_lefthand or min_righthand to be empty sets;
* if they were, this would break the logic that enforces join order.
*
* syn_lefthand and syn_righthand are the sets of base relids that are
* syntactically below this special join. (These are needed to help compute
* min_lefthand and min_righthand for higher joins.)
*
* delay_upper_joins is set TRUE if we detect a pushed-down clause that has
* to be evaluated after this join is formed (because it references the RHS).
* Any outer joins that have such a clause and this join in their RHS cannot
* commute with this join, because that would leave noplace to check the
* pushed-down clause. (We don't track this for FULL JOINs, either.)
*
* join_quals is an implicit-AND list of the quals syntactically associated
* with the join (they may or may not end up being applied at the join level).
* This is just a side list and does not drive actual application of quals.
* For JOIN_SEMI joins, this is cleared to NIL in create_unique_path() if
* the join is found not to be suitable for a uniqueify-the-RHS plan.
*
* jointype is never JOIN_RIGHT; a RIGHT JOIN is handled by switching
* the inputs to make it a LEFT JOIN. So the allowed values of jointype
* in a join_info_list member are only LEFT, FULL, SEMI, or ANTI.
*
* For purposes of join selectivity estimation, we create transient
* SpecialJoinInfo structures for regular inner joins; so it is possible
* to have jointype == JOIN_INNER in such a structure, even though this is
* not allowed within join_info_list. We also create transient
* SpecialJoinInfos with jointype == JOIN_INNER for outer joins, since for
* cost estimation purposes it is sometimes useful to know the join size under
* plain innerjoin semantics. Note that lhs_strict, delay_upper_joins, and
* join_quals are not set meaningfully within such structs.
*/
/* Per-outer-join (or semi/anti-join) ordering constraints (see above). */
typedef struct SpecialJoinInfo
{
	NodeTag		type;
	Relids		min_lefthand;	/* base relids in minimum LHS for join */
	Relids		min_righthand;	/* base relids in minimum RHS for join */
	Relids		syn_lefthand;	/* base relids syntactically within LHS */
	Relids		syn_righthand;	/* base relids syntactically within RHS */
	JoinType	jointype;		/* always INNER, LEFT, FULL, SEMI, or ANTI */
	bool		lhs_strict;		/* joinclause is strict for some LHS rel */
	bool		delay_upper_joins;	/* can't commute with upper RHS */
	List	   *join_quals;		/* join quals, in implicit-AND list format */
} SpecialJoinInfo;
/*
* Append-relation info.
*
* When we expand an inheritable table or a UNION-ALL subselect into an
* "append relation" (essentially, a list of child RTEs), we build an
* AppendRelInfo for each child RTE. The list of AppendRelInfos indicates
* which child RTEs must be included when expanding the parent, and each
* node carries information needed to translate Vars referencing the parent
* into Vars referencing that child.
*
* These structs are kept in the PlannerInfo node's append_rel_list.
* Note that we just throw all the structs into one list, and scan the
* whole list when desiring to expand any one parent. We could have used
* a more complex data structure (eg, one list per parent), but this would
* be harder to update during operations such as pulling up subqueries,
* and not really any easier to scan. Considering that typical queries
* will not have many different append parents, it doesn't seem worthwhile
* to complicate things.
*
* Note: after completion of the planner prep phase, any given RTE is an
* append parent having entries in append_rel_list if and only if its
* "inh" flag is set. We clear "inh" for plain tables that turn out not
* to have inheritance children, and (in an abuse of the original meaning
* of the flag) we set "inh" for subquery RTEs that turn out to be
* flattenable UNION ALL queries. This lets us avoid useless searches
* of append_rel_list.
*
* Note: the data structure assumes that append-rel members are single
* baserels. This is OK for inheritance, but it prevents us from pulling
* up a UNION ALL member subquery if it contains a join. While that could
* be fixed with a more complex data structure, at present there's not much
* point because no improvement in the plan could result.
*/
typedef struct AppendRelInfo
{
NodeTag type;
/*
* These fields uniquely identify this append relationship. There can be
* (in fact, always should be) multiple AppendRelInfos for the same
* parent_relid, but never more than one per child_relid, since a given
* RTE cannot be a child of more than one append parent.
*/
Index parent_relid; /* RT index of append parent rel */
Index child_relid; /* RT index of append child rel */
/*
* For an inheritance appendrel, the parent and child are both regular
* relations, and we store their rowtype OIDs here for use in translating
* whole-row Vars. For a UNION-ALL appendrel, the parent and child are
* both subqueries with no named rowtype, and we store InvalidOid here.
*/
Oid parent_reltype; /* OID of parent's composite type */
Oid child_reltype; /* OID of child's composite type */
/*
* The N'th element of this list is a Var or expression representing the
* child column corresponding to the N'th column of the parent. This is
* used to translate Vars referencing the parent rel into references to
* the child. A list element is NULL if it corresponds to a dropped
* column of the parent (this is only possible for inheritance cases, not
* UNION ALL). The list elements are always simple Vars for inheritance
* cases, but can be arbitrary expressions in UNION ALL cases.
*
* Notice we only store entries for user columns (attno > 0). Whole-row
* Vars are special-cased, and system columns (attno < 0) need no special
* translation since their attnos are the same for all tables.
*
* Caution: the Vars have varlevelsup = 0. Be careful to adjust as needed
* when copying into a subquery.
*/
List *translated_vars; /* Expressions in the child's Vars */
/*
* We store the parent table's OID here for inheritance, or InvalidOid for
* UNION ALL. This is only needed to help in generating error messages if
* an attempt is made to reference a dropped parent column.
*/
Oid parent_reloid; /* OID of parent relation */
} AppendRelInfo;
/*
* For each distinct placeholder expression generated during planning, we
* store a PlaceHolderInfo node in the PlannerInfo node's placeholder_list.
* This stores info that is needed centrally rather than in each copy of the
* PlaceHolderVar. The phid fields identify which PlaceHolderInfo goes with
* each PlaceHolderVar. Note that phid is unique throughout a planner run,
* not just within a query level --- this is so that we need not reassign ID's
* when pulling a subquery into its parent.
*
* The idea is to evaluate the expression at (only) the ph_eval_at join level,
* then allow it to bubble up like a Var until the ph_needed join level.
* ph_needed has the same definition as attr_needed for a regular Var.
*
* ph_may_need is an initial estimate of ph_needed, formed using the
* syntactic locations of references to the PHV. We need this in order to
* determine whether the PHV reference forces a join ordering constraint:
* if the PHV has to be evaluated below the nullable side of an outer join,
* and then used above that outer join, we must constrain join order to ensure
* there's a valid place to evaluate the PHV below the join. The final
* actual ph_needed level might be lower than ph_may_need, but we can't
* determine that until later on. Fortunately this doesn't matter for what
* we need ph_may_need for: if there's a PHV reference syntactically
* above the outer join, it's not going to be allowed to drop below the outer
* join, so we would come to the same conclusions about join order even if
* we had the final ph_needed value to compare to.
*
* We create a PlaceHolderInfo only after determining that the PlaceHolderVar
* is actually referenced in the plan tree, so that unreferenced placeholders
* don't result in unnecessary constraints on join order.
*/
/* Central per-placeholder info, keyed by phid (see discussion above). */
typedef struct PlaceHolderInfo
{
	NodeTag		type;
	Index		phid;			/* ID for PH (unique within planner run) */
	PlaceHolderVar *ph_var;		/* copy of PlaceHolderVar tree */
	Relids		ph_eval_at;		/* lowest level we can evaluate value at */
	Relids		ph_needed;		/* highest level the value is needed at */
	Relids		ph_may_need;	/* highest level it might be needed at */
	int32		ph_width;		/* estimated attribute width */
} PlaceHolderInfo;
/*
* At runtime, PARAM_EXEC slots are used to pass values around from one plan
* node to another. They can be used to pass values down into subqueries (for
* outer references in subqueries), or up out of subqueries (for the results
* of a subplan).
* The planner is responsible for assigning nonconflicting PARAM_EXEC IDs to
* the PARAM_EXEC Params it generates.
*
* Outer references are managed via root->plan_params, which is a list of
* PlannerParamItems. While planning a subquery, each parent query level's
* plan_params contains the values required from it by the current subquery.
*
* The item a PlannerParamItem represents can be one of three kinds:
*
* A Var: the slot represents a variable of this level that must be passed
* down because subqueries have outer references to it. The varlevelsup
* value in the Var will always be zero.
*
* A PlaceHolderVar: this works much like the Var case, except that the
* entry is a PlaceHolderVar node with a contained expression. The PHV
* will have phlevelsup = 0, and the contained expression is adjusted
* to match in level.
*
* An Aggref (with an expression tree representing its argument): the slot
* represents an aggregate expression that is an outer reference for some
* subquery. The Aggref itself has agglevelsup = 0, and its argument tree
* is adjusted to match in level.
*
* Note: we detect duplicate Var and PlaceHolderVar parameters and coalesce
* them into one slot, but we do not bother to do that for Aggrefs.
* The scope of duplicate-elimination only extends across the set of
* parameters passed from one query level into a single subquery. So there is
* no possibility of a PARAM_EXEC slot being used for conflicting purposes.
*
* In addition, PARAM_EXEC slots are assigned for Params representing outputs
* from subplans (values that are setParam items for those subplans). These
* IDs need not be tracked via PlannerParamItems, since we do not need any
* duplicate-elimination nor later processing of the represented expressions.
* Instead, we just record the assignment of the slot number by incrementing
* root->glob->nParamExec.
*/
/* One PARAM_EXEC slot assignment for an outer reference (see above). */
typedef struct PlannerParamItem
{
	NodeTag		type;
	Node	   *item;			/* the Var, PlaceHolderVar, or Aggref */
	int			paramId;		/* its assigned PARAM_EXEC slot number */
} PlannerParamItem;
#endif /* RELATION_H */
| {
"content_hash": "a76a68d76e14256623964304a83a3868",
"timestamp": "",
"source": "github",
"line_count": 1395,
"max_line_length": 79,
"avg_line_length": 44.21720430107527,
"alnum_prop": 0.7328761571259504,
"repo_name": "ycsoft/FatCat-Server",
"id": "300a8f4e6ff670f877e5c5c7363552099ff2739e",
"size": "62162",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "LIBS/postgresql/postgresql/server/nodes/relation.h",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "195345"
},
{
"name": "Batchfile",
"bytes": "32367"
},
{
"name": "C",
"bytes": "9529739"
},
{
"name": "C#",
"bytes": "41850"
},
{
"name": "C++",
"bytes": "175536080"
},
{
"name": "CMake",
"bytes": "14812"
},
{
"name": "CSS",
"bytes": "282447"
},
{
"name": "Cuda",
"bytes": "26521"
},
{
"name": "FORTRAN",
"bytes": "1856"
},
{
"name": "Groff",
"bytes": "6163"
},
{
"name": "HTML",
"bytes": "148956564"
},
{
"name": "JavaScript",
"bytes": "174868"
},
{
"name": "Lex",
"bytes": "1290"
},
{
"name": "Makefile",
"bytes": "1045258"
},
{
"name": "Max",
"bytes": "37424"
},
{
"name": "Objective-C",
"bytes": "34644"
},
{
"name": "Objective-C++",
"bytes": "246"
},
{
"name": "PHP",
"bytes": "60249"
},
{
"name": "Perl",
"bytes": "37297"
},
{
"name": "Perl6",
"bytes": "2130"
},
{
"name": "Python",
"bytes": "1717781"
},
{
"name": "QML",
"bytes": "613"
},
{
"name": "QMake",
"bytes": "9450"
},
{
"name": "Rebol",
"bytes": "372"
},
{
"name": "Shell",
"bytes": "372652"
},
{
"name": "Tcl",
"bytes": "1205"
},
{
"name": "TeX",
"bytes": "13819"
},
{
"name": "XSLT",
"bytes": "564356"
},
{
"name": "Yacc",
"bytes": "19612"
}
],
"symlink_target": ""
} |
// LiveUnit-to-QUnit adapter: runs WinJS tests written against the LiveUnit
// API on top of QUnit, with optional result reporting over a local WebSocket.
(function () {
    // QUnit's original global error handler; restored for tests that do not
    // declare an expected exception.
    var qUnitGlobalErrorHandler = window.onerror;
    // Per-test timeout in ms; overridable via the ?testtimeout= URL parameter.
    var testTimeout = QUnit.urlParams.testtimeout ? QUnit.urlParams.testtimeout : 15000;
    var startTime = -1; // Date.now() at the start of the current test
    var hasRun = false; // whether a run has already been started on this page
    var testFailed = false; // set by the LiveUnit.Assert helpers on failure
    var testError = null; // details of the first failed assert of the test
    var verboseLog = ""; // accumulated LiveUnit.LoggingCore output
    var log = []; // summaries of failed tests for the final report
    var socketId = document.title; // identifies this page to the report socket
    var socketSignal = null; // queues callbacks until the socket is ready
    QUnit.config.autostart = false;
    QUnit.config.testTimeout = testTimeout;
    QUnit.config.hidepassed = true;
    QUnit.breakOnAssertFail = false;
    var qunitDiv;
    var qunitTestFixtureDiv;
window.addEventListener("load", function () {
qunitDiv = document.querySelector("#qunit");
qunitTestFixtureDiv = document.querySelector("#qunit-fixture");
function addOptions() {
function createOption(id, label, initiallyChecked) {
var cb = document.createElement("input");
cb.type = "checkbox";
cb.id = id;
cb.checked = initiallyChecked;
var span = document.createElement("span");
span.innerHTML = label;
toolBar.appendChild(cb);
toolBar.appendChild(span);
}
var toolBar = document.querySelector("#qunit-testrunner-toolbar");
if (!toolBar) {
setTimeout(addOptions);
return;
}
createOption("breakOnAssertFail", "Break on Assert fail", QUnit.urlParams.breakonassertfail === "true" || QUnit.urlParams.breakonassertfail === true);
createOption("disableTestTimeout", "Disable test timeout", QUnit.urlParams.disabletesttimeout === "true" || QUnit.urlParams.disabletesttimeout === true);
createOption("fastAnimations", "Fast Animations", QUnit.urlParams.fastanimations === "true" || QUnit.urlParams.fastanimations === true);
createOption("loopTests", "Loop Tests", QUnit.urlParams.loop === "true" || QUnit.urlParams.loop === true);
var btn = document.createElement("button");
btn.id = "startButton";
btn.style.borderColor = btn.style.color = "#5E740B";
btn.style.marginLeft = "4px";
btn.innerHTML = "Start";
btn.onclick = function () {
// Changing the fast animations setting requires a re-load.
if (!hasRun && (WinJS.Utilities._fastAnimations === document.querySelector("#fastAnimations").checked)) {
start();
} else {
var qs = "?breakonassertfail=" + document.querySelector("#breakOnAssertFail").checked;
qs += "&disabletesttimeout=" + document.querySelector("#disableTestTimeout").checked;
qs += "&fastanimations=" + document.querySelector("#fastAnimations").checked;
qs += "&loop=" + document.querySelector("#loopTests").checked;
qs += "&autostart=true";
if (QUnit.urlParams.module) {
qs += "&module=" + QUnit.urlParams.module;
}
if (QUnit.urlParams.testNumber) {
qs += "&testNumber=" + QUnit.urlParams.testNumber;
}
window.location = window.location.protocol + "//" + window.location.host + window.location.pathname + qs;
}
};
toolBar.appendChild(btn);
if (QUnit.urlParams.autostart === "true" || QUnit.urlParams.autostart === true) {
start();
}
}
addOptions();
if (QUnit.urlParams.subscriptionKey) {
var socket = null;
var socketReady = false;
var listeners = [];
socketSignal = function (callback) {
if (socketReady) {
callback(socket);
} else {
listeners.push(callback);
}
};
var attempts = 0;
setTimeout(function connect() {
try {
socket = new WebSocket("ws://localhost:9998");
socket.onopen = function () {
socket.send(JSON.stringify({ id: socketId, type: "registerReporter", args: { subscriptionKey: QUnit.urlParams.subscriptionKey } }));
listeners.forEach(function (listener) {
listener(socket);
});
socketReady = true;
};
socket.onclose = function (m) {
setTimeout(window.close, 500);
}
attempts++;
} catch (e) {
// new WebSocket() can throw a security exception when there are too many connections
// going out at once; since the dashboard launches 4+ test pages at once, we may see
// some of these.
if (attempts < 5) {
setTimeout(connect, 500);
}
}
}, 500);
}
});
if (QUnit.urlParams.fastanimations === "true" || QUnit.urlParams.fastanimations === true) {
WinJS.Utilities._fastAnimations = true;
}
function start() {
hasRun = true;
QUnit.breakOnAssertFail = document.querySelector("#breakOnAssertFail").checked;
QUnit.config.testTimeout = document.querySelector("#disableTestTimeout").checked ? undefined : testTimeout;
QUnit.config.started = +new Date(); // This is a temporary fix and can be removed when and if jquery/qunit#555 is accepted.
QUnit.start();
}
function completeTest() {
// Since we want one assert per test, if this test times out, then we do not
// call asserts because the timeout itself is a failed assert.
if (Date.now() - startTime < testTimeout || typeof QUnit.config.testTimeout === 'undefined') {
QUnit.assert.ok(!testFailed, testError && testError.message);
} else {
verboseLog = "Test timeout - " + verboseLog;
}
QUnit.start();
}
function handleGlobalError(testFunc, error) {
var expectedException = testFunc["LiveUnit.ExpectedException"];
if (expectedException) {
if (expectedException.message) {
expectedException = [expectedException];
}
var handled = false;
for (var i = 0; i < expectedException.length; i++) {
var message = expectedException[i].message
// Chrome prefixes with "Uncaught Error". Firefox prefixes with "Error"
if (message === error || ("Uncaught Error: " + message) === error || ("Error: " + message) === error) {
handled = true;
break;
}
}
if (!handled) {
LiveUnit.Assert.fail("Unexpected exception: " + error);
}
} else {
LiveUnit.Assert.fail("Unexpected exception: " + error);
}
}
function hookupGlobalErrorHandler(testFunc) {
var expectedException = testFunc["LiveUnit.ExpectedException"];
if (expectedException) {
if (expectedException.message) {
expectedException = [expectedException];
}
window.onerror = function (e) {
handleGlobalError(testFunc, e);
};
} else {
window.onerror = qUnitGlobalErrorHandler;
}
}
function cleanUp() {
testFailed = false;
testError = null;
verboseLog = "";
qunitDiv.style.zIndex = 0;
}
function AllObjectKeys(obj) {
var keys = Object.keys(obj);
var proto = Object.getPrototypeOf(obj);
if(proto) {
var protoKeys = AllObjectKeys(proto);
return keys.concat(protoKeys);
}
return keys;
}
QUnit.testStart(function testStart(testDetails) {
qunitDiv.style.zIndex = -1;
});
QUnit.log(function (details) {
if (!details.result) {
testError = testError || {};
testError.source = details.source;
}
});
QUnit.testDone(function testDone(test) {
if (test.failed) {
console.log(test.module + ": " + test.name + " failed, " + test.runtime + "ms");
console.log(verboseLog);
log.push({
name: test.name,
result: !!test.failed,
expected: testError.expected,
actual: testError.actual,
// Omit all but the first few callstacks to keep our results data small.
// If it's larger than 64 KB, Saucelabs will ignore it.
source: (log.length < 3 && testError.source) ? testError.source.substr(0, 500) : null
});
socketSignal && socketSignal(function (socket) {
socket.send(JSON.stringify({
id: socketId,
type: "report",
args: {
data: {
type: "singleFailure",
name: test.name,
number: QUnit.config.current.testNumber
}
}
}));
});
}
cleanUp();
});
QUnit.moduleDone(function (module) {
if (document.body.children.length > 2) {
for (var i = document.body.children.length - 1; i >= 0; i--) {
var child = document.body.children[i];
if (child === qunitDiv || child === qunitTestFixtureDiv) {
continue;
}
console.log("Test: " + module.name + " - Incomplete cleanup!");
WinJS.Utilities.disposeSubTree(child);
document.body.removeChild(child);
}
}
});
QUnit.done(function (results) {
if (document.querySelector("#loopTests").checked) {
if (!log.length) {
document.querySelector("#startButton").click();
}
} else {
results.tests = log;
results.url = document.location.href;
window.global_test_results = results;
socketSignal && socketSignal(function (socket) {
socket.send(JSON.stringify({
id: socketId,
type: "report",
args: {
data: {
type: "finished",
runtime: results.runtime,
failures: log.length
}
}
}));
socket.close();
});
}
});
function formatString(string) {
var args = arguments;
if (args.length > 1) {
string = string.replace(/({{)|(}})|{(\d+)}|({)|(})/g,
function (unused, left, right, index, illegalLeft, illegalRight) {
if (illegalLeft || illegalRight) {
throw new Error(formatString("Malformed string input: {0}", illegalLeft || illegalRight));
}
return (left && "{") || (right && "}") || args[(index | 0) + 1];
});
}
return string;
}
window.LiveUnit = {
Assert: {
areEqual: function (expected, actual, message) {
if (expected !== actual) {
if (QUnit.breakOnAssertFail) {
debugger;
}
testError = testError || {
message: formatString("areEqual - {0} (expected: {1}, actual: {2})", message || "", expected, actual),
expected: expected,
actual: actual
};
testFailed = true;
}
},
areNotEqual: function (left, right, message) {
if (left === right) {
if (QUnit.breakOnAssertFail) {
debugger;
}
testError = testError || {
message: formatString("areNotEqual - {0} (both equal: {1})", message || "", left),
expected: left,
actual: right
};
testFailed = true;
}
},
fail: function (message) {
if (QUnit.breakOnAssertFail) {
debugger;
}
testError = testError || {
message: formatString("fail - {0}", message || ""),
expected: "pass",
actual: "fail"
};
testFailed = true;
},
isFalse: function (falsy, message) {
if (falsy) {
if (QUnit.breakOnAssertFail) {
debugger;
}
testError = testError || {
message: formatString("isFalse - {0} (expected: falsy, actual: {1})", message || "", falsy),
expected: "falsy",
actual: falsy
};
testFailed = true;
}
},
isTrue: function (truthy, message) {
if (!truthy) {
if (QUnit.breakOnAssertFail) {
debugger;
}
testError = testError || {
message: formatString("isTrue - {0} (expected: truthy, actual: {1})", message || "", truthy),
expected: "truthy",
actual: truthy
};
testFailed = true;
}
},
isNull: function (obj, message) {
// LiveUnit's null assert also accepts undefined
var pass = obj === null || obj === undefined;
if (!pass) {
if (QUnit.breakOnAssertFail) {
debugger;
}
testError = testError || {
message: formatString("isNull - {0} (expected: null or undefined, actual: {1})", message || "", obj),
expected: "null",
actual: obj
};
testFailed = true;
}
},
isNotNull: function (obj, message) {
// LiveUnit's null assert also accepts undefined
var pass = obj !== null && obj !== undefined;
if (!pass) {
if (QUnit.breakOnAssertFail) {
debugger;
}
testError = testError || {
message: formatString("isNotNull - {0} (expected: not null and not undefined, actual: {1})", message || "", obj),
expected: "not null",
actual: obj
};
testFailed = true;
}
},
stringContains: function (str, substr, message) {
var pass = str.indexOf(substr) !== -1;
if (!pass) {
if (QUnit.breakOnAssertFail) {
debugger;
}
testError = testError || {
message: formatString("stringContains - {0} (substring '{1}' not present in '{2}')", message || "", substr, str),
expected: substr,
actual: str
};
testFailed = true;
}
},
stringDoesNotContain: function (str, substr, message) {
var pass = str.indexOf(substr) === -1;
if (!pass) {
if (QUnit.breakOnAssertFail) {
debugger;
}
testError = testError || {
message: formatString("stringDoesNotContain - {0} (substring '{1}' was present in '{2}')", message || "", substr, str),
expected: substr,
actual: str
};
testFailed = true;
}
}
},
GetWrappedCallback: function (func) {
return func;
},
LoggingCore: {
logComment: function (message) {
verboseLog += "\n" + message;
},
getVerboseLog: function () {
return verboseLog;
},
},
registerTestClass: function (moduleName) {
function runSetupTeardownFunc(func) {
if (func.length) {
QUnit.stop();
func(function () {
QUnit.start();
});
} else {
func();
}
}
var path = moduleName.split(".");
var module = window;
path.forEach(function (key) {
module = module[key];
});
var testModule = new module();
QUnit.module(moduleName, {
setup: function () {
if (!testModule.setUp) {
return;
}
runSetupTeardownFunc(testModule.setUp.bind(testModule));
},
teardown: function () {
if (!testModule.tearDown) {
return;
}
runSetupTeardownFunc(testModule.tearDown.bind(testModule));
}
});
AllObjectKeys(testModule).forEach(function (key) {
if (key.indexOf("test") !== 0) {
return;
}
var testName = key.substr("test".length);
var testFunc = testModule[key];
if (testFunc.length) {
// Async WebUnit tests take a 'complete' parameter
QUnit.asyncTest(testName, function () {
startTime = Date.now();
hookupGlobalErrorHandler(testFunc);
var error = false;
try {
testFunc.call(testModule, function () {
if (!error) {
completeTest();
}
});
} catch (e) {
handleGlobalError(testFunc, e.message);
completeTest();
error = true;
}
});
} else {
QUnit.asyncTest(testName, function () {
startTime = Date.now();
hookupGlobalErrorHandler(testFunc);
try {
testFunc.call(testModule);
completeTest();
}
catch (e) {
handleGlobalError(testFunc, e.message);
completeTest();
}
});
}
});
},
};
})();
| {
"content_hash": "4f87ecfb39300e0f67280ee9bb25ec90",
"timestamp": "",
"source": "github",
"line_count": 519,
"max_line_length": 165,
"avg_line_length": 38.15799614643545,
"alnum_prop": 0.4451625934154716,
"repo_name": "Guadzilah/winjs",
"id": "af6651a747222e8160d46c4776624919a3641926",
"size": "19845",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/TestLib/liveToQ/liveToQ.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "262870"
},
{
"name": "HTML",
"bytes": "227080"
},
{
"name": "JavaScript",
"bytes": "4527688"
},
{
"name": "TypeScript",
"bytes": "6234480"
}
],
"symlink_target": ""
} |
package ragnardb.parser.ast;

/**
 * Created by klu on 7/7/2015.
 *
 * <p>NOTE(review): currently an empty placeholder in the parser AST package —
 * presumably the node for a SQL {@code DROP TABLE} statement; confirm before
 * adding state or behavior.</p>
 */
public class DropTable {
}
| {
"content_hash": "2403658971a45991b1122d4a73be2b02",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 30,
"avg_line_length": 13.714285714285714,
"alnum_prop": 0.6666666666666666,
"repo_name": "gosu-lang/ragnardb",
"id": "c62c4a32def99f3130266b9219e5001ae673e719",
"size": "96",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ragnardb/src/main/java/ragnardb/parser/ast/DropTable.java",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "GAP",
"bytes": "7162"
},
{
"name": "Gosu",
"bytes": "2992"
},
{
"name": "Java",
"bytes": "509638"
}
],
"symlink_target": ""
} |
rem Build and run the OCCI smoke test against the Oracle instant client SDK.
set oracle_oci_home=%userprofile%\Downloads\instantclient-sdk-nt-12.1.0.1.0\instantclient_12_1\sdk
set include=%include%;%oracle_oci_home%\include
rem Compile with debug info and link against the OCI/OCCI import libraries.
cl /Zi testocci.cpp /link oci.lib oraocci12.lib /libpath:%oracle_oci_home%\lib\msvc\vc11 /libpath:%oracle_oci_home%\lib\msvc
setlocal
rem Put the instant client runtime DLLs on PATH only for this test run.
set path=%userprofile%\downloads\instantclient-basic-nt-12.1.0.1.0\instantclient_12_1\vc11;%userprofile%\downloads\instantclient-basic-nt-12.1.0.1.0\instantclient_12_1;%path%
rem Run: user, password, EZ connect descriptor, query to execute.
testocci.exe scott tiger (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=w8-32-12core)(PORT=1521))(CONNECT_DATA=(SERVICE_NAME=XE)(SERVER=DEDICATED))) "select * from emp"
endlocal
"content_hash": "f6e5e2677a6f75d2fabd444bbf9db6e2",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 174,
"avg_line_length": 57.45454545454545,
"alnum_prop": 0.7753164556962026,
"repo_name": "aam/oracledart",
"id": "be85616a38d49ab58edff6a41f5723721e1d40b3",
"size": "632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testocci/buildandrun.cmd",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2364"
},
{
"name": "C++",
"bytes": "20952"
},
{
"name": "Dart",
"bytes": "15382"
},
{
"name": "Python",
"bytes": "1879"
},
{
"name": "Shell",
"bytes": "820"
}
],
"symlink_target": ""
} |
%% Extract vgg features (14*14*512) from
%% Pororo Animation
% Root directory holding the per-disc image folders.
path_to_image = '/Volumes/Oculus/data/Pororo/images/';
addpath('preprocess');
%% Extract features from individual discs.
% Seasons 2-4, four discs each; folders are named Pororo_ENGLISH<season>_<disc>.
% Only subtitle extraction is active; the vgg/cp steps are currently disabled.
for season = 2 : 4
    for disc = 1 : 4
        target = sprintf('Pororo_ENGLISH%d_%d', season, disc);
        %extract_vgg(target, path_to_image);
        %create_cp(target);
        create_sub(target);
    end
end
"content_hash": "2b20c1705055f980bcbe288095f7c7be",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 62,
"avg_line_length": 26.6,
"alnum_prop": 0.6390977443609023,
"repo_name": "jnhwkim/AMMA",
"id": "157ac7f012b539c606ba0b2177bffa477ee19c4d",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extract_vgg_batch.m",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "12967"
},
{
"name": "Python",
"bytes": "1473"
},
{
"name": "Shell",
"bytes": "1789"
}
],
"symlink_target": ""
} |
/*
 * histotest.c
 *
 *    Makes histograms of grayscale and color pixels
 *    from a pix.  For RGB color, this uses
 *    rgb --> octcube indexing.
 *
 *       histotest filein sigbits
 *
 *    where the number of octcubes is 8^(sigbits)
 *
 *    For gray, sigbits is ignored.
 */

#include "allheaders.h"

/*
 * Entry point.  Reads an image and renders its histogram with gplot:
 *   - 32 bpp input: color histogram via octcube indexing (8^sigbits cubes)
 *   - 8 bpp input:  grayscale histogram, log-scaled y axis (sigbits ignored)
 * Fixes vs. original: explicit `int` return type on main (implicit int is
 * invalid since C99) and removal of the unused local `pixd`.
 */
int
main(int argc,
     char **argv)
{
char        *filein;
l_int32      d, sigbits;
GPLOT       *gplot;
NUMA        *na;
PIX         *pixs;
static char  mainName[] = "histotest";

    if (argc != 3)
        exit(ERROR_INT(" Syntax: histotest filein sigbits", mainName, 1));
    filein = argv[1];
    sigbits = atoi(argv[2]);

    if ((pixs = pixRead(filein)) == NULL)
        exit(ERROR_INT("pixs not made", mainName, 1));
    d = pixGetDepth(pixs);
    if (d != 8 && d != 32)
        exit(ERROR_INT("depth not 8 or 32 bpp", mainName, 1));

    if (d == 32) {
        startTimer();
        if ((na = pixOctcubeHistogram(pixs, sigbits, NULL)) == NULL)
            exit(ERROR_INT("na not made", mainName, 1));
        fprintf(stderr, "histo time = %7.3f sec\n", stopTimer());
        gplot = gplotCreate("/tmp/junkrootc", GPLOT_X11,
                            "color histogram with octcube indexing",
                            "octcube index", "number of pixels in cube");
        gplotAddPlot(gplot, NULL, na, GPLOT_LINES, "input pix");
        gplotMakeOutput(gplot);
        gplotDestroy(&gplot);
    }
    else {
        if ((na = pixGetGrayHistogram(pixs, 1)) == NULL)
            exit(ERROR_INT("na not made", mainName, 1));
        numaWrite("/tmp/junkna", na);
        gplot = gplotCreate("/tmp/junkrootg", GPLOT_X11, "grayscale histogram",
                            "gray value", "number of pixels");
        gplotSetScaling(gplot, GPLOT_LOG_SCALE_Y);
        gplotAddPlot(gplot, NULL, na, GPLOT_LINES, "input pix");
        gplotMakeOutput(gplot);
        gplotDestroy(&gplot);
    }
    pixDestroy(&pixs);
    numaDestroy(&na);
    return 0;
}
| {
"content_hash": "00856aac3af68568aeb40a7b835c5b76",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 72,
"avg_line_length": 26.014492753623188,
"alnum_prop": 0.6128133704735376,
"repo_name": "pasella/tess-two",
"id": "bb6338d96f4850301115fb9f404c3b1ee5955a58",
"size": "3309",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "jni/com_googlecode_leptonica_android/src/prog/histotest.c",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11065963"
},
{
"name": "C++",
"bytes": "5716282"
},
{
"name": "Groff",
"bytes": "48954"
},
{
"name": "HTML",
"bytes": "231159"
},
{
"name": "Java",
"bytes": "180508"
},
{
"name": "Makefile",
"bytes": "25751785"
},
{
"name": "PostScript",
"bytes": "3630"
},
{
"name": "Python",
"bytes": "2063"
},
{
"name": "Shell",
"bytes": "313719"
},
{
"name": "TeX",
"bytes": "2741"
}
],
"symlink_target": ""
} |
# Admin CRUD controller for theme blocks: lists assigned/unassigned blocks,
# creates and edits single blocks, and batch-updates many at once.
class AdminBlocksController < AdminController
  add_breadcrumb I18n.t('theme.blocks'), 'admin_blocks_path'
  # Prepare @block (and the matching breadcrumb) before the relevant actions.
  before_filter :new_block, :only => [:new, :create]
  before_filter :edit_block, :only => [:edit, :update]
  # Lists unassigned blocks and all block subclasses, each sorted by their
  # translated titles.
  def index
    @unassigneds = Block.unassigneds.sort_by { |b| t("#{b.class.to_s.underscore}.title") }
    @classes = Block.subclasses.sort_by { |c| t("#{c.to_s.underscore}.title") }
  end
  def new
  end
  # Save/update all blocks.
  def create
    if @block.save
      flash[:notice] = t('theme.blocks_saved')
      redirect_to(admin_blocks_path)
    else
      render(:action => 'new')
    end
  end
  def edit
  end
  # Updates the block built by edit_block from the type-specific params hash.
  def update
    if @block.update_attributes(params[@block.class.to_s.underscore])
      flash[:notice] = t('theme.blocks_saved')
      redirect_to(admin_blocks_path)
    else
      render(:action => 'edit')
    end
  end
  def destroy
    Block.destroy(params[:id])
    redirect_to(admin_blocks_path)
  end
  # Applies attribute updates to many blocks at once; params[:blocks] maps
  # block ids to attribute hashes. Per-block failures are silently skipped.
  def batch_update
    confs = params[:blocks]
    confs.each do |id, attrs|
      block = Block.find(id)
      block.update_attributes(attrs)
    end
    flash[:notice] = t('theme.blocks_saved')
    redirect_to(admin_blocks_path)
  end
  private
  # Builds a new block of the requested class (params[:type]) and adds the
  # "create" breadcrumb.
  def new_block
    type = params[:type]
    klass = type.constantize
    @block = klass.new(params[klass.to_s.underscore])
    title = t("#{type.underscore}.title")
    add_breadcrumb(t('theme.create_new_block', :block => title))
  end
  # Loads the block being edited and adds the "edit" breadcrumb.
  def edit_block
    @block = Block.find(params[:id])
    title = t("#{@block.class.to_s.underscore}.title")
    add_breadcrumb(t('theme.edit_block', :block => title))
  end
end
| {
"content_hash": "fb451dd614b5d48a9b347040be4c12cd",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 90,
"avg_line_length": 24.134328358208954,
"alnum_prop": 0.6357452071737786,
"repo_name": "ngocdaothanh/openkh",
"id": "32621eb7404a8696fc842fea1ee4b4e7475d3f64",
"size": "1617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/core/theme/app/controllers/admin_blocks_controller.rb",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "4096"
},
{
"name": "JavaScript",
"bytes": "23533"
},
{
"name": "Ruby",
"bytes": "158791"
}
],
"symlink_target": ""
} |
<?xml version="1.0" encoding="UTF-8" ?>
<class name="XROrigin3D" inherits="Node3D" version="4.0">
<brief_description>
The origin point in AR/VR.
</brief_description>
<description>
This is a special node within the AR/VR system that maps the physical location of the center of our tracking space to the virtual location within our game world.
There should be only one of these nodes in your scene and you must have one. All the XRCamera3D, XRController3D and XRAnchor3D nodes should be direct children of this node for spatial tracking to work correctly.
It is the position of this node that you update when your character needs to move through your game world while we're not moving in the real world. Movement in the real world is always in relation to this origin point.
For example, if your character is driving a car, the XROrigin3D node should be a child node of this car. Or, if you're implementing a teleport system to move your character, you should change the position of this node.
</description>
<tutorials>
<link title="VR documentation index">https://docs.godotengine.org/en/latest/tutorials/vr/index.html</link>
</tutorials>
<members>
<member name="world_scale" type="float" setter="set_world_scale" getter="get_world_scale" default="1.0">
Allows you to adjust the scale to your game's units. Most AR/VR platforms assume a scale of 1 game world unit = 1 real world meter.
[b]Note:[/b] This method is a passthrough to the [XRServer] itself.
</member>
</members>
</class>
| {
"content_hash": "fa85f8ddb21663cae26dda07c792fd73",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 220,
"avg_line_length": 72.0952380952381,
"alnum_prop": 0.7562747688243064,
"repo_name": "DmitriySalnikov/godot",
"id": "0d8acfeb1b5913f578c76f0204781635a879dd39",
"size": "1514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/classes/XROrigin3D.xml",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "50004"
},
{
"name": "C#",
"bytes": "175747"
},
{
"name": "C++",
"bytes": "17474662"
},
{
"name": "GLSL",
"bytes": "1271"
},
{
"name": "Java",
"bytes": "499031"
},
{
"name": "JavaScript",
"bytes": "9580"
},
{
"name": "Makefile",
"bytes": "451"
},
{
"name": "Objective-C",
"bytes": "2644"
},
{
"name": "Objective-C++",
"bytes": "169329"
},
{
"name": "Python",
"bytes": "293239"
},
{
"name": "Shell",
"bytes": "11043"
}
],
"symlink_target": ""
} |
#ifndef _FCStore_h
#define _FCStore_h

#include "Shared/Core/FCCore.h"
#include "FCStoreItem.h"

// Interface to the platform in-app purchase store. Results of asynchronous
// operations are reported back to script code through the Lua callback
// names stored by the request methods.
class FCStore {
public:
    // Returns the process-wide singleton instance.
    static FCStore* Instance();

    FCStore();
    virtual ~FCStore();

    void WarmBoot();
    bool Available();

    // Requests details for the given item identifiers; luaCallback/luaError
    // name the Lua functions invoked on success/failure.
    void GetStoreItemDetailsWithLuaCallbacks( FCStringVector items, std::string luaCallback, std::string luaError );

    // Populate m_storeItems with results delivered by the platform layer.
    void ClearStoreItems();
    void AddStoreItem( std::string description, std::string price, std::string identifier );
    void EndStoreItems();

    // Initiates a purchase; luaSuccess/luaError are invoked on completion.
    void PurchaseRequest( std::string identifier, std::string luaSuccess, std::string luaError );
    void PurchaseSuccessful( std::string identifier );
    void PurchaseFailed( std::string identifier );

private:
    // NOTE(review): "Lau" looks like a typo for "Lua"; renaming would also
    // require touching the implementation file, so it is left as-is here.
    std::string     m_storeItemDetailsLauCallback;
    std::string     m_storeItemDetailsLuaError;
    FCStoreItemVec  m_storeItems;
    std::string     m_purchaseSuccessLuaCallback;
    std::string     m_purchaseFailLuaCallback;
};

#endif
"content_hash": "2ae50efb56b24226eb4a5a831e85afeb",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 113,
"avg_line_length": 22.575,
"alnum_prop": 0.7585825027685493,
"repo_name": "mslinklater/FC",
"id": "9accb028ac69948725a9826c93832e80661f522d",
"size": "1996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Shared/Framework/Store/FCStore.h",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "29601"
},
{
"name": "C++",
"bytes": "547365"
},
{
"name": "GLSL",
"bytes": "7446"
},
{
"name": "Java",
"bytes": "1408"
},
{
"name": "Lua",
"bytes": "23931"
},
{
"name": "Makefile",
"bytes": "7665"
},
{
"name": "Objective-C",
"bytes": "108555"
},
{
"name": "Objective-C++",
"bytes": "148765"
},
{
"name": "Python",
"bytes": "31723"
}
],
"symlink_target": ""
} |
*{ Play CRUD "blank" view: admin form for creating a new store/product block. }*
<div class="container">
    <div class="row">
        <div class="span12">
            <ul class="breadcrumb">
                <li><a href="/">Inicio</a></li>
                <li><a href="/productosdetalleportiendas/list">Productos por Tiendas </a></li>
                <li class="active">Nuevo Producto por Tienda</li>
            </ul>
            <div id="crudBlank" class="${type.name}">
                <h2 id="crudBlankTitle">&{'crud.blank.title', type.modelName}</h2>
                <div class="objectForm">
                    #{form action:@create(), enctype:'multipart/form-data'}
                        #{crud.form /}
                        *{ Save / save-and-continue / save-and-add-another submit actions }*
                        <p class="crudButtons">
                            <input type="submit" class="btn btn-primary" name="_save" value="&{'crud.save', type.modelName}" />
                            <input type="submit" class="btn btn-primary" name="_saveAndContinue" value="&{'crud.saveAndContinue', type.modelName}" />
                            <input type="submit" class="btn btn-primary" name="_saveAndAddAnother" value="&{'crud.saveAndAddAnother', type.modelName}" />
                        </p>
                    #{/form}
                </div>
            </div>
        </div>
    </div>
</div>
"content_hash": "3f43723e322e06eee67a283d3f6ba4e7",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 149,
"avg_line_length": 41.96551724137931,
"alnum_prop": 0.49219391947411667,
"repo_name": "marti1125/Project_Store",
"id": "72405d832e1af382cd1a37e16f8fdf353ab02dee",
"size": "1307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/views/TiendasProductosDetalles/blank.html",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16527"
},
{
"name": "HTML",
"bytes": "68470"
},
{
"name": "Java",
"bytes": "41241"
},
{
"name": "JavaScript",
"bytes": "233172"
}
],
"symlink_target": ""
} |
// Example processor demonstrating the MimoProcessor plumbing: a default
// input, a user-defined intermediate processing stage kept in its own
// realtime list, and a default output.
class MyProcessor : public apf::MimoProcessor<MyProcessor
                  , apf::pointer_policy<float*>>
{
  public:
    using Input = MimoProcessorBase::DefaultInput;

    class MyIntermediateThing : public ProcessItem<MyIntermediateThing>
    {
      public:
        // you can create other classes and use them in their own RtList, as
        // long as they are derived from ProcessItem<YourClass> and have a
        // Process class publicly derived from ProcessItem<YourClass>::Process.
        // This can be facilitated with this macro call:
        APF_PROCESS(MyIntermediateThing, ProcessItem<MyIntermediateThing>)
        {
          // do your processing here!
        }
    };

    class Output : public MimoProcessorBase::DefaultOutput
    {
      public:
        explicit Output(const Params& p)
          : MimoProcessorBase::DefaultOutput(p)
        {}

        APF_PROCESS(Output, MimoProcessorBase::DefaultOutput)
        {
          // this->buffer.begin() and this->buffer.end(): access to audio data
        }
    };

    // Builds the processing graph (input -> intermediate item -> output) and
    // activates realtime processing.
    MyProcessor(const apf::parameter_map& p)
      : MimoProcessorBase(p)
      , _intermediate_list(_fifo)
    {
      this->add<Input>();
      _intermediate_list.add(new MyIntermediateThing());
      this->add<Output>();
      this->activate();
    }

    ~MyProcessor() { this->deactivate(); }

    APF_PROCESS(MyProcessor, MimoProcessorBase)
    {
      // input/output lists are processed automatically before/after this:
      _process_list(_intermediate_list);
    }

  private:
    rtlist_t _intermediate_list;
};
// Entry point: intentionally empty — the example only needs to compile.
int main()
{
  // For now, this does nothing, we just want it to compile ...
}
"content_hash": "d3528efd1556e421ec35e08b19b44979",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 79,
"avg_line_length": 27.93220338983051,
"alnum_prop": 0.6286407766990292,
"repo_name": "AudioProcessingFramework/apf",
"id": "c2db5d2d0c32c12421d8f489a898d2ed288ca4a2",
"size": "1784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/dummy_example.cpp",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5979"
},
{
"name": "C++",
"bytes": "620843"
},
{
"name": "Makefile",
"bytes": "3022"
},
{
"name": "Shell",
"bytes": "968"
}
],
"symlink_target": ""
} |
<?xml version="1.0" encoding="UTF-8" standalone="no"?><!--
Copyright 2003 - 2016 The eFaps Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<ui-command xmlns="http://www.efaps.org/xsd" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.efaps.org/xsd http://www.efaps.org/xsd/eFaps_1.0.xsd">
<uuid>a5b47321-4b68-4350-8108-1df4bf3a5fb1</uuid>
<file-application>eFaps-Kernel</file-application>
<definition>
<version-expression>(version==latest)</version-expression>
<name>Admin_Help_ToggleEdit</name>
<access>
<role>Administration</role>
<role>Admin_Help_Admin</role>
</access>
<target>
<execute program="org.efaps.esjp.common.help.HelpProvider" method="toggleEdit"/>
</target>
</definition>
</ui-command>
| {
"content_hash": "740b1a1f8a4b348ac63088974481b14b",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 98,
"avg_line_length": 40.57575757575758,
"alnum_prop": 0.6967886482449589,
"repo_name": "eFaps/eFaps-Kernel-Install",
"id": "010cc7d0935004ef85924f65c3fb186e2dd059c3",
"size": "1339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/efaps/Admin/Help/UserInterface/Admin_Help_ToggleEdit.xml",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "1478988"
},
{
"name": "JavaScript",
"bytes": "39333"
}
],
"symlink_target": ""
} |
# Converts the abstract SQL scripts into MySQL dialect.
#
# Each script is copied from ../abstract into the current directory,
# rewritten in place by convert2mysql.pl, and the working copy removed —
# the original version repeated that three-line sequence once per file;
# it is collapsed here into a single list plus loop.
echo prepare scripts ...

# Scripts to convert, in the original processing order.
scripts="
out_d_common.sql
out_d_request.sql
out_d_auftrag.sql
out_d_order.sql
def_e_labels.sql
def_d_labels.sql
def_mp2000labels.sql
def_d_msgsys.sql
def_d_msgerr.sql
def_d_msgsql.sql
def_d_msginf.sql
def_d_msgwar.sql
def_d_msgakt.sql
def_e_msgsys.sql
def_e_msgerr.sql
def_e_msgsql.sql
def_e_msginf.sql
def_e_msgwar.sql
def_e_msgact.sql
def_d_aktionen.sql
def_aktionen.sql
def_d_status.sql
def_demo.sql
def_types.sql
def_geo.sql
def_d_init.sql
def_languages.sql
dbe_tdbviews.sql
dbe_views.sql
dbe_index.sql
dbe_tables.sql
"

# Disabled in the original script, kept here for reference:
#   dbe_tdbviewsgrant.sql  dbe_grant.sql

for script in $scripts; do
	cp ../abstract/$script .
	perl convert2mysql.pl $script
	rm $script
done

echo scripts converted
"content_hash": "815495dc806df1acf86585f773c4e2ec",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 44,
"avg_line_length": 26.74137931034483,
"alnum_prop": 0.7569310122501612,
"repo_name": "yafraorg/yafra-database",
"id": "0d6a3925232870592bd1c98560b5bf0e7b926188",
"size": "3773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "traveldb/mysql/convert.sh",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "40551"
},
{
"name": "JavaScript",
"bytes": "5548"
},
{
"name": "PLSQL",
"bytes": "298828"
},
{
"name": "Perl",
"bytes": "57097"
},
{
"name": "Python",
"bytes": "88665"
},
{
"name": "SQLPL",
"bytes": "226375"
},
{
"name": "Shell",
"bytes": "27174"
}
],
"symlink_target": ""
} |
// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.chrome.browser.metrics.util;
/**
* Utilities for Android UKM tests. Not to be used outside of testing.
*/
public class UkmUtilsForTest {
    /**
     * True if the UKM Service is enabled.
     */
    public static boolean isEnabled() {
        // Thin delegation to native; the answer comes from the C++ UKM service.
        return nativeIsEnabled();
    }
    /**
     * True if the input |sourceId| exists within the current UKM recording.
     */
    public static boolean hasSourceWithId(long sourceId) {
        return nativeHasSourceWithId(sourceId);
    }
    /**
     * Record a single Source with the given |sourceId| with a dummy URL.
     */
    public static void recordSourceWithId(long sourceId) {
        nativeRecordSourceWithId(sourceId);
    }
    /**
     * Get the UKM clientId.
     */
    public static long getClientId() {
        return nativeGetClientId();
    }
    // JNI bindings. The nativeX naming is matched by the generated JNI glue,
    // so these signatures must not be renamed.
    private static native boolean nativeIsEnabled();
    private static native boolean nativeHasSourceWithId(long sourceId);
    private static native void nativeRecordSourceWithId(long sourceId);
    private static native long nativeGetClientId();
}
| {
"content_hash": "e8cdc2c52bd4fe45bbcbacda8551878a",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 28.6046511627907,
"alnum_prop": 0.6829268292682927,
"repo_name": "nwjs/chromium.src",
"id": "6b97d7ab13194d372c7fffbbb307aa5e7f80dbd1",
"size": "1230",
"binary": false,
"copies": "6",
"ref": "refs/heads/nw70",
"path": "chrome/browser/android/metrics/javatests/src/org/chromium/chrome/browser/metrics/util/UkmUtilsForTest.java",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import React from 'react';
export default () => <div>Welcome to our slice of paradise</div>; | {
"content_hash": "7b81caf5e8405c362cd35e916505b673",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 65,
"avg_line_length": 31,
"alnum_prop": 0.7096774193548387,
"repo_name": "juthatip/authentication-client",
"id": "c95dbc21fb044f68739f606f4f5d4d325674b28e",
"size": "93",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/components/welcome.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "363"
},
{
"name": "JavaScript",
"bytes": "12869"
}
],
"symlink_target": ""
} |
/*! \file thrust/system/cpp/memory.h
* \brief Managing memory associated with Thrust's standard C++ system.
*/
#pragma once
#include <thrust/detail/config.h>
#include <thrust/system/cpp/memory_resource.h>
#include <thrust/memory.h>
#include <thrust/detail/type_traits.h>
#include <thrust/mr/allocator.h>
#include <ostream>
namespace thrust
{
namespace system
{
namespace cpp
{
/*! Allocates an area of memory available to Thrust's <tt>cpp</tt> system.
* \param n Number of bytes to allocate.
* \return A <tt>cpp::pointer<void></tt> pointing to the beginning of the newly
* allocated memory. A null <tt>cpp::pointer<void></tt> is returned if
* an error occurs.
* \note The <tt>cpp::pointer<void></tt> returned by this function must be
* deallocated with \p cpp::free.
* \see cpp::free
* \see std::malloc
*/
inline pointer<void> malloc(std::size_t n);
/*! Allocates a typed area of memory available to Thrust's <tt>cpp</tt> system.
* \param n Number of elements to allocate.
* \return A <tt>cpp::pointer<T></tt> pointing to the beginning of the newly
* allocated elements. A null <tt>cpp::pointer<T></tt> is returned if
* an error occurs.
* \note The <tt>cpp::pointer<T></tt> returned by this function must be
* deallocated with \p cpp::free.
* \see cpp::free
* \see std::malloc
*/
template<typename T>
inline pointer<T> malloc(std::size_t n);
/*! Deallocates an area of memory previously allocated by <tt>cpp::malloc</tt>.
* \param ptr A <tt>cpp::pointer<void></tt> pointing to the beginning of an area
* of memory previously allocated with <tt>cpp::malloc</tt>.
* \see cpp::malloc
* \see std::free
*/
inline void free(pointer<void> ptr);
// XXX upon c++11
// template<typename T>
// using allocator = thrust::mr::stateless_resource_allocator<T, memory_resource>;
/*! \p cpp::allocator is the default allocator used by the \p cpp system's containers such as
* <tt>cpp::vector</tt> if no user-specified allocator is provided. \p cpp::allocator allocates
* (deallocates) storage with \p cpp::malloc (\p cpp::free).
*/
template<typename T>
  struct allocator
    : thrust::mr::stateless_resource_allocator<
        T,
        memory_resource
    >
{
  // All allocation/deallocation behavior is inherited from the stateless
  // memory-resource allocator base; this type only adds the constructors and
  // the rebind metafunction required of a standard allocator.
private:
    typedef thrust::mr::stateless_resource_allocator<
        T,
        memory_resource
    > base;
public:
  /*! The \p rebind metafunction provides the type of an \p allocator
   *  instantiated with another type.
   *
   *  \tparam U The other type to use for instantiation.
   */
  template<typename U>
  struct rebind
  {
    /*! The typedef \p other gives the type of the rebound \p allocator.
     */
    typedef allocator<U> other;
  };
  /*! No-argument constructor has no effect.
   */
  __host__ __device__
  inline allocator() {}
  /*! Copy constructor has no effect.
   */
  __host__ __device__
  inline allocator(const allocator & other) : base(other) {}
  /*! Constructor from other \p allocator has no effect.
   */
  template<typename U>
  __host__ __device__
  inline allocator(const allocator<U> & other) : base(other) {}
#if THRUST_CPP_DIALECT >= 2011
  allocator & operator=(const allocator &) = default;
#endif
  /*! Destructor has no effect.
   */
  __host__ __device__
  inline ~allocator() {}
}; // end allocator
} // end cpp
/*! \}
*/
} // end system
/*! \namespace thrust::cpp
* \brief \p thrust::cpp is a top-level alias for thrust::system::cpp.
*/
namespace cpp
{
using thrust::system::cpp::malloc;
using thrust::system::cpp::free;
using thrust::system::cpp::allocator;
} // end cpp
} // end thrust
#include <thrust/system/cpp/detail/memory.inl>
| {
"content_hash": "c1c6ab5ca458e7ef6542b708b6656c5c",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 96,
"avg_line_length": 26.543478260869566,
"alnum_prop": 0.6592956592956593,
"repo_name": "jaredhoberock/thrust",
"id": "8f6fa2969eba23ddbb51eb416835ead9fdc3e294",
"size": "4279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thrust/system/cpp/memory.h",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "206118"
},
{
"name": "C++",
"bytes": "4634867"
},
{
"name": "CMake",
"bytes": "32799"
},
{
"name": "Cuda",
"bytes": "3271429"
},
{
"name": "Makefile",
"bytes": "18286"
},
{
"name": "Perl",
"bytes": "19851"
},
{
"name": "Python",
"bytes": "95288"
},
{
"name": "Shell",
"bytes": "4514"
}
],
"symlink_target": ""
} |
package org.jboss.netty.buffer;
import static org.easymock.EasyMock.*;
import static org.jboss.netty.buffer.ChannelBuffers.*;
import static org.junit.Assert.*;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.GatheringByteChannel;
import java.nio.channels.ScatteringByteChannel;
import org.junit.Test;
/**
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
*
* @version $Rev$, $Date$
*
*/
public class ReadOnlyChannelBufferTest {
    // A null delegate is a programming error and must fail eagerly.
    @Test(expected = NullPointerException.class)
    public void shouldNotAllowNullInConstructor() {
        new ReadOnlyChannelBuffer(null);
    }
    @Test
    public void testUnmodifiableBuffer() {
        // unmodifiableBuffer() is expected to wrap with ReadOnlyChannelBuffer.
        assertTrue(ChannelBuffers.unmodifiableBuffer(ChannelBuffers.buffer(1)) instanceof ReadOnlyChannelBuffer);
    }
    @Test
    public void testUnwrap() {
        // unwrap() must expose the exact instance that was wrapped.
        ChannelBuffer buf = ChannelBuffers.buffer(1);
        assertSame(buf, ((WrappedChannelBuffer) ChannelBuffers.unmodifiableBuffer(buf)).unwrap());
    }
    @Test
    public void shouldHaveSameByteOrder() {
        // Wrapping must not change the endianness reported by order().
        ChannelBuffer buf = ChannelBuffers.buffer(ChannelBuffers.LITTLE_ENDIAN, 1);
        assertSame(ChannelBuffers.LITTLE_ENDIAN, ChannelBuffers.unmodifiableBuffer(buf).order());
    }
    @Test
    public void shouldReturnReadOnlyDerivedBuffer() {
        // Derived views (duplicate/slice) must stay read-only as well.
        ChannelBuffer buf = ChannelBuffers.unmodifiableBuffer(ChannelBuffers.buffer(1));
        assertTrue(buf.duplicate() instanceof ReadOnlyChannelBuffer);
        assertTrue(buf.slice() instanceof ReadOnlyChannelBuffer);
        assertTrue(buf.slice(0, 1) instanceof ReadOnlyChannelBuffer);
        assertTrue(buf.duplicate() instanceof ReadOnlyChannelBuffer);
    }
    @Test
    public void shouldReturnWritableCopy() {
        // copy() is a deep copy, so it is allowed to be writable again.
        ChannelBuffer buf = ChannelBuffers.unmodifiableBuffer(ChannelBuffers.buffer(1));
        assertFalse(buf.copy() instanceof ReadOnlyChannelBuffer);
    }
    @Test
    public void shouldForwardReadCallsBlindly() throws Exception {
        // Strict mock: every read accessor must be forwarded unchanged to the
        // delegate. The numeric arguments below are arbitrary markers used
        // only to verify argument pass-through.
        ChannelBuffer buf = createStrictMock(ChannelBuffer.class);
        expect(buf.readerIndex()).andReturn(0).anyTimes();
        expect(buf.writerIndex()).andReturn(0).anyTimes();
        expect(buf.capacity()).andReturn(0).anyTimes();
        expect(buf.getBytes(1, (GatheringByteChannel) null, 2)).andReturn(3);
        buf.getBytes(4, (OutputStream) null, 5);
        buf.getBytes(6, (byte[]) null, 7, 8);
        buf.getBytes(9, (ChannelBuffer) null, 10, 11);
        buf.getBytes(12, (ByteBuffer) null);
        expect(buf.getByte(13)).andReturn(Byte.valueOf((byte) 14));
        expect(buf.getShort(15)).andReturn(Short.valueOf((short) 16));
        expect(buf.getUnsignedMedium(17)).andReturn(18);
        expect(buf.getInt(19)).andReturn(20);
        expect(buf.getLong(21)).andReturn(22L);
        ByteBuffer bb = ByteBuffer.allocate(100);
        ByteBuffer[] bbs = new ByteBuffer[] { ByteBuffer.allocate(101), ByteBuffer.allocate(102) };
        expect(buf.toByteBuffer(23, 24)).andReturn(bb);
        expect(buf.toByteBuffers(25, 26)).andReturn(bbs);
        expect(buf.capacity()).andReturn(27);
        replay(buf);
        ChannelBuffer roBuf = unmodifiableBuffer(buf);
        assertEquals(3, roBuf.getBytes(1, (GatheringByteChannel) null, 2));
        roBuf.getBytes(4, (OutputStream) null, 5);
        roBuf.getBytes(6, (byte[]) null, 7, 8);
        roBuf.getBytes(9, (ChannelBuffer) null, 10, 11);
        roBuf.getBytes(12, (ByteBuffer) null);
        assertEquals((byte) 14, roBuf.getByte(13));
        assertEquals((short) 16, roBuf.getShort(15));
        assertEquals(18, roBuf.getUnsignedMedium(17));
        assertEquals(20, roBuf.getInt(19));
        assertEquals(22L, roBuf.getLong(21));
        // ByteBuffer views returned through the read-only wrapper must be
        // read-only themselves, while preserving the delegate's capacities.
        ByteBuffer roBB = roBuf.toByteBuffer(23, 24);
        assertEquals(100, roBB.capacity());
        assertTrue(roBB.isReadOnly());
        ByteBuffer[] roBBs = roBuf.toByteBuffers(25, 26);
        assertEquals(2, roBBs.length);
        assertEquals(101, roBBs[0].capacity());
        assertTrue(roBBs[0].isReadOnly());
        assertEquals(102, roBBs[1].capacity());
        assertTrue(roBBs[1].isReadOnly());
        assertEquals(27, roBuf.capacity());
        verify(buf);
    }
    // Every mutating operation must be rejected with
    // UnsupportedOperationException, one test per overload.
    @Test(expected = UnsupportedOperationException.class)
    public void shouldRejectDiscardReadBytes() {
        unmodifiableBuffer(EMPTY_BUFFER).discardReadBytes();
    }
    @Test(expected = UnsupportedOperationException.class)
    public void shouldRejectSetByte() {
        unmodifiableBuffer(EMPTY_BUFFER).setByte(0, (byte) 0);
    }
    @Test(expected = UnsupportedOperationException.class)
    public void shouldRejectSetShort() {
        unmodifiableBuffer(EMPTY_BUFFER).setShort(0, (short) 0);
    }
    @Test(expected = UnsupportedOperationException.class)
    public void shouldRejectSetMedium() {
        unmodifiableBuffer(EMPTY_BUFFER).setMedium(0, 0);
    }
    @Test(expected = UnsupportedOperationException.class)
    public void shouldRejectSetInt() {
        unmodifiableBuffer(EMPTY_BUFFER).setInt(0, 0);
    }
    @Test(expected = UnsupportedOperationException.class)
    public void shouldRejectSetLong() {
        unmodifiableBuffer(EMPTY_BUFFER).setLong(0, 0);
    }
    @Test(expected = UnsupportedOperationException.class)
    public void shouldRejectSetBytes1() throws IOException {
        unmodifiableBuffer(EMPTY_BUFFER).setBytes(0, (InputStream) null, 0);
    }
    @Test(expected = UnsupportedOperationException.class)
    public void shouldRejectSetBytes2() throws IOException {
        unmodifiableBuffer(EMPTY_BUFFER).setBytes(0, (ScatteringByteChannel) null, 0);
    }
    @Test(expected = UnsupportedOperationException.class)
    public void shouldRejectSetBytes3() {
        unmodifiableBuffer(EMPTY_BUFFER).setBytes(0, (byte[]) null, 0, 0);
    }
    @Test(expected = UnsupportedOperationException.class)
    public void shouldRejectSetBytes4() {
        unmodifiableBuffer(EMPTY_BUFFER).setBytes(0, (ChannelBuffer) null, 0, 0);
    }
    @Test(expected = UnsupportedOperationException.class)
    public void shouldRejectSetBytes5() {
        unmodifiableBuffer(EMPTY_BUFFER).setBytes(0, (ByteBuffer) null);
    }
}
| {
"content_hash": "48966967aa5d828e706f9fd22c53ab8d",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 113,
"avg_line_length": 36.77777777777778,
"alnum_prop": 0.6885037366830975,
"repo_name": "aperepel/netty",
"id": "7c89e8d09f6cbc220986236aa8ffe9c94f524c86",
"size": "6912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test/java/org/jboss/netty/buffer/ReadOnlyChannelBufferTest.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3693862"
}
],
"symlink_target": ""
} |
package com.intellij.psi.codeStyle;
import com.intellij.openapi.Disposable;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.components.State;
import com.intellij.openapi.components.Storage;
import org.jetbrains.annotations.NotNull;
import java.util.Collection;
import java.util.Collections;
@State(name = "CodeStyleSettingsManager", storages = @Storage("code.style.schemes"))
public final class AppCodeStyleSettingsManager extends CodeStyleSettingsManager {
  public AppCodeStyleSettingsManager() {
    // The application-level manager lives as long as the application itself,
    // so the application is the parent disposable.
    registerExtensionPointListeners(ApplicationManager.getApplication());
  }
  @Override
  protected void registerExtensionPointListeners(@NotNull Disposable disposable) {
    super.registerExtensionPointListeners(disposable);
    // Invalidate the cached settings-page providers whenever a language
    // provider extension is added or removed.
    LanguageCodeStyleSettingsProvider.EP_NAME.addExtensionPointListener(
      LanguageCodeStyleSettingsProvider::resetSettingsPagesProviders, disposable);
  }
  @Override
  protected Collection<CodeStyleSettings> enumSettings() {
    CodeStyleSettings projectCodeStyle = getMainProjectCodeStyle();
    if (projectCodeStyle == null) {
      return Collections.emptyList();
    }
    return Collections.singletonList(projectCodeStyle);
  }
}
| {
"content_hash": "6605b79b1344c0d58fa4a9be1dd89147",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 90,
"avg_line_length": 37.03225806451613,
"alnum_prop": 0.8109756097560976,
"repo_name": "leafclick/intellij-community",
"id": "8d932716a74f4b67e6e6c109750299ddff6a5155",
"size": "1289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "platform/lang-api/src/com/intellij/psi/codeStyle/AppCodeStyleSettingsManager.java",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
Simple splash screen using C#
| {
"content_hash": "a0feb49f398da417fbb53d2505e6e112",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 29,
"avg_line_length": 30,
"alnum_prop": 0.8,
"repo_name": "kaviteshsingh/YetAnotherSplashScreenInCSharp",
"id": "bcc4030506cd0458fe2fc16026832836b94ac78b",
"size": "63",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "README.md",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "9544"
}
],
"symlink_target": ""
} |
// NOTE(review): 'dynamic_cast' is a C++ keyword, so this file deliberately
// fails to compile as C++ — presumably a compile-probe used by the autoflag
// detection (path: autoflag/positive_tests/no_rtti.cpp); do not "fix" it.
// TODO confirm the probe does not compare file contents byte-for-byte.
int dynamic_cast;
| {
"content_hash": "fc0e88590825a9b46531bbdcd09fa488",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 17,
"avg_line_length": 18,
"alnum_prop": 0.7777777777777778,
"repo_name": "kit-transue/software-emancipation-discover",
"id": "021baba157add3b5ef6c10020959eba74a8f5ff0",
"size": "18",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "psethome/lib/autoflag/positive_tests/no_rtti.cpp",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "178658"
},
{
"name": "C",
"bytes": "2730024"
},
{
"name": "C#",
"bytes": "308"
},
{
"name": "C++",
"bytes": "23349265"
},
{
"name": "CSS",
"bytes": "1861130"
},
{
"name": "Crystal",
"bytes": "105"
},
{
"name": "Emacs Lisp",
"bytes": "50226"
},
{
"name": "GLSL",
"bytes": "2698016"
},
{
"name": "Gnuplot",
"bytes": "1219"
},
{
"name": "Groff",
"bytes": "10934"
},
{
"name": "HTML",
"bytes": "10534201"
},
{
"name": "Java",
"bytes": "272548"
},
{
"name": "Lex",
"bytes": "269984"
},
{
"name": "Makefile",
"bytes": "487619"
},
{
"name": "Objective-C",
"bytes": "10093"
},
{
"name": "Perl",
"bytes": "719227"
},
{
"name": "Perl6",
"bytes": "15568"
},
{
"name": "PostScript",
"bytes": "25588"
},
{
"name": "Ruby",
"bytes": "77891"
},
{
"name": "Scilab",
"bytes": "11247"
},
{
"name": "Shell",
"bytes": "320920"
},
{
"name": "Smalltalk",
"bytes": "83"
},
{
"name": "SuperCollider",
"bytes": "23447"
},
{
"name": "Tcl",
"bytes": "1047438"
},
{
"name": "XSLT",
"bytes": "5277"
},
{
"name": "Yacc",
"bytes": "514644"
}
],
"symlink_target": ""
} |
ACCEPTED
#### According to
Index Fungorum
#### Published in
null
#### Original name
Cenangella spiraeicola Henn.
### Remarks
null | {
"content_hash": "eb70d221ac04ddda295eb59055fce446",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 28,
"avg_line_length": 10.23076923076923,
"alnum_prop": 0.7142857142857143,
"repo_name": "mdoering/backbone",
"id": "fa338980ed49ef602effc7606dd0d7392bf9b9d6",
"size": "198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "life/Fungi/Ascomycota/Leotiomycetes/Helotiales/Helotiaceae/Scleroderris/Scleroderris spiraeicola/README.md",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
/* Raw byte pool holding the packet descriptors of IP packets whose fragments
 * have not all arrived yet. Slots are getRealPacketInfoSize() bytes each. */
static char incompletePackageInfos[DPA_MAX_PACKET_INFO_BUFFER_SIZE];
/* One-past-the-last usable descriptor slot; computed once in DPAUCS_INIT. */
static DPAUCS_ip_packetInfo_t* incompletePackageInfos_end;
/* Decide whether two packet descriptors refer to the same IP packet.
 * Identical pointers trivially match; different fragment handlers never do.
 * Otherwise, when at least one descriptor lives outside the internal slot
 * buffer, the handler's own comparison callback (if any) decides. */
bool DPAUCS_layer3_areFragmentsFromSamePacket( DPAUCS_ip_packetInfo_t* a, DPAUCS_ip_packetInfo_t* b ){
  if( a == b )
    return true;
  if( a->handler != b->handler )
    return false;
  bool a_external = (char*)a < incompletePackageInfos || a >= incompletePackageInfos_end;
  bool b_external = (char*)b < incompletePackageInfos || b >= incompletePackageInfos_end;
  if( ( a_external || b_external ) && a->handler->isSamePacket )
    return a->handler->isSamePacket( a, b );
  return false;
}
/* Return the size in bytes of one descriptor slot: the maximum
 * packetInfo_size over all registered fragment handlers. The value is
 * computed once on first use and cached in a function-local static. */
static inline size_t getRealPacketInfoSize( void ){
  static size_t size = 0;
  if( size )
    return size;
  for( struct DPAUCS_fragmentHandler_list* node = DPAUCS_fragmentHandler_list; node; node = node->next ){
    size_t candidate = node->entry->packetInfo_size;
    if( candidate > size )
      size = candidate;
  }
  return size;
}
/* Advance *it to the next descriptor slot.
 * NOTE(review): *it is a DPAUCS_ip_packetInfo_t*, so this += steps by
 * getRealPacketInfoSize() *elements* (size * sizeof(DPAUCS_ip_packetInfo_t)
 * bytes), while the pool is laid out in byte-sized slots — it looks like the
 * arithmetic should go through a char* cast; TODO confirm against the intended
 * layout of incompletePackageInfos. */
static inline void nextPacketInfo( DPAUCS_ip_packetInfo_t** it ){
  *it += getRealPacketInfoSize();
}
/* Return the next descriptor slot in round-robin order, invalidating the
 * slot the cursor currently points at before advancing. The static cursor
 * persists between calls, so slots are recycled oldest-first. */
static inline DPAUCS_ip_packetInfo_t* getNextIncompletePacket( void ){
  static DPAUCS_ip_packetInfo_t* it = (DPAUCS_ip_packetInfo_t*)incompletePackageInfos;
  it->valid = false;
  nextPacketInfo( &it );
  /* NOTE(review): '>' lets it == incompletePackageInfos_end through without
   * wrapping; '>=' looks intended — TODO confirm. */
  if( it > incompletePackageInfos_end )
    it = (DPAUCS_ip_packetInfo_t*)incompletePackageInfos;
  return it;
}
/* Startup hook: compute the end-of-pool marker.
 * NOTE(review): the '%' keeps only the division remainder, placing the end
 * marker just a few bytes past the start of the pool; truncating the pool to
 * a whole number of slots ( SIZE - SIZE % getRealPacketInfoSize() ) was
 * probably intended — TODO confirm. */
DPAUCS_INIT {
  incompletePackageInfos_end = (DPAUCS_ip_packetInfo_t*)( incompletePackageInfos + ( DPA_MAX_PACKET_INFO_BUFFER_SIZE % getRealPacketInfoSize() ) );
}
/* Allocate a fragment buffer of |size| bytes belonging to the IP packet
 * described by |packet|.
 *
 * packet == 0      : a fresh slot is taken from the internal pool (the caller
 *                    is expected to populate it).
 * packet external  : the stored copy of the descriptor is looked up, and
 *                    created via DPAUCS_layer3_save_packet_info if missing.
 * packet internal  : used as-is.
 *
 * Returns the fragment slot, or 0 when allocation failed or the descriptor
 * was evicted while making room for the new fragment. */
DPAUCS_ip_fragment_t** DPAUCS_layer3_allocFragment( DPAUCS_ip_packetInfo_t* packet, uint16_t size ){
  DPAUCS_ip_packetInfo_t* info = 0;
  if(!packet){
    info = getNextIncompletePacket();
  }else if( (char*)packet < incompletePackageInfos || packet >= incompletePackageInfos_end ){
    info = DPAUCS_layer3_normalize_packet_info_ptr( packet );
    if(!info) // Bug fix: was save_packet_info(info), copying from a null pointer.
      info = DPAUCS_layer3_save_packet_info( packet );
  }else{
    info = packet;
  }
  // Bug fix: was packet->handler, which dereferences null when this function
  // is called with packet == 0. |info| is the normalized descriptor and
  // carries the same handler.
  // NOTE(review): in the packet == 0 case the fresh slot's handler field is
  // whatever the recycled slot last held — callers of that path presumably
  // initialize it first; TODO confirm.
  DPAUCS_ip_fragment_t** f_ptr = (DPAUCS_ip_fragment_t**)DPAUCS_createFragment(info->handler,size);
  if(!f_ptr)
    return 0;
  DPAUCS_ip_fragment_t* f = *f_ptr;
  if(!info->valid){ // if packet info and all its fragments were removed to gain enought space for new fragment
    DPAUCS_removeFragment((DPAUCS_fragment_t**)f_ptr);
    return 0;
  }
  f->info = info;
  return f_ptr;
}
/* Persist a copy of |packet| into the internal descriptor pool and return
 * the pool slot. The handler's packetInfo_size governs how many bytes of
 * the handler-specific descriptor are copied. */
DPAUCS_ip_packetInfo_t* DPAUCS_layer3_save_packet_info( DPAUCS_ip_packetInfo_t* packet ){
  DPAUCS_ip_packetInfo_t* slot = getNextIncompletePacket();
  memcpy( slot, packet, packet->handler->packetInfo_size );
  return slot;
}
/* Map an arbitrary descriptor pointer to its stored copy inside the
 * internal pool, or return 0 when no valid matching slot exists. */
DPAUCS_ip_packetInfo_t* DPAUCS_layer3_normalize_packet_info_ptr(DPAUCS_ip_packetInfo_t* ipf){
  DPAUCS_ip_packetInfo_t* slot = (DPAUCS_ip_packetInfo_t*)incompletePackageInfos;
  while( slot < incompletePackageInfos_end ){
    if( slot->valid && DPAUCS_layer3_areFragmentsFromSamePacket( slot, ipf ) )
      return slot;
    nextPacketInfo( &slot );
  }
  return 0;
}
/* True when |ipf| is the fragment the reassembly expects next: either the
 * packet's first fragment (offset 0) or the one continuing exactly at the
 * packet's current reassembly offset. */
bool DPAUCS_layer3_isNextFragment( DPAUCS_ip_fragment_t* ipf ){
  if( !ipf->offset )
    return true;
  return ipf->info->offset == ipf->offset;
}
/* Search state for DPAUCS_layer3_searchFollowingFragment: |prev| identifies
 * the packet being searched, |result| receives the match (if any). */
struct searchFollowingFragmentArgs {
  DPAUCS_ip_fragment_t* prev;
  DPAUCS_ip_fragment_t** result;
};
/* Per-fragment visitor: stop (return false) as soon as a fragment of the
 * same packet that continues the reassembly is found; otherwise keep going. */
static bool searchFollowingFragment( DPAUCS_fragment_t** f, void* arg ){
  struct searchFollowingFragmentArgs* args = arg;
  DPAUCS_ip_fragment_t** candidate = (DPAUCS_ip_fragment_t**)f;
  if( (*candidate)->info == args->prev->info
   && DPAUCS_layer3_isNextFragment( *candidate )
  ){
    args->result = candidate;
    return false; /* match found — abort iteration */
  }
  return true; /* continue iterating */
}
/* Find the buffered fragment that directly continues |ipf|'s packet, or
 * return 0 when no such fragment has arrived yet. */
DPAUCS_ip_fragment_t** DPAUCS_layer3_searchFollowingFragment( DPAUCS_ip_fragment_t* ipf ){
  struct searchFollowingFragmentArgs args;
  args.prev = ipf;
  args.result = 0;
  DPAUCS_eachFragment( ipf->info->handler, &searchFollowingFragment, &args );
  return args.result;
}
/* Advance the packet's reassembly offset by |f|'s length, but only when |f|
 * is the fragment the reassembly currently expects. (Name typo "Packat" is
 * part of the public API and therefore kept.) */
void DPAUCS_layer3_updatePackatOffset( DPAUCS_ip_fragment_t* f ){
  if( DPAUCS_layer3_isNextFragment( f ) )
    f->info->offset += f->length;
}
/* Visitor used while purging a packet: frees every fragment whose descriptor
 * matches |packet_ptr|; always returns true so the iteration visits all
 * fragments. */
static bool removeIpFragment( DPAUCS_fragment_t** f, void* packet_ptr ){
  DPAUCS_ip_fragment_t* ipf = (DPAUCS_ip_fragment_t*)*f;
  if( ipf->info == packet_ptr )
    DPAUCS_removeFragment( f );
  return true;
}
/* Drop a partially reassembled packet: invalidate its descriptor, free all
 * of its buffered fragments and fire the descriptor's onremove callback.
 * Clearing |valid| first makes repeated calls a no-op. */
void DPAUCS_layer3_removePacket( DPAUCS_ip_packetInfo_t* ipf ){
  if( ipf->valid ){
    ipf->valid = false;
    DPAUCS_eachFragment( ipf->handler, &removeIpFragment, ipf );
    if( ipf->onremove )
      ipf->onremove( ipf );
  }
}
/* Free a single buffered fragment via the shared visitor helper. */
void DPAUCS_layer3_removeFragment( DPAUCS_ip_fragment_t** f ){
  DPAUCS_ip_packetInfo_t* owner = (*f)->info;
  removeIpFragment( (DPAUCS_fragment_t**)f, owner );
}
| {
"content_hash": "6c9031d71b66fac553bab2fcf59a79df",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 147,
"avg_line_length": 32.06521739130435,
"alnum_prop": 0.695819209039548,
"repo_name": "Daniel-Abrecht/DPA-UCS",
"id": "aaf550f4f484149ca20b779756cdaa3649019c4e",
"size": "4549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/server/protocol/ip_stack.c",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "321834"
},
{
"name": "C++",
"bytes": "889"
},
{
"name": "Makefile",
"bytes": "7729"
},
{
"name": "Shell",
"bytes": "2340"
}
],
"symlink_target": ""
} |
package com.tom.defense.client;
import org.lwjgl.opengl.GL11;
import net.minecraft.block.state.IBlockState;
import net.minecraft.client.renderer.GlStateManager;
import net.minecraft.client.resources.I18n;
import net.minecraft.util.math.MathHelper;
import com.tom.client.TileEntitySpecialRendererTomsMod;
import com.tom.defense.tileentity.TileEntityForceCapacitor;
import com.tom.util.TomsModUtils;
public class TileEntityForceCapacitorRenderer extends TileEntitySpecialRendererTomsMod<TileEntityForceCapacitor> {
	/**
	 * Renders the capacitor's status readout (charge percentage, range and
	 * linked-device count) as text on the block face opposite its facing.
	 * The GL matrix operations below are order-sensitive: translate to the
	 * block, flip the axes, rotate to the block's facing, then scale down to
	 * font units before drawing.
	 */
	@Override
	public void renderTileEntityAt(TileEntityForceCapacitor te, double x, double y, double z, float partialTicks, int destroyStage, IBlockState state) {
		// Localized labels for the readout.
		String header = I18n.format("tile.tm.forceCapacitor.name");
		String capacity = I18n.format("tomsmod.render.capacity") + ":";
		String range = I18n.format("tomsmod.render.range") + ":";
		String linkedDevices = I18n.format("tomsmod.render.linkedDevices") + ":";
		// clientPer is in tenths of a percent, hence the /10D.
		String capValue = te.clientPer / 10D + "%";
		String rangeValue = "" + te.range;
		String linkedValue = "" + te.linkedDevices;
		GL11.glPushMatrix(); // start
		GL11.glTranslatef((float) x + 0.5F, (float) y + 1.5F, (float) z + 0.5F);
		GL11.glScalef(1.0F, -1F, -1F);
		TomsModUtils.rotateMatrixByMetadata(te.getFacing(state).ordinal());
		// Move to just in front of the block face (0.01 offset avoids z-fighting).
		GL11.glTranslatef(0, 1, 0.5F - 1 - 0.01F);
		float f = 0.0075F; // world units per font pixel
		int i = MathHelper.floor((0.5 - (1.2 / 16D)) / f);
		GL11.glScalef(f, f, f);
		GlStateManager.translate(0.0F, 0.5F * f, 0.07F * f);
		// Header centered, then three label/value rows; values right-aligned.
		getFontRenderer().drawString(header, -getFontRenderer().getStringWidth(header) / 2, -i, 0);
		getFontRenderer().drawString(capacity, -i, -i + 80, 0);
		getFontRenderer().drawString(range, -i, -i + 90, 0);
		getFontRenderer().drawString(linkedDevices, -i, -i + 100, 0);
		getFontRenderer().drawString(capValue, -i + 110 - getFontRenderer().getStringWidth(capValue), -i + 80, 0);
		getFontRenderer().drawString(rangeValue, -i + 110 - getFontRenderer().getStringWidth(rangeValue), -i + 90, 0);
		getFontRenderer().drawString(linkedValue, -i + 110 - getFontRenderer().getStringWidth(linkedValue), -i + 100, 0);
		GL11.glPopMatrix(); // end
	}
}
| {
"content_hash": "f261d27a570bb21c1f8f35061c6a0b89",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 149,
"avg_line_length": 49.41860465116279,
"alnum_prop": 0.7284705882352941,
"repo_name": "tom5454/Toms-Mod",
"id": "91c138d36730560b940da3cd8380a34799289ceb",
"size": "2125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/java/com/tom/defense/client/TileEntityForceCapacitorRenderer.java",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "3439325"
}
],
"symlink_target": ""
} |
/* ====================================================================
* Copyright (c) 1998-2007 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
* This product includes cryptographic software written by Eric Young
* (eay@cryptsoft.com). This product includes software written by Tim
* Hudson (tjh@cryptsoft.com).
*
*/
/* ====================================================================
* Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
*
* Portions of the attached software ("Contribution") are developed by
* SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project.
*
* The Contribution is licensed pursuant to the OpenSSL open source
* license provided above.
*
* ECC cipher suite support in OpenSSL originally written by
* Vipul Gupta and Sumit Gupta of Sun Microsystems Laboratories.
*
*/
/* ====================================================================
* Copyright 2005 Nokia. All rights reserved.
*
* The portions of the attached software ("Contribution") is developed by
* Nokia Corporation and is licensed pursuant to the OpenSSL open source
* license.
*
* The Contribution, originally written by Mika Kousa and Pasi Eronen of
* Nokia Corporation, consists of the "PSK" (Pre-Shared Key) ciphersuites
* support (see RFC 4279) to OpenSSL.
*
* No patent licenses or other rights except those expressly stated in
* the OpenSSL open source license shall be deemed granted or received
* expressly, by implication, estoppel, or otherwise.
*
* No assurances are provided by Nokia that the Contribution does not
* infringe the patent or other intellectual property rights of any third
* party or that the license provides you with all the necessary rights
* to make use of the Contribution.
*
* THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. IN
* ADDITION TO THE DISCLAIMERS INCLUDED IN THE LICENSE, NOKIA
* SPECIFICALLY DISCLAIMS ANY LIABILITY FOR CLAIMS BROUGHT BY YOU OR ANY
* OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS OR
* OTHERWISE. */
#include <openssl/ssl.h>
#include <assert.h>
#include <string.h>
#include <openssl/buf.h>
#include <openssl/dh.h>
#include <openssl/digest.h>
#include <openssl/err.h>
#include <openssl/md5.h>
#include <openssl/mem.h>
#include <openssl/nid.h>
#include "internal.h"
/* ssl3_supports_cipher reports whether |cipher| may be used with this
 * protocol method. The TLS method places no per-cipher restriction here, so
 * it unconditionally returns one. */
int ssl3_supports_cipher(const SSL_CIPHER *cipher) {
  return 1;
}
/* ssl3_set_handshake_header writes the 4-byte handshake message header — a
 * one-byte type |htype| followed by a three-byte length |len| — at the start
 * of |ssl->init_buf|, primes |init_num|/|init_off| for the write loop, and
 * mixes the full message into the handshake transcript hash. Returns one on
 * success and zero on failure. The message body is presumably already in
 * |init_buf| right after the header — confirm against callers. */
int ssl3_set_handshake_header(SSL *ssl, int htype, unsigned long len) {
  uint8_t *p = (uint8_t *)ssl->init_buf->data;
  *(p++) = htype;
  l2n3(len, p); /* expands to code storing |len| in 3 bytes and advancing |p| */
  ssl->init_num = (int)len + SSL3_HM_HEADER_LENGTH;
  ssl->init_off = 0;
  /* Add the message to the handshake hash. */
  return ssl3_update_handshake_hash(ssl, (uint8_t *)ssl->init_buf->data,
                                    ssl->init_num);
}
/* ssl3_handshake_write flushes the pending handshake message in |init_buf|
 * to the transport as a handshake-type record, delegating to ssl3_do_write. */
int ssl3_handshake_write(SSL *ssl) {
  return ssl3_do_write(ssl, SSL3_RT_HANDSHAKE);
}
/* ssl3_new allocates and zero-initializes the SSL3_STATE for |ssl|,
 * initializes the handshake digest contexts, and sets the initial protocol
 * version. Returns one on success and zero on allocation failure. */
int ssl3_new(SSL *ssl) {
  SSL3_STATE *s3 = OPENSSL_malloc(sizeof *s3);
  if (s3 == NULL) {
    return 0;
  }
  memset(s3, 0, sizeof *s3);
  EVP_MD_CTX_init(&s3->handshake_hash);
  EVP_MD_CTX_init(&s3->handshake_md5);
  ssl->s3 = s3;
  /* Set the version to the highest supported version.
   *
   * TODO(davidben): Move this field into |s3|, have it store the normalized
   * protocol version, and implement this pre-negotiation quirk in |SSL_version|
   * at the API boundary rather than in internal state. */
  ssl->version = TLS1_2_VERSION;
  return 1;
}
/* ssl3_free releases everything owned by |ssl->s3| and resets the pointer.
 * Safe to call with a NULL |ssl| or an |ssl| whose s3 was never allocated. */
void ssl3_free(SSL *ssl) {
  if (ssl == NULL || ssl->s3 == NULL) {
    return;
  }
  ssl3_cleanup_key_block(ssl);
  ssl_read_buffer_clear(ssl);
  ssl_write_buffer_clear(ssl);
  SSL_ECDH_CTX_cleanup(&ssl->s3->tmp.ecdh_ctx);
  OPENSSL_free(ssl->s3->tmp.peer_key);
  sk_X509_NAME_pop_free(ssl->s3->tmp.ca_names, X509_NAME_free);
  OPENSSL_free(ssl->s3->tmp.certificate_types);
  OPENSSL_free(ssl->s3->tmp.peer_ellipticcurvelist);
  OPENSSL_free(ssl->s3->tmp.peer_psk_identity_hint);
  ssl3_free_handshake_buffer(ssl);
  ssl3_free_handshake_hash(ssl);
  OPENSSL_free(ssl->s3->next_proto_negotiated);
  OPENSSL_free(ssl->s3->alpn_selected);
  SSL_AEAD_CTX_free(ssl->s3->aead_read_ctx);
  SSL_AEAD_CTX_free(ssl->s3->aead_write_ctx);
  /* Zeroize the whole struct before freeing it, then clear the pointer so a
   * second ssl3_free is a no-op. */
  OPENSSL_cleanse(ssl->s3, sizeof *ssl->s3);
  OPENSSL_free(ssl->s3);
  ssl->s3 = NULL;
}
/* SSL_session_reused returns one if the connection resumed a previous session
 * (the |hit| flag) and zero for a full handshake. */
int SSL_session_reused(const SSL *ssl) {
  return ssl->hit;
}
/* SSL_total_renegotiations returns the number of renegotiations recorded on
 * |ssl|. */
int SSL_total_renegotiations(const SSL *ssl) {
  return ssl->s3->total_renegotiations;
}
/* SSL_num_renegotiations is an alias for |SSL_total_renegotiations|. */
int SSL_num_renegotiations(const SSL *ssl) {
  return SSL_total_renegotiations(ssl);
}
/* SSL_CTX_need_tmp_RSA returns zero unconditionally; a temporary RSA key is
 * never required here. (Compatibility stub.) */
int SSL_CTX_need_tmp_RSA(const SSL_CTX *ctx) {
  return 0;
}
/* SSL_need_tmp_RSA returns zero unconditionally. (Compatibility stub.) */
int SSL_need_tmp_RSA(const SSL *ssl) {
  return 0;
}
/* SSL_CTX_set_tmp_rsa ignores |rsa| and reports success. (Compatibility
 * no-op.) */
int SSL_CTX_set_tmp_rsa(SSL_CTX *ctx, const RSA *rsa) {
  return 1;
}
/* SSL_set_tmp_rsa ignores |rsa| and reports success. (Compatibility no-op.) */
int SSL_set_tmp_rsa(SSL *ssl, const RSA *rsa) {
  return 1;
}
/* SSL_CTX_set_tmp_dh replaces the context's ephemeral DH parameters with a
 * copy of |dh|. Returns one on success and zero if duplication fails. */
int SSL_CTX_set_tmp_dh(SSL_CTX *ctx, const DH *dh) {
  DH_free(ctx->cert->dh_tmp);  /* Drop any previously configured parameters. */
  ctx->cert->dh_tmp = DHparams_dup(dh);
  if (ctx->cert->dh_tmp == NULL) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_DH_LIB);
    return 0;
  }
  return 1;
}
/* SSL_set_tmp_dh is the per-connection variant of |SSL_CTX_set_tmp_dh|: it
 * replaces |ssl|'s ephemeral DH parameters with a copy of |dh|. Returns one
 * on success and zero if duplication fails. */
int SSL_set_tmp_dh(SSL *ssl, const DH *dh) {
  DH_free(ssl->cert->dh_tmp);
  ssl->cert->dh_tmp = DHparams_dup(dh);
  if (ssl->cert->dh_tmp == NULL) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_DH_LIB);
    return 0;
  }
  return 1;
}
/* SSL_CTX_set_tmp_ecdh configures the context's supported curve list to the
 * single curve of |ec_key|; only the key's group is used, not the key itself.
 * Returns one on success and zero if |ec_key| or its group is NULL. */
int SSL_CTX_set_tmp_ecdh(SSL_CTX *ctx, const EC_KEY *ec_key) {
  if (ec_key == NULL || EC_KEY_get0_group(ec_key) == NULL) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }
  int nid = EC_GROUP_get_curve_name(EC_KEY_get0_group(ec_key));
  return SSL_CTX_set1_curves(ctx, &nid, 1);
}
/* SSL_set_tmp_ecdh configures |ssl|'s supported curve list to the single
 * curve of |ec_key|; only the key's group matters. Returns one on success
 * and zero if |ec_key| or its group is NULL. */
int SSL_set_tmp_ecdh(SSL *ssl, const EC_KEY *ec_key) {
  const EC_GROUP *group = ec_key == NULL ? NULL : EC_KEY_get0_group(ec_key);
  if (group == NULL) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }
  int curve_nid = EC_GROUP_get_curve_name(group);
  return SSL_set1_curves(ssl, &curve_nid, 1);
}
/* SSL_CTX_enable_tls_channel_id enables the Channel ID extension on the
 * context and returns one. */
int SSL_CTX_enable_tls_channel_id(SSL_CTX *ctx) {
  ctx->tlsext_channel_id_enabled = 1;
  return 1;
}
/* SSL_enable_tls_channel_id enables the Channel ID extension on a single
 * connection and returns one. */
int SSL_enable_tls_channel_id(SSL *ssl) {
  ssl->tlsext_channel_id_enabled = 1;
  return 1;
}
/* is_p256_key returns one if |private_key| is an EC key on the P-256 curve
 * and zero otherwise (including for non-EC keys). */
static int is_p256_key(EVP_PKEY *private_key) {
  const EC_KEY *ec_key = EVP_PKEY_get0_EC_KEY(private_key);
  if (ec_key == NULL) {
    return 0;
  }
  return EC_GROUP_get_curve_name(EC_KEY_get0_group(ec_key)) ==
         NID_X9_62_prime256v1;
}
/* SSL_CTX_set1_tls_channel_id installs |private_key| as the Channel ID key
 * for the context (taking a reference) and enables the extension. The key
 * must be on P-256; otherwise zero is returned with
 * SSL_R_CHANNEL_ID_NOT_P256. */
int SSL_CTX_set1_tls_channel_id(SSL_CTX *ctx, EVP_PKEY *private_key) {
  if (!is_p256_key(private_key)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_CHANNEL_ID_NOT_P256);
    return 0;
  }
  EVP_PKEY_free(ctx->tlsext_channel_id_private);  /* Replace any old key. */
  ctx->tlsext_channel_id_private = EVP_PKEY_up_ref(private_key);
  ctx->tlsext_channel_id_enabled = 1;
  return 1;
}
/* SSL_set1_tls_channel_id is the per-connection variant of
 * |SSL_CTX_set1_tls_channel_id|: it installs |private_key| (taking a
 * reference) and enables Channel ID. The key must be on P-256. */
int SSL_set1_tls_channel_id(SSL *ssl, EVP_PKEY *private_key) {
  if (!is_p256_key(private_key)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_CHANNEL_ID_NOT_P256);
    return 0;
  }
  EVP_PKEY_free(ssl->tlsext_channel_id_private);
  ssl->tlsext_channel_id_private = EVP_PKEY_up_ref(private_key);
  ssl->tlsext_channel_id_enabled = 1;
  return 1;
}
/* SSL_get_tls_channel_id copies up to 64 bytes of the negotiated Channel ID
 * into |out|. It returns 0 when no Channel ID was negotiated. Note that it
 * returns 64 even when |max_out| < 64 and the copy was truncated, so callers
 * can detect that a larger buffer is needed. */
size_t SSL_get_tls_channel_id(SSL *ssl, uint8_t *out, size_t max_out) {
  if (!ssl->s3->tlsext_channel_id_valid) {
    return 0;
  }
  memcpy(out, ssl->s3->tlsext_channel_id, (max_out < 64) ? max_out : 64);
  return 64;
}
/* SSL_set_tlsext_host_name sets the SNI hostname sent by |ssl| to a copy of
 * |name|, replacing any previous value. Passing NULL clears the hostname and
 * succeeds. Returns zero (with an error queued) if |name| is empty, longer
 * than TLSEXT_MAXLEN_host_name, or the copy fails. */
int SSL_set_tlsext_host_name(SSL *ssl, const char *name) {
  /* Always release the old name first so a failure leaves it cleared rather
   * than stale. */
  OPENSSL_free(ssl->tlsext_hostname);
  ssl->tlsext_hostname = NULL;
  if (name == NULL) {
    return 1;
  }

  size_t len = strlen(name);
  if (len == 0 || len > TLSEXT_MAXLEN_host_name) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_SSL3_EXT_INVALID_SERVERNAME);
    return 0;
  }
  ssl->tlsext_hostname = BUF_strdup(name);
  if (ssl->tlsext_hostname == NULL) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE);
    return 0;
  }
  return 1;
}
/* SSL_get0_certificate_types exposes the certificate-type list from the
 * server's CertificateRequest. Only meaningful on a client that has received
 * a certificate request; otherwise |*out_types| is set to NULL and zero is
 * returned. The pointer is borrowed, not owned by the caller. */
size_t SSL_get0_certificate_types(SSL *ssl, const uint8_t **out_types) {
  if (ssl->server || !ssl->s3->tmp.cert_req) {
    *out_types = NULL;
    return 0;
  }
  *out_types = ssl->s3->tmp.certificate_types;
  return ssl->s3->tmp.num_certificate_types;
}
/* SSL_CTX_set1_curves sets the context's supported elliptic-curve list to the
 * |curves_len| NIDs in |curves|. Returns one on success, zero on failure. */
int SSL_CTX_set1_curves(SSL_CTX *ctx, const int *curves, size_t curves_len) {
  return tls1_set_curves(&ctx->tlsext_ellipticcurvelist,
                         &ctx->tlsext_ellipticcurvelist_length, curves,
                         curves_len);
}
/* SSL_set1_curves is the per-connection variant of |SSL_CTX_set1_curves|. */
int SSL_set1_curves(SSL *ssl, const int *curves, size_t curves_len) {
  return tls1_set_curves(&ssl->tlsext_ellipticcurvelist,
                         &ssl->tlsext_ellipticcurvelist_length, curves,
                         curves_len);
}
/* SSL_CTX_set_tlsext_servername_callback registers |callback| to be invoked
 * when a client sends the SNI extension. Always returns one. */
int SSL_CTX_set_tlsext_servername_callback(
    SSL_CTX *ctx, int (*callback)(SSL *ssl, int *out_alert, void *arg)) {
  ctx->tlsext_servername_callback = callback;
  return 1;
}
/* SSL_CTX_set_tlsext_servername_arg sets the opaque argument passed to the
 * servername callback. Always returns one. */
int SSL_CTX_set_tlsext_servername_arg(SSL_CTX *ctx, void *arg) {
  ctx->tlsext_servername_arg = arg;
  return 1;
}
/* SSL_CTX_get_tlsext_ticket_keys copies the 48 bytes of session-ticket key
 * material (16-byte key name, 16-byte HMAC key, 16-byte AES key) into |out|.
 * If |out| is NULL, it returns 48, the required buffer length. |len| must be
 * exactly 48; otherwise zero is returned with an error queued. */
int SSL_CTX_get_tlsext_ticket_keys(SSL_CTX *ctx, void *out, size_t len) {
  if (out == NULL) {
    return 48;
  }
  if (len != 48) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_TICKET_KEYS_LENGTH);
    return 0;
  }
  uint8_t *out_bytes = out;
  memcpy(out_bytes, ctx->tlsext_tick_key_name, 16);
  memcpy(out_bytes + 16, ctx->tlsext_tick_hmac_key, 16);
  memcpy(out_bytes + 32, ctx->tlsext_tick_aes_key, 16);
  return 1;
}
/* SSL_CTX_set_tlsext_ticket_keys installs 48 bytes of session-ticket key
 * material from |in| (16-byte key name, 16-byte HMAC key, 16-byte AES key).
 * If |in| is NULL, it returns 48, the required input length. |len| must be
 * exactly 48; otherwise zero is returned with an error queued. */
int SSL_CTX_set_tlsext_ticket_keys(SSL_CTX *ctx, const void *in, size_t len) {
  if (in == NULL) {
    return 48;
  }
  if (len != 48) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_TICKET_KEYS_LENGTH);
    return 0;
  }
  const uint8_t *in_bytes = in;
  memcpy(ctx->tlsext_tick_key_name, in_bytes, 16);
  memcpy(ctx->tlsext_tick_hmac_key, in_bytes + 16, 16);
  memcpy(ctx->tlsext_tick_aes_key, in_bytes + 32, 16);
  return 1;
}
/* SSL_CTX_set_tlsext_ticket_key_cb registers |callback| to handle session
 * ticket encryption and decryption, overriding the built-in ticket keys.
 * Always returns one. */
int SSL_CTX_set_tlsext_ticket_key_cb(
    SSL_CTX *ctx, int (*callback)(SSL *ssl, uint8_t *key_name, uint8_t *iv,
                                  EVP_CIPHER_CTX *ctx, HMAC_CTX *hmac_ctx,
                                  int encrypt)) {
  ctx->tlsext_ticket_key_cb = callback;
  return 1;
}
/* ssl_get_cipher_preferences returns the cipher preference list in effect for
 * |ssl|: the connection's own list if set, otherwise the most specific
 * version-scoped context list (TLS 1.1+, then TLS 1.0+), then the context's
 * default list, then NULL. */
struct ssl_cipher_preference_list_st *ssl_get_cipher_preferences(SSL *ssl) {
  if (ssl->cipher_list != NULL) {
    return ssl->cipher_list;
  }
  if (ssl->version >= TLS1_1_VERSION && ssl->ctx->cipher_list_tls11 != NULL) {
    return ssl->ctx->cipher_list_tls11;
  }
  if (ssl->version >= TLS1_VERSION && ssl->ctx->cipher_list_tls10 != NULL) {
    return ssl->ctx->cipher_list_tls10;
  }
  if (ssl->ctx->cipher_list != NULL) {
    return ssl->ctx->cipher_list;
  }
  return NULL;
}
/* ssl3_choose_cipher selects the cipher suite for a server handshake. It
 * walks the preferring side's list (|server_pref| when
 * SSL_OP_CIPHER_SERVER_PREFERENCE is set, otherwise the client's |clnt|) and
 * returns the first cipher that is version-compatible, usable with the
 * configured credentials, and also offered by the other side. Server-side
 * equal-preference groups are resolved by picking the cipher the client
 * ranked highest within the group. Returns NULL when nothing matches. */
const SSL_CIPHER *ssl3_choose_cipher(
    SSL *ssl, STACK_OF(SSL_CIPHER) *clnt,
    struct ssl_cipher_preference_list_st *server_pref) {
  const SSL_CIPHER *c, *ret = NULL;
  STACK_OF(SSL_CIPHER) *srvr = server_pref->ciphers, *prio, *allow;
  size_t i;
  int ok;
  size_t cipher_index;
  uint32_t alg_k, alg_a, mask_k, mask_a;
  /* in_group_flags will either be NULL, or will point to an array of bytes
   * which indicate equal-preference groups in the |prio| stack. See the
   * comment about |in_group_flags| in the |ssl_cipher_preference_list_st|
   * struct. */
  const uint8_t *in_group_flags;
  /* group_min contains the minimal index so far found in a group, or -1 if no
   * such value exists yet. */
  int group_min = -1;

  if (ssl->options & SSL_OP_CIPHER_SERVER_PREFERENCE) {
    prio = srvr;
    in_group_flags = server_pref->in_group_flags;
    allow = clnt;
  } else {
    prio = clnt;
    in_group_flags = NULL;  /* Client lists carry no group information. */
    allow = srvr;
  }

  /* |mask_k|/|mask_a| describe which key-exchange and auth algorithms the
   * server's configured credentials can support. */
  ssl_get_compatible_server_ciphers(ssl, &mask_k, &mask_a);

  for (i = 0; i < sk_SSL_CIPHER_num(prio); i++) {
    c = sk_SSL_CIPHER_value(prio, i);
    ok = 1;
    /* Check the TLS version. */
    if (SSL_CIPHER_get_min_version(c) > ssl3_protocol_version(ssl)) {
      ok = 0;
    }
    alg_k = c->algorithm_mkey;
    alg_a = c->algorithm_auth;
    ok = ok && (alg_k & mask_k) && (alg_a & mask_a);
    if (ok && sk_SSL_CIPHER_find(allow, &cipher_index, c)) {
      if (in_group_flags != NULL && in_group_flags[i] == 1) {
        /* This element of |prio| is in a group. Update the minimum index found
         * so far and continue looking. */
        if (group_min == -1 || (size_t)group_min > cipher_index) {
          group_min = cipher_index;
        }
      } else {
        if (group_min != -1 && (size_t)group_min < cipher_index) {
          cipher_index = group_min;
        }
        ret = sk_SSL_CIPHER_value(allow, cipher_index);
        break;
      }
    }
    if (in_group_flags != NULL && in_group_flags[i] == 0 && group_min != -1) {
      /* We are about to leave a group, but we found a match in it, so that's
       * our answer. */
      ret = sk_SSL_CIPHER_value(allow, group_min);
      break;
    }
  }
  return ret;
}
/* ssl3_get_req_cert_type fills |p| with the certificate types to advertise in
 * a CertificateRequest, derived from the configured signature algorithms.
 * Returns the number of bytes written. The caller must supply a buffer large
 * enough for every possible type (at most two here). */
int ssl3_get_req_cert_type(SSL *ssl, uint8_t *p) {
  int ret = 0;
  const uint8_t *sig;
  size_t i, siglen;
  int have_rsa_sign = 0;
  int have_ecdsa_sign = 0;

  /* get configured sigalgs */
  siglen = tls12_get_psigalgs(ssl, &sig);
  /* Each signature algorithm is a (hash, signature) byte pair; only the
   * signature byte matters here. */
  for (i = 0; i < siglen; i += 2, sig += 2) {
    switch (sig[1]) {
      case TLSEXT_signature_rsa:
        have_rsa_sign = 1;
        break;
      case TLSEXT_signature_ecdsa:
        have_ecdsa_sign = 1;
        break;
    }
  }

  if (have_rsa_sign) {
    p[ret++] = SSL3_CT_RSA_SIGN;
  }
  /* ECDSA certs can be used with RSA cipher suites as well so we don't need to
   * check for SSL_kECDH or SSL_kECDHE. */
  if (ssl->version >= TLS1_VERSION && have_ecdsa_sign) {
    p[ret++] = TLS_CT_ECDSA_SIGN;
  }
  return ret;
}
/* If we are using default SHA1+MD5 algorithms switch to new SHA256 PRF and
 * handshake macs if required. */
/* ssl_get_algorithm_prf returns the PRF/handshake-MAC algorithm for the
 * cipher being negotiated, upgrading the pre-TLS-1.2 default to SHA-256 when
 * the negotiated protocol is TLS 1.2 or later. */
uint32_t ssl_get_algorithm_prf(const SSL *ssl) {
  uint32_t algorithm_prf = ssl->s3->tmp.new_cipher->algorithm_prf;
  if (algorithm_prf == SSL_HANDSHAKE_MAC_DEFAULT &&
      ssl3_protocol_version(ssl) >= TLS1_2_VERSION) {
    return SSL_HANDSHAKE_MAC_SHA256;
  }
  return algorithm_prf;
}
| {
"content_hash": "79da3feaa0ea6f2f2855fbac0404f7d2",
"timestamp": "",
"source": "github",
"line_count": 525,
"max_line_length": 80,
"avg_line_length": 30.53904761904762,
"alnum_prop": 0.6452317095989522,
"repo_name": "kku1993/libquic",
"id": "7df046f0e0095c7ba7bf39a609f0e96c1cdc9c0e",
"size": "19195",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "boringssl/ssl/s3_lib.c",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "187232"
},
{
"name": "C",
"bytes": "5998742"
},
{
"name": "C++",
"bytes": "7099361"
},
{
"name": "CMake",
"bytes": "52252"
},
{
"name": "CSS",
"bytes": "870"
},
{
"name": "Go",
"bytes": "544017"
},
{
"name": "Makefile",
"bytes": "294632"
},
{
"name": "Objective-C",
"bytes": "20209"
},
{
"name": "Objective-C++",
"bytes": "33832"
},
{
"name": "Perl",
"bytes": "1821585"
},
{
"name": "Python",
"bytes": "55638"
},
{
"name": "Shell",
"bytes": "12070"
}
],
"symlink_target": ""
} |
A simple and VERY compact bootsector, which lets you load your bootloader or kernel from a file.
Build using attached script.
Notice: Don't read this, read the source code. It is commented, so all the info is there
## What does this bootsector actually do?
Here it is, step by step:
1. Checks if the BIOS supports extended disk operations
2. Looks for the first active FAT32 partition
3. Looks for your file on that partition (in the root directory)
4. Loads it into memory
5. Executes it
## State of memory after my bootsector
### Your Code:
Your code is loaded at 0x1000:0x0000(segment:offset), or 0x10000(linear).
### Registers
Most of registers **are not "zeroed" and garbage is assigned to them**
**Only those have valid values:**
+ CS:IP - 0x1000:0x0000 (assigned to loaded file)
+ DS - 0x1000 (also assigned to loaded file)
+ SS:SP - 0x0000:0xFFFF (stack in first segment)
+ DL - drive number(for bios interrupts)
### Useful data left in memory
#### after jumping to loaded file there is still some useful data left in memory:
**(all offsets are specified in source code)**
+ 0x7C00 - current disk(device number), FAT address, first data sector address and similar info.
+ 0x7E00 - BPB sector of that partition
## Limitations
+ It's all in real mode so maximum size of file shouldn't be bigger than a few hundred Kilobytes
+ File must be in root directory of selected partition
+ Filename - Bootsector searches for file with 8.3 filename
## Install methods
To install use:
+ On ubuntu compatible systems use Install.sh script
+ On Windows use Fat32-bootsector.exe
**Notice** The Windows version does not use letters for partitions, but physical partitions (not logical), so:
It may be more difficult to match partition-letter, but on the other hand it supports **multi partition usb drives**
## Errors?
When error occurs bootsector draws error code on the screen.
### Here are codes and explanations of errors:
0 - extended BIOS functions not available
1 - Bootable partition not found
2 - Bootable partition is not FAT32
3 - File not found
4 - Can't load sectors (BIOS interrupt error)
| {
"content_hash": "dc2e062b449a58c8f62bcda1f111c650",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 116,
"avg_line_length": 29.633802816901408,
"alnum_prop": 0.752851711026616,
"repo_name": "TebexPL/FAT32-Bootsector",
"id": "d06c9ab07c5c90acc45ffc80fbfafa8d3b1fa2eb",
"size": "2124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "README.md",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "6465"
},
{
"name": "C++",
"bytes": "13929"
},
{
"name": "Shell",
"bytes": "4010"
}
],
"symlink_target": ""
} |
<?xml version="1.0"?>
<!--
  Ant build file for the bundleN1_test regression-test bundle.
  Only the "lib" artifact is built; all, api, impl and doc are disabled.
-->
<project name="bundleN1_test" default="all">
  <!-- Resolve the directory containing this build file. -->
  <dirname property="proj.dir" file="${ant.file.bundleN1_test}"/>
  <import file="${proj.dir}/../../../test_import.xml"/>
  <property name="bundle.build.all" value="false"/>
  <property name="bundle.build.api" value="false"/>
  <property name="bundle.build.lib" value="true"/>
  <property name="bundle.build.impl" value="false"/>
  <property name="bundle.build.doc" value="false"/>
  <!-- No extra compile-time classpath entries are needed. -->
  <path id="bundle.compile.path">
  </path>
  <import file="${ant.dir}/bundlebuild.xml"/>
</project>
| {
"content_hash": "f1af085215fb35f9a480506ecaf19f29",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 29.526315789473685,
"alnum_prop": 0.6488413547237076,
"repo_name": "knopflerfish/knopflerfish.org",
"id": "cdda2bca2250701ab29110fe4996886e33d768b2",
"size": "561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "osgi/bundles_test/regression_tests/framework_test/test_target_bundles/bundleN1_test/build.xml",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "30451"
},
{
"name": "HTML",
"bytes": "402685"
},
{
"name": "Java",
"bytes": "12620016"
},
{
"name": "Shell",
"bytes": "6245"
},
{
"name": "XSLT",
"bytes": "25470"
}
],
"symlink_target": ""
} |
<meta http-equiv="refresh" content="0;url=../index.html#artboard189"> | {
"content_hash": "bbec83d22ad9fcdb3cacd30ae71d6356",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 69,
"avg_line_length": 69,
"alnum_prop": 0.7391304347826086,
"repo_name": "NewDadaFE/NewDadaFE.github.io",
"id": "d978caff80478ea4e9438ca3473531371c2666fd",
"size": "69",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UED/links/symbols-uinavigationbar2.html",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1108889"
}
],
"symlink_target": ""
} |
'use strict';

// Mongoose model for an Activity: a thing to do near an airport, owned by a
// Team.
var mongoose = require('mongoose');
var Schema = mongoose.Schema;
var ObjectId = Schema.ObjectId;
// mongoose-relationship keeps the parent Team's `activities` array (named by
// `childPath` below) in sync with this document's `team` reference.
var relationship = require('mongoose-relationship');

var fields = {
  name: { type: String },
  activityName: { type: String },
  description: { type: String },
  linkUrl: { type: String },
  pictureUrl: { type: String },
  category: { type: String },
  airportCode: { type: String },
  address: { type: String },
  // Reference to the owning Team document.
  team: { type: ObjectId, ref: 'Team', childPath: 'activities' },
};

var ActivitySchema = new Schema(fields);
// Register the relationship plugin on the `team` path so parent/child links
// are maintained automatically on save/remove.
ActivitySchema.plugin(relationship, {relationshipPathName: 'team'});

module.exports = mongoose.model('Activity', ActivitySchema);
| {
"content_hash": "8e13a63c49500f5cf915ddab8da85c9b",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 68,
"avg_line_length": 27.958333333333332,
"alnum_prop": 0.6885245901639344,
"repo_name": "davidcawton/road-games-api",
"id": "1aa561c02ca2c589e5dc1d3e723005c947941d43",
"size": "671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/models/activity.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "87"
},
{
"name": "JavaScript",
"bytes": "30082"
}
],
"symlink_target": ""
} |
/** \file TDecSbac.h
\brief SBAC decoder class (header)
*/
#ifndef __TDECSBAC__
#define __TDECSBAC__
#if _MSC_VER > 1000
#pragma once
#endif // _MSC_VER > 1000
#include "TDecEntropy.h"
#include "TDecBinCoder.h"
#include "TLibCommon/ContextTables.h"
#include "TLibCommon/ContextModel.h"
#include "TLibCommon/ContextModel3DBuffer.h"
//! \ingroup TLibDecoder
//! \{
// ====================================================================================================================
// Class definition
// ====================================================================================================================
class SEImessages;
/// SBAC decoder class
///
/// Implements the TDecEntropyIf interface using context-adaptive binary
/// arithmetic decoding. The actual bin decoding is delegated to the
/// TDecBinIf engine set via init(); this class owns the context models.
class TDecSbac : public TDecEntropyIf
{
public:
  TDecSbac();
  virtual ~TDecSbac();

  /// attach the binary arithmetic decoding engine
  Void  init                      ( TDecBinIf* p )    { m_pcTDecBinIf = p; }
  /// detach the engine (no ownership is taken)
  Void  uninit                    (               )    { m_pcTDecBinIf = 0; }

  Void  resetEntropy              ( TComSlice* pcSlice );
  /// set the input bitstream and forward it to the bin decoder
  Void  setBitstream              ( TComInputBitstream* p ) { m_pcBitstream = p; m_pcTDecBinIf->init( p ); }
  Void  setAlfCtrl                ( Bool bAlfCtrl )          { m_bAlfCtrl = bAlfCtrl; }
  Void  setMaxAlfCtrlDepth        ( UInt uiMaxAlfCtrlDepth ) { m_uiMaxAlfCtrlDepth = uiMaxAlfCtrlDepth; }

  // Parameter sets, SEI and slice headers are not SBAC-coded; these
  // interface overrides are intentionally empty.
  Void  parseSPS                  ( TComSPS* pcSPS ) {}
  Void  parsePPS                  ( TComPPS* pcPPS ) {}
  void  parseSEI(SEImessages&) {}
  Void  parseSliceHeader          ( TComSlice*& rpcSlice ) {}

  Void  parseTerminatingBit       ( UInt& ruiBit );
  Void  parseMVPIdx               ( TComDataCU* pcCU, Int& riMVPIdx, Int iMVPNum, UInt uiAbsPartIdx, UInt uiDepth, RefPicList eRefList );

  // Adaptive Loop Filter (ALF) syntax elements.
  Void  parseAlfFlag              ( UInt& ruiVal );
  Void  parseAlfUvlc              ( UInt& ruiVal );
  Void  parseAlfSvlc              ( Int&  riVal );
  Void  parseAlfCtrlDepth         ( UInt& ruiAlfCtrlDepth );
#if MTK_SAO
  // Sample Adaptive Offset (SAO) syntax elements.
  Void  parseAoFlag               ( UInt& ruiVal );
  Void  parseAoUvlc               ( UInt& ruiVal );
  Void  parseAoSvlc               ( Int&  riVal );
#endif

private:
  // Low-level binarization readers shared by the parse* methods.
  Void  xReadUnarySymbol          ( UInt& ruiSymbol, ContextModel* pcSCModel, Int iOffset );
  Void  xReadUnaryMaxSymbol       ( UInt& ruiSymbol, ContextModel* pcSCModel, Int iOffset, UInt uiMaxSymbol );
  Void  xReadEpExGolomb           ( UInt& ruiSymbol, UInt uiCount );
  Void  xReadGoRiceExGolomb       ( UInt &ruiSymbol, UInt &ruiGoRiceParam );
#if !MODIFIED_MVD_CODING
#if MVD_CTX
  Void  xReadMvd                  ( Int& riMvdComp, UInt uiAbsSumL, UInt uiAbsSumA, UInt uiCtx );
#else
  Void  xReadMvd                  ( Int& riMvdComp, UInt uiAbsSum, UInt uiCtx );
#endif
  Void  xReadExGolombMvd          ( UInt& ruiSymbol, ContextModel* pcSCModel, UInt uiMaxBin );
#endif

private:
  TComInputBitstream* m_pcBitstream;   // input bitstream (not owned)
  TDecBinIf*          m_pcTDecBinIf;   // bin decoding engine (not owned)

  Bool m_bAlfCtrl;
  UInt m_uiMaxAlfCtrlDepth;
#if FINE_GRANULARITY_SLICES && MTK_NONCROSS_INLOOP_FILTER
  Int  m_iSliceGranularity; //!< slice granularity
#endif

public:
  // CU/PU/TU-level syntax parsing (TDecEntropyIf implementation).
  Void parseAlfCtrlFlag           ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth );
  Void parseAlfFlagNum            ( UInt& ruiVal, UInt minValue, UInt depth );
  Void parseAlfCtrlFlag           ( UInt &ruiAlfCtrlFlag );
#if FINE_GRANULARITY_SLICES && MTK_NONCROSS_INLOOP_FILTER
  /// set slice granularity
  Void setSliceGranularity(Int iSliceGranularity) {m_iSliceGranularity = iSliceGranularity;}
  /// get slice granularity
  Int  getSliceGranularity()                      {return m_iSliceGranularity; }
#endif
  Void parseSkipFlag              ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth );
  Void parseSplitFlag             ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth );
  Void parseMergeFlag             ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth, UInt uiPUIdx );
  Void parseMergeIndex            ( TComDataCU* pcCU, UInt& ruiMergeIndex, UInt uiAbsPartIdx, UInt uiDepth );
  Void parsePartSize              ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth );
  Void parsePredMode              ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth );

  Void parseIntraDirLumaAng( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth );

  Void parseIntraDirChroma( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth );

  Void parseInterDir              ( TComDataCU* pcCU, UInt& ruiInterDir, UInt uiAbsPartIdx, UInt uiDepth );
  Void parseRefFrmIdx             ( TComDataCU* pcCU, Int& riRefFrmIdx, UInt uiAbsPartIdx, UInt uiDepth, RefPicList eRefList );
  Void parseMvd                   ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiPartIdx, UInt uiDepth, RefPicList eRefList );

  Void parseTransformSubdivFlag( UInt& ruiSubdivFlag, UInt uiLog2TransformBlockSize );
  Void parseQtCbf                 ( TComDataCU* pcCU, UInt uiAbsPartIdx, TextType eType, UInt uiTrDepth, UInt uiDepth );
  Void parseQtRootCbf             ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth, UInt& uiQtRootCbf );

  Void parseDeltaQP               ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth );

  // Empty overrides: these CBF variants are handled by other entropy coders.
  Void parseCbf                   ( TComDataCU* pcCU, UInt uiAbsPartIdx, TextType eType, UInt uiTrDepth, UInt uiDepth ) {}
  Void parseBlockCbf              ( TComDataCU* pcCU, UInt uiAbsPartIdx, TextType eType, UInt uiTrDepth, UInt uiDepth, UInt uiQPartNum ) {}
#if E057_INTRA_PCM
  Void parseIPCMInfo              ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiDepth);
#endif
#if CAVLC_RQT_CBP
  Void parseCbfTrdiv              ( TComDataCU* pcCU, UInt uiAbsPartIdx, UInt uiTrDepth, UInt uiDepth, UInt& uiSubdiv ) {}
#endif

  __inline Void parseLastSignificantXY( UInt& uiPosLastX, UInt& uiPosLastY, const UInt uiWidth, const TextType eTType, const UInt uiCTXIdx, const UInt uiScanIdx );
  Void parseCoeffNxN              ( TComDataCU* pcCU, TCoeff* pcCoef, UInt uiAbsPartIdx, UInt uiWidth, UInt uiHeight, UInt uiDepth, TextType eTType );

private:
  UInt m_uiLastDQpNonZero;
  UInt m_uiLastQp;

  // Backing storage for all context models; the 3D buffers below are views
  // into this array.
  ContextModel         m_contextModels[MAX_NUM_CTX_MOD];
  Int                  m_numContextModels;
  ContextModel3DBuffer m_cCUSplitFlagSCModel;
  ContextModel3DBuffer m_cCUSkipFlagSCModel;
  ContextModel3DBuffer m_cCUMergeFlagExtSCModel;
  ContextModel3DBuffer m_cCUMergeIdxExtSCModel;
  ContextModel3DBuffer m_cCUPartSizeSCModel;
  ContextModel3DBuffer m_cCUPredModeSCModel;
  ContextModel3DBuffer m_cCUAlfCtrlFlagSCModel;
  ContextModel3DBuffer m_cCUIntraPredSCModel;
#if ADD_PLANAR_MODE && !FIXED_MPM
  ContextModel3DBuffer m_cPlanarFlagSCModel;
#endif
  ContextModel3DBuffer m_cCUChromaPredSCModel;
  ContextModel3DBuffer m_cCUDeltaQpSCModel;
  ContextModel3DBuffer m_cCUInterDirSCModel;
  ContextModel3DBuffer m_cCURefPicSCModel;
  ContextModel3DBuffer m_cCUMvdSCModel;
  ContextModel3DBuffer m_cCUQtCbfSCModel;
  ContextModel3DBuffer m_cCUTransSubdivFlagSCModel;
  ContextModel3DBuffer m_cCUQtRootCbfSCModel;
  ContextModel3DBuffer m_cCUSigSCModel;
  ContextModel3DBuffer m_cCuCtxLastX;
  ContextModel3DBuffer m_cCuCtxLastY;
  ContextModel3DBuffer m_cCUOneSCModel;
  ContextModel3DBuffer m_cCUAbsSCModel;
  ContextModel3DBuffer m_cMVPIdxSCModel;
  ContextModel3DBuffer m_cALFFlagSCModel;
  ContextModel3DBuffer m_cALFUvlcSCModel;
  ContextModel3DBuffer m_cALFSvlcSCModel;
#if AMP
  ContextModel3DBuffer m_cCUXPosiSCModel;
  ContextModel3DBuffer m_cCUYPosiSCModel;
#endif
#if MTK_SAO
  ContextModel3DBuffer m_cAOFlagSCModel;
  ContextModel3DBuffer m_cAOUvlcSCModel;
  ContextModel3DBuffer m_cAOSvlcSCModel;
#endif
};
//! \}
#endif // !defined(AFX_TDECSBAC_H__CFCAAA19_8110_47F4_9A16_810C4B5499D5__INCLUDED_)
| {
"content_hash": "1b0d4651e5f236e42a775f5502d6c422",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 163,
"avg_line_length": 41.1063829787234,
"alnum_prop": 0.6595496894409938,
"repo_name": "linqiaozhou/GitlHEVCAnalyzer",
"id": "032279d1e148339014a1335096ab1879e558e718",
"size": "9593",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "appgitlhevcdecoder/HM-4.0/source/Lib/TLibDecoder/TDecSbac.h",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "399071"
},
{
"name": "C++",
"bytes": "6365741"
},
{
"name": "Prolog",
"bytes": "6234"
},
{
"name": "Shell",
"bytes": "89867"
},
{
"name": "TeX",
"bytes": "114468"
}
],
"symlink_target": ""
} |
<?php
namespace Maruamyu\Core\OAuth1;
use Maruamyu\Core\Http\Driver\DriverFactory;
use Maruamyu\Core\Http\Driver\DriverInterface;
use Maruamyu\Core\Http\Message\QueryString;
use Maruamyu\Core\Http\Message\Request;
use Maruamyu\Core\Http\Message\Response;
use Maruamyu\Core\Http\Message\UriInterface;
/**
 * OAuth 1.0 Client
 */
class Client extends CoreLogic
{
    /**
     * lazily-created factory producing HTTP driver instances
     *
     * @var DriverFactory
     */
    protected $httpDriverFactory = null;

    /**
     * execute HTTP request with OAuth signature
     *
     * @param string $method HTTP method
     * @param string|UriInterface $uri URL
     * @param array|QueryString $params form data
     * @param bool $notUseAuthorizationHeader if true, then auth-params into QUERY_STRING or form data
     * @return Response response message
     * @throws \InvalidArgumentException if invalid args
     */
    public function doRequest($method, $uri, $params = null, $notUseAuthorizationHeader = false)
    {
        $signedRequest = $this->makeRequest($method, $uri, $params, $notUseAuthorizationHeader);
        return $this->getHttpDriver($signedRequest)->execute();
    }

    /**
     * fetch a driver for the given request, creating the factory on first use
     *
     * @param Request $request HTTP request message
     * @return DriverInterface HTTP driver class
     */
    protected function getHttpDriver(Request $request = null)
    {
        if ($this->httpDriverFactory === null) {
            $this->httpDriverFactory = new DriverFactory();
        }
        return $this->httpDriverFactory->getDriver($request);
    }
}
| {
"content_hash": "42f118437b64fc1a3e5af1de1cc23645",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 102,
"avg_line_length": 30.46,
"alnum_prop": 0.6808929743926461,
"repo_name": "maruamyu/php-lib",
"id": "220b1e20985578eedf3322888fb871632f6cdd03",
"size": "1523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Maruamyu/Core/OAuth1/Client.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "479932"
}
],
"symlink_target": ""
} |
AwsSnsKit is a solution for integrating Amazon Web Service(AWS) Simple Notification Service(SNS) into a Rails application.
## Installation
Add this line to your application's Gemfile:
gem 'aws_sns_kit'
And then execute:
$ bundle
Or install it yourself as:
$ gem install aws_sns_kit
## Configuration
Before using aws_sns_kit, you need to generate files using a generator.
Specify your model as first argument.
```ruby
rails generate aws_sns_kit:install MODEL_NAME
```
ex.
```ruby
rails generate aws_sns_kit:install User
```
This generator command create following files.
```ruby
config/initializers/aws_sns_kit.rb
db/migrate/20140715070941_add_aws_sns_kit_to_users.rb
```
Then, migrate.
```ruby
rake db:migrate
```
Also, you need to configure your initializer file.
Following configuration is for using aws-sdk.
More information here. https://github.com/aws/aws-sdk-ruby
```ruby
#config/initializers/aws_sns_kit.rb
AwsSnsKit.configure do |config|
config.access_key_id = 'your access_key_id here'
config.secret_access_key = 'your secret_access_key here'
config.region = 'your region here'
#specify PlatformApplicationArn respectively.
config.end_point = {
apns: '',
apns_sandbox: '',
gcm: ''
}
end
```
## How to use?
#### APNS(Apple Push Notification Service)
You can use `push_notify` instance method with your model instance to send push notification via AWS SNS.
Before using this method, make sure that your model includes Notifier module from aws_sns_kit.
```ruby
class User < ActiveRecord::Base
include AwsSnsKit::Notifier
end
```
Create a notification payload hash and send it by applying argument to `push_notify` method.
```ruby
user = User.first
notification = {
alert: 'Message received via aws_sns_kit',
sound: 'bingbong.aiff',
badge: 2,
content_available: 1,
custom_data: { awesome: 'something' }
}
user.push_notify(notification)
```
##### NOTE:
Since AWS SNS can send a notification to multiple platforms, aws_sns_kit expects your MODEL to return the name of the platform when the `sns_platform` method is called.
Specify your platform name from one of the following to your MODEL's attributes or instance method.
`[:apns, :apns_sandbox, :gcm]`
```ruby
# Set platform using model attribute
pry(main)> User.first.sns_platform
=> :apns
# Or specify it using instance method
class User < ActiveRecord::Base
def sns_platform
:apns
end
end
```
#### GCM(Google Cloud Messaging)
Work in progress now.
#### Topics & Subscription
Work in progress now.
## Contributing
1. Fork it ( https://github.com/awingla/aws_sns_kit/fork )
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Commit your changes (`git commit -am 'Add some feature'`)
4. Push to the branch (`git push origin my-new-feature`)
5. Create a new Pull Request
| {
"content_hash": "b96b420daeb048b974a55e5c3d46b8b3",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 162,
"avg_line_length": 21,
"alnum_prop": 0.7163712200208551,
"repo_name": "awingla/aws_sns_kit",
"id": "36549c6b846083671fa4fecb0f5a193e1e0b5a8c",
"size": "2908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "README.md",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ruby",
"bytes": "16643"
}
],
"symlink_target": ""
} |
---
layout: post
title: Starting GSoC
---
Selected for Google Summer of Code 2016.<br>
I would be working on Journal Rethink project of Sugar Labs under the guidance of Sam P. and Walter Bender.

| {
"content_hash": "27e40e940395e63e6a39af7581427c7f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 107,
"avg_line_length": 30.444444444444443,
"alnum_prop": 0.7627737226277372,
"repo_name": "AbrahmAB/AbrahmAB.github.io",
"id": "6f9c67a1e939702a116e9e6e0cf1067366d7d070",
"size": "278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_posts/2016-5-14-Starting-GSOC.md",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "62845"
},
{
"name": "HTML",
"bytes": "6266"
}
],
"symlink_target": ""
} |
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package com.amazonaws.mturk.model;
import java.io.IOException;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
*
* @author Jeremy Custenborder
*/
/**
 * Tests for {@link IsNumericConstraint}.
 *
 * @author Jeremy Custenborder
 */
public class IsNumericConstraintTest extends BaseTest {
    // Fresh constraint instance built for each test in setup().
    IsNumericConstraint constraint;

    @Before
    public void setup() {
        // Disable pretty-printing so serialized XML can be compared as a
        // single expected string.
        SerializationHelper.FORMAT_OUTPUT = false;
        constraint = new IsNumericConstraint();
    }

    // NOTE(review): the only test in this class is commented out, so nothing
    // is currently verified beyond setup() running. Re-enable or delete.
//    @Test
//    public void test() throws IOException {
//        SerializationHelper.save(constraint, System.out);
//
//        final String expected = "<IsNumeric xmlns=\"http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2005-10-01/QuestionForm.xsd\"/>";
//        String actual = SerializationHelper.toString(constraint);
//
//        Assert.assertEquals(expected, actual);
//    }
}
| {
"content_hash": "432b8b78ebbee07bca9170251bafd33a",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 148,
"avg_line_length": 26.916666666666668,
"alnum_prop": 0.7327141382868937,
"repo_name": "ScarceMedia/java-aws-mturk-model",
"id": "0022c23de0bedc7522bc752df1bc4c494efc346b",
"size": "969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test/java/com/amazonaws/mturk/model/IsNumericConstraintTest.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "58772"
}
],
"symlink_target": ""
} |
@extends('layouts.master')

{{-- Browser/page title: the localized "roles" label. --}}
@section('title')
{{ trans('navigation.roles') }}
@stop

{{-- Edit form for an existing role; reuses the shared create/edit partial
     with a PUT submit to role.update. --}}
@section('content')
<h1>{{ trans('navigation.roles') }}</h1>
<h2>{{ trans('messages.editing')}} {{ trans('auth.of_role')}} "{{ $role->name}}"</h2>
<p><a href="{{ LaravelLocalization::localizeURL('/role/'.$role->id) }}">{{ trans('messages.back_to_show') }}</a></p>

{!! Form::model($role, array('method'=>'PUT', 'route' => array('role.update', $role->id))) !!}
@include('role._form_create_edit', ['submit_title' => trans('messages.save'),
'action' => 'edit'])
{!! Form::close() !!}
@stop | {
"content_hash": "c3725dd40ad0476c4fb2cc18236b4cac",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 124,
"avg_line_length": 41.5625,
"alnum_prop": 0.512781954887218,
"repo_name": "lunata/confform",
"id": "d4e49d98f592135317e6ada0515227095ca023f8",
"size": "665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resources/views/role/edit.blade.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "553"
},
{
"name": "HTML",
"bytes": "290922"
},
{
"name": "PHP",
"bytes": "262584"
},
{
"name": "Vue",
"bytes": "563"
}
],
"symlink_target": ""
} |
package com.xiaofeng.utils.designpatterns.builder;
/**
 * Concrete builder in the Builder pattern that assembles the "angel" actor
 * preset: each build step fills one attribute on the actor instance
 * provided by the abstract {@code ActorBuilder} (not visible in this file —
 * presumably a protected field; confirm in ActorBuilder).
 *
 * @author Chen Xiaofeng
 * @version 1.0.0
 * @date 2017/07/17
 * @email chenxf84@gmail.com
 */
public class AngelBuilder extends ActorBuilder{
    // Actor type: "angel".
    public void buildType() {
        actor.setType("天使");
    }

    // Sex: "female".
    public void buildSex() {
        actor.setSex("女");
    }

    // Face: "beautiful".
    public void buildFace() {
        actor.setFace("漂亮");
    }

    // Costume: "white dress".
    public void buildCostume() {
        actor.setCostume("白裙");
    }

    // Hairstyle: "long shoulder-length hair".
    public void buildHairstyle() {
        actor.setHairstyle("披肩长发");
    }
}
| {
"content_hash": "ed85215977762d25aa81ba6d29a5e7b7",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 50,
"avg_line_length": 17.677419354838708,
"alnum_prop": 0.5967153284671532,
"repo_name": "nellochen/springboot-start",
"id": "320bb88b69821ced3b6a66a85f6cd1d1e4a0b0c5",
"size": "570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/src/main/java/com/xiaofeng/utils/designpatterns/builder/AngelBuilder.java",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "55066"
},
{
"name": "Java",
"bytes": "207415"
},
{
"name": "Shell",
"bytes": "77638"
}
],
"symlink_target": ""
} |
pytest-4.6.11
=======================================
pytest 4.6.11 has just been released to PyPI.
This is a bug-fix release, being a drop-in replacement. To upgrade::
pip install --upgrade pytest
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
Thanks to all who contributed to this release, among them:
* Anthony Sottile
* Bruno Oliveira
* Sviatoslav Sydorenko
Happy testing,
The pytest Development Team
| {
"content_hash": "c1d46df0ff64ef4ceae3a96864ee8a3f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 84,
"avg_line_length": 22.65,
"alnum_prop": 0.6997792494481236,
"repo_name": "kawamon/hue",
"id": "276584bdf523555780274060d333cd28412c27bc",
"size": "453",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/pytest-4.6.11/doc/en/announce/release-4.6.11.rst",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
} |
package io.maxthomas.dictum;
import java.util.List;
import java.util.Optional;
import org.inferred.freebuilder.FreeBuilder;
/**
* Class, extending {@link FlatTokenGrouping}, that represents
* a way to point to specific tokens inside a structure.
* This interface provides an additional set of methods that obviate the
* need for construction of pointers, crawling through tokenizations, etc.
* <br><br>
* One can consume this interface's methods to get wrapper objects that
* allow direct retrieval of the tokens, their textspans,
* and their texts.
*/
@FreeBuilder
public abstract class DictumTokenGrouping extends FlatTokenGrouping {

    /** Returns the tokens covered by this grouping. */
    public abstract List<Token> getTokens();

    /** Returns the anchor token of this grouping, when one is present. */
    public abstract Optional<Token> getAnchorToken();

    /** Returns the {@link Tokenization} that the grouped tokens belong to. */
    public abstract Tokenization getTokenization();

    /** Builder for this type, generated by FreeBuilder. */
    public static class Builder extends DictumTokenGrouping_Builder { }

    // Package-private constructor: restricts direct subclassing outside this package;
    // instances are meant to be created via the generated Builder.
    DictumTokenGrouping() { }
}
| {
"content_hash": "93b117b0c70958a48348a84c02afb63b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 74,
"avg_line_length": 29,
"alnum_prop": 0.7741935483870968,
"repo_name": "maxthomas/dictum",
"id": "c4d2ff0af42289e9c050fb8502b6ce38b0f1730a",
"size": "1002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/java/io/maxthomas/dictum/DictumTokenGrouping.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "49621"
}
],
"symlink_target": ""
} |
#include <StdAfx.h>
#include "VssAsyncResult.h"
namespace Alphaleonis { namespace Win32 { namespace Vss
{
// Managed IAsyncResult adapter around a native ::IVssAsync operation.
// The constructor takes ownership of one reference on vssAsync and queues a
// thread-pool work item that blocks until the native operation finishes.
VssAsyncResult::VssAsyncResult(::IVssAsync *vssAsync, AsyncCallback^ userCallback, Object^ asyncState)
    : m_isComplete(0), m_asyncCallback(userCallback), m_asyncState(asyncState), m_asyncWaitHandle(nullptr), m_vssAsync(vssAsync), m_exception(nullptr)
{
    if (!ThreadPool::QueueUserWorkItem(gcnew WaitCallback(this, &VssAsyncResult::WaitForAsyncCompletion)))
    {
        throw gcnew Exception(L"ThreadPool::QueueUserWorkItem failed.");
    }
}

// Thread-pool callback: blocks on the VSS operation, records its outcome,
// then signals the wait handle and fires the user's completion callback.
void VssAsyncResult::WaitForAsyncCompletion(Object^ state)
{
    HRESULT hrResult;
    hrResult = m_vssAsync->Wait();

    // Atomically flip the completion flag; a previous non-zero value would
    // mean this method ran twice.
    // NOTE(review): thrown on a thread-pool thread this exception would take
    // down the process; it is believed unreachable because the work item is
    // queued exactly once in the constructor — confirm.
    int prevState = Interlocked::Exchange(m_isComplete, -1);

    if (prevState != 0)
        throw gcnew InvalidOperationException("WaitForAsyncCompletion can only be called once.");

    // Wait() itself succeeded: ask the async object for the operation's
    // final status code.
    if (SUCCEEDED(hrResult))
    {
        HRESULT hr = m_vssAsync->QueryStatus(&hrResult, NULL);
        if (FAILED(hr))
            hrResult = hr;
    }

    // Translate the final HRESULT into a managed exception, which EndInvoke()
    // rethrows on the caller's thread.
    if (FAILED(hrResult))
        m_exception = GetExceptionForHr(hrResult);
    else if (hrResult == VSS_S_ASYNC_CANCELLED)
        m_exception = gcnew OperationCanceledException();

    // Wake any waiter, then notify the caller-supplied callback (if any).
    if (m_asyncWaitHandle != nullptr)
        m_asyncWaitHandle->Set();

    if (m_asyncCallback != nullptr)
        m_asyncCallback(this);
}

// Blocks until the operation completes and rethrows any captured failure.
void VssAsyncResult::EndInvoke()
{
    // This method assumes that only 1 thread calls EndInvoke
    // for this object
    if (!IsCompleted)
    {
        AsyncWaitHandle->WaitOne();
        AsyncWaitHandle->Close();
        m_asyncWaitHandle = nullptr;
    }

    if (m_exception != nullptr)
        throw m_exception;
}

// User-supplied state object captured at creation time.
Object^ VssAsyncResult::AsyncState::get()
{
    return m_asyncState;
}

// Completion always happens on a thread-pool thread, never synchronously.
bool VssAsyncResult::CompletedSynchronously::get()
{
    return false;
}

// Lazily creates the wait handle; safe against concurrent first access.
WaitHandle^ VssAsyncResult::AsyncWaitHandle::get()
{
    if (m_asyncWaitHandle == nullptr)
    {
        bool done = IsCompleted;
        ManualResetEvent^ ev = gcnew ManualResetEvent(done);
        if (Interlocked::CompareExchange<ManualResetEvent^>(m_asyncWaitHandle, ev, nullptr) != nullptr)
        {
            // Another thread installed its event first; discard ours.
            ev->Close();
        }
        else
        {
            // Completion may have raced between reading `done` and installing
            // the event — ensure the handle ends up signaled in that case.
            if (!done && IsCompleted)
            {
                m_asyncWaitHandle->Set();
            }
        }
    }

    return m_asyncWaitHandle;
}

// Non-blocking completion probe (volatile read of the completion flag).
bool VssAsyncResult::IsCompleted::get()
{
    return Thread::VolatileRead(m_isComplete) != 0;
}

// Factory: on construction failure, releases the native reference that the
// instance would otherwise have owned, so the COM object is not leaked.
VssAsyncResult^ VssAsyncResult::Create(::IVssAsync *vssAsync, AsyncCallback^ userCallback, Object^ asyncState)
{
    try
    {
        return gcnew VssAsyncResult(vssAsync, userCallback, asyncState);
    }
    catch (...)
    {
        vssAsync->Release();
        throw;
    }
}

// Destructor (Dispose): delegates to the finalizer to release the COM ref.
VssAsyncResult::~VssAsyncResult()
{
    this->!VssAsyncResult();
}

// Finalizer: releases the native ::IVssAsync reference exactly once.
VssAsyncResult::!VssAsyncResult()
{
    if (m_vssAsync != 0)
    {
        m_vssAsync->Release();
        m_vssAsync = 0;
    }
}

// NOTE(review): intentionally(?) a no-op — IVssAsync::Cancel is never invoked.
// TODO confirm whether cancellation should forward to m_vssAsync->Cancel().
void VssAsyncResult::Cancel()
{
}
}}} | {
"content_hash": "246ce3bf90fe645f37c84d4eb2f3bebf",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 152,
"avg_line_length": 24.767441860465116,
"alnum_prop": 0.5899843505477308,
"repo_name": "modulexcite/AlphaVSS",
"id": "e59c789aafa977ddb92559f28c144266a4c62d23",
"size": "4335",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/AlphaVSS.Platform/Source/VssAsyncResult.cpp",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6842"
},
{
"name": "C#",
"bytes": "871915"
},
{
"name": "C++",
"bytes": "160663"
},
{
"name": "Objective-C",
"bytes": "3673"
}
],
"symlink_target": ""
} |
/* globals expect */
import Store from 'common/src/data/notification/notification-store';
import Type from 'common/src/data/notification/notification-types';
describe('The redux notification store', () => {
    it('should return an empty array without action', () => {
        let state = Store();
        // BUG FIX: chai has no `.defined` property, so `to.be.defined` was a
        // silent no-op that asserted nothing. Assert non-undefined explicitly.
        expect(state.length).to.not.be.undefined;
        expect(state.length).to.equal(0);
    });

    it('should receive a notification', () => {
        let state = Store([], {
            type: Type.ADD_NOTIFICATION,
            payload: ['Hello']
        });
        expect(state.length).equals(1);
        let msg = state[0];
        expect(msg.type).to.equal('default');
        // Same `.defined` no-op fixed here: `created` must actually be set.
        expect(msg.created).to.not.be.undefined;
        expect(msg.message).to.equal('Hello');
    });

    it('should return the same state if there is nothing to remove', () => {
        const state = [],
            newState = (Store(state, {
                type: Type.REMOVE_NOTIFICATIONS_OLDER_THAN,
                payload: 5000
            }));
        // The reducer must return the identical reference when nothing changed.
        expect(state === newState).to.be.true;
    });
});
| {
"content_hash": "2446ec5796a5c2b3d516593806db2191",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 76,
"avg_line_length": 32.84848484848485,
"alnum_prop": 0.5627306273062731,
"repo_name": "zalando-stups/yourturn",
"id": "6da260e0ea1dfd718b8e9e3c38bf6ef85bd6db4e",
"size": "1084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/lib/common/test/notification/notification-store.test.js",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "38202"
},
{
"name": "Dockerfile",
"bytes": "385"
},
{
"name": "HTML",
"bytes": "7634"
},
{
"name": "JavaScript",
"bytes": "708839"
},
{
"name": "Shell",
"bytes": "865"
}
],
"symlink_target": ""
} |
export * from './arrays';
export * from './objects';
export * from './types';
| {
"content_hash": "1eab3d24f662d1fd2b00014e40651c03",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 26,
"avg_line_length": 26,
"alnum_prop": 0.6153846153846154,
"repo_name": "iamatypeofwalrus/GlasswavesCo",
"id": "d3781a0fcc33e11c125a63474d801053899bd9e1",
"size": "78",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cdk/node_modules/@aws-cdk/util/lib/index.d.ts",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "132526"
},
{
"name": "HTML",
"bytes": "2138"
}
],
"symlink_target": ""
} |
'''
clicknupload urlresolver plugin
Copyright (C) 2015 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from lib import helpers
from lib import captcha_lib
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
MAX_TRIES = 3
class ClickNUploadResolver(UrlResolver):
    """Resolves clicknupload.* share links to a direct, streamable URL."""
    name = "clicknupload"
    domains = ['clicknupload.com', 'clicknupload.me', 'clicknupload.link', 'clicknupload.org']
    # Raw string so regex backslashes (\., etc.) are not treated as
    # (deprecated) string escape sequences; the pattern value is unchanged.
    pattern = r'(?://|\.)(clicknupload\.(?:com|me|link|org))/(?:f/)?([0-9A-Za-z]+)'

    def __init__(self):
        self.net = common.Net()

    def get_media_url(self, host, media_id):
        """Fetch the file page, submit the captcha form up to MAX_TRIES times,
        and return the download URL with the request headers appended.

        Raises ResolverError when no download link can be located."""
        web_url = self.get_url(host, media_id)
        headers = {
            'User-Agent': common.FF_USER_AGENT,
            'Referer': web_url
        }
        html = self.net.http_GET(web_url, headers=headers).content

        for tries in range(MAX_TRIES):
            # Re-submit the page's hidden form fields plus the captcha answer.
            data = helpers.get_hidden(html)
            data.update(captcha_lib.do_captcha(html))
            html = self.net.http_POST(web_url, data, headers=headers).content
            r = re.search(r'''class="downloadbtn"[^>]+onClick\s*=\s*"window\.open\('([^']+)''', html)
            if r:
                return r.group(1) + helpers.append_headers(headers)
            if tries > 0:
                common.kodi.sleep(1000)  # back off before retrying the captcha

        raise ResolverError('Unable to locate link')

    def get_url(self, host, media_id):
        # The .org domain currently serves all mirror hostnames.
        return 'https://clicknupload.org/%s' % media_id

    @classmethod
    def isPopup(self):
        # NOTE: first argument is conventionally named `cls`; kept as `self`
        # to leave the existing signature byte-compatible.
        return True
| {
"content_hash": "b4af6603715bc00848710e4830111992",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 101,
"avg_line_length": 34.82258064516129,
"alnum_prop": 0.6470588235294118,
"repo_name": "dbiesecke/dbiesecke.github.io",
"id": "c2213ca14cf88852ba43f74234e5871b9702c1ca",
"size": "2159",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "repo/script.module.urlresolver/lib/urlresolver/plugins/clicknupload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23106"
},
{
"name": "HTML",
"bytes": "1689379"
},
{
"name": "JavaScript",
"bytes": "103456"
},
{
"name": "Makefile",
"bytes": "4554"
},
{
"name": "Perl",
"bytes": "2785"
},
{
"name": "Python",
"bytes": "14200477"
},
{
"name": "Shell",
"bytes": "1804"
}
],
"symlink_target": ""
} |
package mmarquee.automation.controls;
import static org.junit.Assert.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;
import mmarquee.automation.Element;
import org.junit.Assume;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import mmarquee.automation.BaseAutomationTest;
import mmarquee.automation.ControlType;
import mmarquee.automation.ElementNotFoundException;
import mmarquee.automation.pattern.SelectionItem;
import mmarquee.uiautomation.TreeScope;
/**
* Tests for Tab.
*
* @author Mark Humphreys
* Date 29/11/2016.
*/
public class AutomationTabTest {

    // UIAutomation is Windows-only: skip the entire fixture elsewhere.
    @BeforeClass
    public static void checkOs() throws Exception {
        Assume.assumeTrue(isWindows());
    }

    private static boolean isWindows() {
        return System.getProperty("os.name").toLowerCase().contains("windows");
    }

    // Element backing the Tab under test.
    @Mock
    Element element;

    // Spied so interactions on the Tab itself can be verified.
    Tab automationTab;

    // Child elements returned by the mocked findAll() call.
    List<Element> list;

    // The single tab-item element placed into `list`.
    @Mock
    Element targetElement;

    @Before
    public void setup() throws Exception {
        MockitoAnnotations.initMocks(this);

        automationTab = Mockito.spy(new Tab(new ElementBuilder(element)));

        list = new ArrayList<>();
        list.add(targetElement);
    }

    // Enable `assert` statements for classes loaded by the system class loader.
    static {
        ClassLoader.getSystemClassLoader().setDefaultAssertionStatus(true);
    }

    // getTabItems() should wrap every TabItem child found under the tab element.
    @Test
    public void test_GetTabItems_Returns_Items() throws Exception {
        when(element.findAll(BaseAutomationTest.isTreeScope(TreeScope.DESCENDANTS), any())).thenReturn(list);

        List<TabItem> tabItems = automationTab.getTabItems();

        assertEquals(1,tabItems.size());
        assertEquals(targetElement,tabItems.get(0).getElement());

        verify(automationTab).createControlTypeCondition(ControlType.TabItem);
        verify(element, atLeastOnce()).findAll(any(), any());
    }

    // Selecting a tab page by exact name invokes the SelectionItem pattern.
    @Test
    public void test_SelectTabPage_By_Name_Succeeds_When_Tab_Present() throws Exception {
        when(element.findAll(BaseAutomationTest.isTreeScope(TreeScope.DESCENDANTS), any())).thenReturn(list);
        when(targetElement.getName()).thenReturn("TEST-01");
        SelectionItem mockSelectItemPattern = BaseAutomationTest.mockSelectItemPattern(targetElement);

        automationTab.selectTabPage("TEST-01");

        verify(mockSelectItemPattern).select();
        verify(automationTab).createControlTypeCondition(ControlType.TabItem);
        verify(element, atLeastOnce()).findAll(any(), any());
    }

    // A name that matches no tab page must raise ElementNotFoundException.
    @Test(expected = ElementNotFoundException.class)
    public void test_SelectTabPage_By_Name_Throws_Exception_When_Tab_Not_Present() throws Exception {
        when(element.findAll(BaseAutomationTest.isTreeScope(TreeScope.DESCENDANTS), any())).thenReturn(list);
        when(targetElement.getName()).thenReturn("TEST-01");

        automationTab.selectTabPage("TEST");
    }

    // Selecting by regex pattern should match "TEST-01" against TEST-\d{2,3}.
    @Test
    public void test_SelectTabPage_By_Name_with_RegExPattern_Succeeds_When_Tab_Present() throws Exception {
        when(element.findAll(BaseAutomationTest.isTreeScope(TreeScope.DESCENDANTS), any())).thenReturn(list);
        when(targetElement.getName()).thenReturn("TEST-01");
        SelectionItem mockSelectItemPattern = BaseAutomationTest.mockSelectItemPattern(targetElement);

        automationTab.selectTabPage(Pattern.compile("TEST-\\d{2,3}"));

        verify(mockSelectItemPattern).select();
        verify(automationTab).createControlTypeCondition(ControlType.TabItem);
        verify(element, atLeastOnce()).findAll(any(), any());
    }

    // Regex matching is case-sensitive here: "test.*" must not match "TEST-01".
    @Test(expected = ElementNotFoundException.class)
    public void test_SelectTabPage_By_Name_with_RegExPattern_Throws_Exception_When_Tab_Not_Present() throws Exception {
        when(element.findAll(BaseAutomationTest.isTreeScope(TreeScope.DESCENDANTS), any())).thenReturn(list);
        when(targetElement.getName()).thenReturn("TEST-01");

        automationTab.selectTabPage(Pattern.compile("test.*"));
    }
}
| {
"content_hash": "cd23f644d6483e95492df3b7c4e534a7",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 119,
"avg_line_length": 34.62096774193548,
"alnum_prop": 0.7148846960167715,
"repo_name": "mmarquee/ui-automation",
"id": "ee8f52afcb52a1f085130343f948678ac9fef23b",
"size": "4901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test/java/mmarquee/automation/controls/AutomationTabTest.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "1511227"
},
{
"name": "Scala",
"bytes": "7374"
}
],
"symlink_target": ""
} |
package pcserror
import (
"github.com/iikira/BaiduPCS-Go/pcsutil/jsonhelper"
"io"
)
type (
	// ErrType classifies an error produced by this package.
	ErrType int

	// Error is the interface implemented by Baidu PCS / Pan error values.
	Error interface {
		error

		// SetJSONError records a JSON-decoding failure.
		SetJSONError(err error)
		// SetNetError records a network failure.
		SetNetError(err error)
		// SetRemoteError marks this error as reported by the remote server.
		SetRemoteError()

		// GetOperation returns the operation that was being performed.
		GetOperation() string
		// GetErrType returns this error's classification.
		GetErrType() ErrType
		// GetRemoteErrCode returns the error code reported by the remote server.
		GetRemoteErrCode() int
		// GetRemoteErrMsg returns the error message reported by the remote server.
		GetRemoteErrMsg() string
		// GetError returns the underlying error, if any.
		GetError() error
	}
)

const (
	// ErrorTypeNoError means no error occurred.
	ErrorTypeNoError ErrType = iota
	// ErrTypeInternalError is an internal error.
	ErrTypeInternalError
	// ErrTypeRemoteError means the remote server returned an error.
	ErrTypeRemoteError
	// ErrTypeNetError is a network error.
	ErrTypeNetError
	// ErrTypeJSONParseError means JSON data parsing failed.
	ErrTypeJSONParseError
	// ErrTypeOthers covers all other errors.
	ErrTypeOthers
)

const (
	// StrSuccess is the message for a successful operation ("操作成功").
	StrSuccess = "操作成功"
	// StrInternalError is the message for an internal error ("内部错误").
	StrInternalError = "内部错误"
	// StrRemoteError is the message for a remote-server error ("远端服务器返回错误").
	StrRemoteError = "远端服务器返回错误"
	// StrNetError is the message for a network error ("网络错误").
	StrNetError = "网络错误"
	// StrJSONParseError is the message for a JSON parse failure ("json 数据解析失败").
	StrJSONParseError = "json 数据解析失败"
)
// DecodePCSJSONError decodes a PCS JSON response body read from data and
// returns the contained error, or nil when the response reports success.
// (Also fixes the misspelled "opreation" parameter name.)
func DecodePCSJSONError(operation string, data io.Reader) Error {
	errInfo := NewPCSErrorInfo(operation)
	return HandleJSONParse(operation, data, errInfo)
}
// DecodePanJSONError decodes a Pan JSON response body read from data and
// returns the contained error, or nil when the response reports success.
// (Also fixes the misspelled "opreation" parameter name.)
func DecodePanJSONError(operation string, data io.Reader) Error {
	errInfo := NewPanErrorInfo(operation)
	return HandleJSONParse(operation, data, errInfo)
}
// HandleJSONParse decodes the JSON payload from data into info and converts
// any failure — or a remote error code carried inside the payload — into an
// Error. It returns nil when the payload decodes cleanly and reports success.
//
// info should itself implement Error; if it does not (or is nil), a fresh
// PCS error info for op is used so the caller still receives a usable Error.
func HandleJSONParse(op string, data io.Reader, info interface{}) (pcsError Error) {
	// BUG FIX: the original single-value assertion `info.(Error)` panics when
	// info is nil or does not implement Error, making the nil fallback below
	// unreachable. The comma-ok form degrades gracefully instead.
	errInfo, ok := info.(Error)
	if !ok || errInfo == nil {
		errInfo = NewPCSErrorInfo(op)
	}

	if err := jsonhelper.UnmarshalData(data, info); err != nil {
		errInfo.SetJSONError(err)
		return errInfo
	}

	// A non-zero error code in the decoded payload means the remote side failed.
	if errInfo.GetRemoteErrCode() != 0 {
		errInfo.SetRemoteError()
		return errInfo
	}

	return nil
}
| {
"content_hash": "a411156bab47738d8c555cb48e4d435e",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 84,
"avg_line_length": 20,
"alnum_prop": 0.748314606741573,
"repo_name": "iikira/BaiduPCS-Go",
"id": "a851a9d0780bcddf3452cf5b268b9125eaad8ba7",
"size": "2051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "baidupcs/pcserror/pcserror.go",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "441761"
},
{
"name": "JavaScript",
"bytes": "1689"
},
{
"name": "Shell",
"bytes": "4567"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.