code stringlengths 3 1.01M | repo_name stringlengths 5 116 | path stringlengths 3 311 | language stringclasses 30 values | license stringclasses 15 values | size int64 3 1.01M |
|---|---|---|---|---|---|
#!/usr/bin/env python
# File created on 20 Feb 2013
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Greg Caporaso", "Kyle Bittinger", "Justin Kuczynski",
"Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
from os.path import split, splitext, abspath
from qiime.util import create_dir
from qiime.workflow.util import (print_to_stdout,
generate_log_fp,
WorkflowLogger,
log_input_md5s,
get_params_str)
def run_pick_de_novo_otus(input_fp,
                          output_dir,
                          command_handler,
                          params,
                          qiime_config,
                          parallel=False,
                          logger=None,
                          suppress_md5=False,
                          status_update_callback=print_to_stdout):
    """ Run the data preparation steps of Qiime

    The steps performed by this function are:
      1) Pick OTUs;
      2) Pick a representative set;
      3) Align the representative set;
      4) Assign taxonomy;
      5) Filter the alignment prior to tree building - remove positions
         which are all gaps, and specified as 0 in the lanemask
      6) Build a phylogenetic tree;
      7) Build an OTU table.

    input_fp: path to the input sequences (fasta) file
    output_dir: directory where all output will be written
    command_handler: callable that executes the assembled shell commands
    params: dict of per-script parameter dicts (e.g. params['pick_otus'])
    qiime_config: QIIME configuration dict, recorded in the log
    parallel: if True, use the parallel variants of supported scripts
    logger: an open WorkflowLogger; if None, one is created here and
     closed when the workflow succeeds
    suppress_md5: if True, skip logging the md5 sum of the input file
    status_update_callback: called with a status string as each step runs

    Returns the absolute paths of the phylogenetic tree and the OTU table.
    """
    # Prepare some variables for the later steps
    input_dir, input_filename = split(input_fp)
    input_basename, input_ext = splitext(input_filename)
    create_dir(output_dir)
    commands = []
    cluster_failures = False
    if logger is None:
        logger = WorkflowLogger(generate_log_fp(output_dir),
                                params=params,
                                qiime_config=qiime_config)
        # We created the logger, so we're responsible for closing it.
        close_logger_on_success = True
    else:
        close_logger_on_success = False

    if not suppress_md5:
        log_input_md5s(logger, [input_fp])

    # Prep the OTU picking command
    try:
        otu_picking_method = params['pick_otus']['otu_picking_method']
    except KeyError:
        otu_picking_method = 'uclust'
    pick_otu_dir = '%s/%s_picked_otus' % (output_dir, otu_picking_method)
    otu_fp = '%s/%s_otus.txt' % (pick_otu_dir, input_basename)
    if parallel and (otu_picking_method == 'blast' or
                     otu_picking_method == 'uclust_ref'):
        # Grab the parallel-specific parameters
        try:
            params_str = get_params_str(params['parallel'])
        except KeyError:
            params_str = ''

        # Grab the OTU picker parameters. Default to an empty dict so the
        # code below cannot hit a NameError when params['pick_otus'] is
        # absent (previously `d` would have been unbound in that case).
        d = {}
        try:
            # Want to find a cleaner strategy for this: the parallel script
            # is method-specific, so doesn't take a --otu_picking_method
            # option. This works for now though.
            d = params['pick_otus'].copy()
            del d['otu_picking_method']
        except KeyError:
            pass

        if otu_picking_method == 'uclust_ref':
            if 'suppress_new_clusters' in d:
                # Reference-only clustering was requested; the flag is
                # implied by the parallel script, so don't pass it through.
                del d['suppress_new_clusters']
                cluster_failures = False
            else:
                # Reads that fail to hit the reference will be clustered
                # de novo in a follow-up step below.
                cluster_failures = True
                failure_otu_picking_method = 'uclust'

        params_str += ' %s' % get_params_str(d)
        otu_picking_script = 'parallel_pick_otus_%s.py' % otu_picking_method
        # Build the OTU picking command
        pick_otus_cmd = '%s -i %s -o %s -T %s' % (otu_picking_script,
                                                  input_fp,
                                                  pick_otu_dir,
                                                  params_str)
    else:
        try:
            params_str = get_params_str(params['pick_otus'])
        except KeyError:
            params_str = ''
        # Build the OTU picking command
        pick_otus_cmd = 'pick_otus.py -i %s -o %s %s' %\
            (input_fp, pick_otu_dir, params_str)

    commands.append([('Pick OTUs', pick_otus_cmd)])

    if cluster_failures:
        # Cluster the reads that failed to hit the reference de novo, then
        # merge the resulting OTU map with the reference-based one.
        reference_otu_fp = otu_fp
        clustered_failures_dir = '%s/failure_otus/' % pick_otu_dir
        d = {}  # same NameError guard as above
        try:
            d = params['pick_otus'].copy()
            del d['otu_picking_method']
        except KeyError:
            pass

        if 'denovo_otu_id_prefix' not in d:
            # Make de novo OTU ids distinguishable from reference OTU ids.
            d['denovo_otu_id_prefix'] = 'DeNovoOTU'
        params_str = ' %s' % get_params_str(d)

        failures_list_fp = '%s/%s_failures.txt' % \
            (pick_otu_dir, input_basename)
        failures_fasta_fp = '%s/%s_failures.fasta' % \
            (pick_otu_dir, input_basename)

        # Extract the failure sequences into their own fasta file
        filter_fasta_cmd = 'filter_fasta.py -f %s -s %s -o %s' %\
            (input_fp, failures_list_fp, failures_fasta_fp)
        commands.append([('Generate failures fasta file',
                          filter_fasta_cmd)])

        # Prep the OTU picking command for the failure sequences
        failure_otu_fp = '%s/%s_failures_otus.txt' % (clustered_failures_dir,
                                                      input_basename)
        # Build the OTU picking command
        pick_otus_cmd = 'pick_otus.py -i %s -o %s -m %s %s' %\
            (failures_fasta_fp, clustered_failures_dir,
             failure_otu_picking_method, params_str)
        commands.append(
            [('Pick de novo OTUs for new clusters', pick_otus_cmd)])

        merged_otu_map_fp = '%s/merged_otu_map.txt' % clustered_failures_dir
        cat_otu_tables_cmd = 'cat %s %s >> %s' %\
            (reference_otu_fp, failure_otu_fp, merged_otu_map_fp)
        commands.append([('Merge OTU maps', cat_otu_tables_cmd)])
        # All downstream steps operate on the merged OTU map.
        otu_fp = merged_otu_map_fp

    # Prep the representative set picking command
    rep_set_dir = '%s/rep_set/' % output_dir
    create_dir(rep_set_dir)
    rep_set_fp = '%s/%s_rep_set.fasta' % (rep_set_dir, input_basename)
    rep_set_log_fp = '%s/%s_rep_set.log' % (rep_set_dir, input_basename)
    try:
        params_str = get_params_str(params['pick_rep_set'])
    except KeyError:
        params_str = ''
    # Build the representative set picking command
    pick_rep_set_cmd = 'pick_rep_set.py -i %s -f %s -l %s -o %s %s' %\
        (otu_fp, input_fp, rep_set_log_fp, rep_set_fp, params_str)
    commands.append([('Pick representative set', pick_rep_set_cmd)])

    # Prep the taxonomy assignment command
    try:
        assignment_method = params['assign_taxonomy']['assignment_method']
    except KeyError:
        assignment_method = 'uclust'
    assign_taxonomy_dir = '%s/%s_assigned_taxonomy' %\
        (output_dir, assignment_method)
    taxonomy_fp = '%s/%s_rep_set_tax_assignments.txt' % \
        (assign_taxonomy_dir, input_basename)
    if parallel and (assignment_method == 'rdp' or
                     assignment_method == 'blast' or
                     assignment_method == 'uclust'):
        # Grab the parallel-specific parameters
        try:
            params_str = get_params_str(params['parallel'])
        except KeyError:
            params_str = ''

        # Grab the taxonomy assignment parameters
        try:
            # Want to find a cleaner strategy for this: the parallel script
            # is method-specific, so doesn't take a --assignment_method
            # option. This works for now though.
            d = params['assign_taxonomy'].copy()
            if 'assignment_method' in d:
                del d['assignment_method']
            params_str += ' %s' % get_params_str(d)
        except KeyError:
            pass

        # Build the parallel taxonomy assignment command
        assign_taxonomy_cmd = \
            'parallel_assign_taxonomy_%s.py -i %s -o %s -T %s' %\
            (assignment_method, rep_set_fp, assign_taxonomy_dir, params_str)
    else:
        try:
            params_str = get_params_str(params['assign_taxonomy'])
        except KeyError:
            params_str = ''
        # Build the taxonomy assignment command
        assign_taxonomy_cmd = 'assign_taxonomy.py -o %s -i %s %s' %\
            (assign_taxonomy_dir, rep_set_fp, params_str)

    commands.append([('Assign taxonomy', assign_taxonomy_cmd)])

    # Prep the OTU table building command
    otu_table_fp = '%s/otu_table.biom' % output_dir
    try:
        params_str = get_params_str(params['make_otu_table'])
    except KeyError:
        params_str = ''
    # Build the OTU table building command
    make_otu_table_cmd = 'make_otu_table.py -i %s -t %s -o %s %s' %\
        (otu_fp, taxonomy_fp, otu_table_fp, params_str)
    commands.append([('Make OTU table', make_otu_table_cmd)])

    if cluster_failures:
        # Also build a table restricted to the reference-based OTU map so
        # reference-only analyses remain possible; reuses the
        # make_otu_table params_str computed above.
        reference_otu_table_fp = '%s/reference_only_otu_table.biom' % output_dir
        # Build the OTU table building command
        make_otu_table_cmd = 'make_otu_table.py -i %s -t %s -o %s %s' %\
            (reference_otu_fp, taxonomy_fp, reference_otu_table_fp, params_str)
        commands.append(
            [('Make reference-only OTU table', make_otu_table_cmd)])

    # Prep the pynast alignment command
    try:
        alignment_method = params['align_seqs']['alignment_method']
    except KeyError:
        alignment_method = 'pynast'
    pynast_dir = '%s/%s_aligned_seqs' % (output_dir, alignment_method)
    aln_fp = '%s/%s_rep_set_aligned.fasta' % (pynast_dir, input_basename)
    if parallel and alignment_method == 'pynast':
        # Grab the parallel-specific parameters
        try:
            params_str = get_params_str(params['parallel'])
        except KeyError:
            params_str = ''

        # Grab the alignment parameters
        # Want to find a cleaner strategy for this: the parallel script
        # is method-specific, so doesn't take a --alignment_method
        # option. This works for now though.
        try:
            d = params['align_seqs'].copy()
        except KeyError:
            d = {}
        try:
            del d['alignment_method']
        except KeyError:
            pass
        params_str += ' %s' % get_params_str(d)

        # Build the parallel pynast alignment command
        align_seqs_cmd = 'parallel_align_seqs_pynast.py -i %s -o %s -T %s' %\
            (rep_set_fp, pynast_dir, params_str)
    else:
        try:
            params_str = get_params_str(params['align_seqs'])
        except KeyError:
            params_str = ''
        # Build the pynast alignment command
        align_seqs_cmd = 'align_seqs.py -i %s -o %s %s' %\
            (rep_set_fp, pynast_dir, params_str)

    commands.append([('Align sequences', align_seqs_cmd)])

    # Prep the alignment filtering command
    filtered_aln_fp = '%s/%s_rep_set_aligned_pfiltered.fasta' %\
        (pynast_dir, input_basename)
    try:
        params_str = get_params_str(params['filter_alignment'])
    except KeyError:
        params_str = ''
    # Build the alignment filtering command
    filter_alignment_cmd = 'filter_alignment.py -o %s -i %s %s' %\
        (pynast_dir, aln_fp, params_str)
    commands.append([('Filter alignment', filter_alignment_cmd)])

    # Prep the tree building command
    tree_fp = '%s/rep_set.tre' % output_dir
    try:
        params_str = get_params_str(params['make_phylogeny'])
    except KeyError:
        params_str = ''
    # Build the tree building command
    make_phylogeny_cmd = 'make_phylogeny.py -i %s -o %s %s' %\
        (filtered_aln_fp, tree_fp, params_str)
    commands.append([('Build phylogenetic tree', make_phylogeny_cmd)])

    # Call the command handler on the list of commands
    command_handler(commands,
                    status_update_callback,
                    logger=logger,
                    close_logger_on_success=close_logger_on_success)

    return abspath(tree_fp), abspath(otu_table_fp)
run_qiime_data_preparation = run_pick_otus_through_otu_table = run_pick_de_novo_otus
def run_pick_closed_reference_otus(
        input_fp,
        refseqs_fp,
        output_dir,
        taxonomy_fp,
        command_handler,
        params,
        qiime_config,
        assign_taxonomy=False,
        parallel=False,
        logger=None,
        suppress_md5=False,
        status_update_callback=print_to_stdout):
    """ Run the data preparation steps of Qiime

    The steps performed by this function are:
      1) Pick OTUs;
      2) If assignment_taxonomy is True, choose representative sequence
         for OTUs and assign taxonomy using a classifier.
      3) Build an OTU table with optional predefined taxonomy
         (if assign_taxonomy=False) or taxonomic assignments from step 2
         (if assign_taxonomy=True).

    input_fp: path to the input sequences (fasta) file
    refseqs_fp: path to the reference sequence collection
    taxonomy_fp: path to precomputed taxonomic assignments, or None
    command_handler: callable that executes the assembled shell commands
    params: dict of per-script parameter dicts (e.g. params['pick_otus'])
    qiime_config: QIIME configuration dict, recorded in the log
    assign_taxonomy: if True, assign taxonomy with a classifier instead
     of (or overriding) the precomputed taxonomy_fp
    parallel: if True, use the parallel variants of supported scripts
    logger: an open WorkflowLogger; if None, one is created here and
     closed when the workflow succeeds
    suppress_md5: if True, skip logging md5 sums of the input files
    status_update_callback: called with a status string as each step runs
    """
    # confirm that a valid otu picking method was supplied before doing
    # any work
    reference_otu_picking_methods = ['blast', 'uclust_ref', 'usearch61_ref',
                                     'usearch_ref', 'sortmerna']

    try:
        otu_picking_method = params['pick_otus']['otu_picking_method']
    except KeyError:
        otu_picking_method = 'uclust_ref'
    assert otu_picking_method in reference_otu_picking_methods,\
        "Invalid OTU picking method supplied: %s. Valid choices are: %s"\
        % (otu_picking_method, ' '.join(reference_otu_picking_methods))

    # Prepare some variables for the later steps
    input_dir, input_filename = split(input_fp)
    input_basename, input_ext = splitext(input_filename)
    create_dir(output_dir)
    commands = []
    if logger is None:
        # We created the logger, so close it once the workflow succeeds.
        logger = WorkflowLogger(generate_log_fp(output_dir),
                                params=params,
                                qiime_config=qiime_config)
        close_logger_on_success = True
    else:
        close_logger_on_success = False

    if not suppress_md5:
        log_input_md5s(logger, [input_fp, refseqs_fp, taxonomy_fp])

    # Prep the OTU picking command
    pick_otu_dir = '%s/%s_picked_otus' % (output_dir, otu_picking_method)
    otu_fp = '%s/%s_otus.txt' % (pick_otu_dir, input_basename)
    if parallel and (otu_picking_method == 'blast' or
                     otu_picking_method == 'uclust_ref' or
                     otu_picking_method == 'usearch61_ref' or
                     otu_picking_method == 'sortmerna'):
        # Grab the parallel-specific parameters
        try:
            params_str = get_params_str(params['parallel'])
        except KeyError:
            params_str = ''

        # Grab the OTU picker parameters
        try:
            # Want to find a cleaner strategy for this: the parallel script
            # is method-specific, so doesn't take a --alignment_method
            # option. This works for now though.
            d = params['pick_otus'].copy()
            if 'otu_picking_method' in d:
                del d['otu_picking_method']
            params_str += ' %s' % get_params_str(d)
        except KeyError:
            pass
        otu_picking_script = 'parallel_pick_otus_%s.py' % otu_picking_method
        # Build the OTU picking command
        pick_otus_cmd = '%s -i %s -o %s -r %s -T %s' %\
            (otu_picking_script,
             input_fp,
             pick_otu_dir,
             refseqs_fp,
             params_str)
    else:
        try:
            params_str = get_params_str(params['pick_otus'])
        except KeyError:
            params_str = ''
        # Since this is reference-based OTU picking we always want to
        # suppress new clusters -- force it here.
        params_str += ' --suppress_new_clusters'
        logger.write(
            "Forcing --suppress_new_clusters as this is "
            "closed-reference OTU picking.\n\n")
        # Build the OTU picking command
        pick_otus_cmd = 'pick_otus.py -i %s -o %s -r %s -m %s %s' %\
            (input_fp,
             pick_otu_dir,
             refseqs_fp,
             otu_picking_method,
             params_str)

    commands.append([('Pick OTUs', pick_otus_cmd)])

    # Assign taxonomy using a taxonomy classifier, if request by the user.
    # (Alternatively predefined taxonomic assignments will be used, if provided.)
    if assign_taxonomy:
        # Prep the representative set picking command
        rep_set_dir = '%s/rep_set/' % output_dir
        create_dir(rep_set_dir)
        rep_set_fp = '%s/%s_rep_set.fasta' % (rep_set_dir, input_basename)
        rep_set_log_fp = '%s/%s_rep_set.log' % (rep_set_dir, input_basename)
        try:
            params_str = get_params_str(params['pick_rep_set'])
        except KeyError:
            params_str = ''
        # Build the representative set picking command
        pick_rep_set_cmd = 'pick_rep_set.py -i %s -f %s -l %s -o %s %s' %\
            (otu_fp, input_fp, rep_set_log_fp, rep_set_fp, params_str)
        commands.append([('Pick representative set', pick_rep_set_cmd)])

        # Prep the taxonomy assignment command
        try:
            assignment_method = params['assign_taxonomy']['assignment_method']
        except KeyError:
            assignment_method = 'uclust'
        assign_taxonomy_dir = '%s/%s_assigned_taxonomy' %\
            (output_dir, assignment_method)
        # NOTE: this rebinds the taxonomy_fp parameter; from here on it
        # points at the classifier output rather than the user-supplied file.
        taxonomy_fp = '%s/%s_rep_set_tax_assignments.txt' % \
            (assign_taxonomy_dir, input_basename)
        if parallel and (assignment_method == 'rdp' or
                         assignment_method == 'blast' or
                         assignment_method == 'uclust'):
            # Grab the parallel-specific parameters
            try:
                params_str = get_params_str(params['parallel'])
            except KeyError:
                params_str = ''

            # Grab the taxonomy assignment parameters
            try:
                # Want to find a cleaner strategy for this: the parallel script
                # is method-specific, so doesn't take a --assignment_method
                # option. This works for now though.
                d = params['assign_taxonomy'].copy()
                if 'assignment_method' in d:
                    del d['assignment_method']
                params_str += ' %s' % get_params_str(d)
            except KeyError:
                pass

            # Build the parallel taxonomy assignment command
            assign_taxonomy_cmd = \
                'parallel_assign_taxonomy_%s.py -i %s -o %s -T %s' %\
                (assignment_method, rep_set_fp, assign_taxonomy_dir, params_str)
        else:
            try:
                params_str = get_params_str(params['assign_taxonomy'])
            except KeyError:
                params_str = ''
            # Build the taxonomy assignment command
            assign_taxonomy_cmd = 'assign_taxonomy.py -o %s -i %s %s' %\
                (assign_taxonomy_dir, rep_set_fp, params_str)

        commands.append([('Assign taxonomy', assign_taxonomy_cmd)])

    # Prep the OTU table building command
    otu_table_fp = '%s/otu_table.biom' % output_dir
    try:
        params_str = get_params_str(params['make_otu_table'])
    except KeyError:
        params_str = ''
    # If assign_taxonomy is True, this will be the path to the taxonomic
    # assignment results. If assign_taxonomy is False this will be either
    # the precomputed taxonomic assignments that the user passed in,
    # or None.
    if taxonomy_fp:
        taxonomy_str = '-t %s' % taxonomy_fp
    else:
        taxonomy_str = ''
    # Build the OTU table building command
    make_otu_table_cmd = 'make_otu_table.py -i %s %s -o %s %s' %\
        (otu_fp, taxonomy_str, otu_table_fp, params_str)
    commands.append([('Make OTU table', make_otu_table_cmd)])

    # Call the command handler on the list of commands
    command_handler(commands,
                    status_update_callback,
                    logger=logger,
                    close_logger_on_success=close_logger_on_success)
run_pick_reference_otus_through_otu_table = run_pick_closed_reference_otus
| adamrp/qiime | qiime/workflow/upstream.py | Python | gpl-2.0 | 19,937 |
/* Copyright_License {
XCSoar Glide Computer - http://www.xcsoar.org/
Copyright (C) 2000-2016 The XCSoar Project
A detailed list of copyright holders can be found in the file "AUTHORS".
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
}
*/
#include "FlatTriangleFan.hpp"
#include "Math/Line2D.hpp"
#include <assert.h>
void
FlatTriangleFan::CalcBoundingBox()
{
  assert(!vs.empty());

  /* seed the box from the first vertex, then grow it over the rest */
  bool first = true;
  for (const auto &v : vs) {
    if (first) {
      bounding_box = FlatBoundingBox(v);
      first = false;
    } else
      bounding_box.Expand(v);
  }
}
/**
 * Do the three consecutive vertices a, b, c form a degenerate "spike",
 * i.e. does c fall on the line defined by a and b?
 * NOTE(review): relies on Line2D::Contains performing a collinearity
 * test — confirm its semantics in Math/Line2D.hpp.
 */
static constexpr bool
IsSpike(FlatGeoPoint a, FlatGeoPoint b, FlatGeoPoint c)
{
  return Line2D<FlatGeoPoint>(a, b).Contains(c);
}
void
FlatTriangleFan::AddOrigin(const AFlatGeoPoint &origin, size_t reserve)
{
  /* must be the very first vertex of the fan */
  assert(vs.empty());

  /* pre-allocate room for the origin plus the expected vertex count */
  vs.reserve(reserve + 1);
  vs.push_back(origin);

  height = origin.altitude;
}
void
FlatTriangleFan::AddPoint(FlatGeoPoint p)
{
  assert(!vs.empty());

  /* reject a vertex identical to the previous one */
  const bool duplicate = p == vs.back();

  /* reject a vertex that would make the last three points collinear */
  const bool spike = vs.size() >= 2 &&
    IsSpike(vs[vs.size() - 2], vs.back(), p);

  if (duplicate || spike)
    return;

  vs.push_back(p);
}
/**
 * Check whether the closing seam of the polygon — where the end of the
 * vertex container meets its beginning — forms a spike.
 */
static bool
IsWrappedSpike(ConstBuffer<FlatGeoPoint> hull)
{
  assert(hull.size > 3);

  const auto n = hull.size;

  /* spike ending at the first vertex */
  if (IsSpike(hull[n - 2], hull[n - 1], hull[0]))
    return true;

  /* spike ending at the second vertex */
  return IsSpike(hull[n - 1], hull[0], hull[1]);
}
/* Finalise the fan: repeatedly trim trailing vertices that form spikes
   across the begin/end seam of the hull, and report whether enough
   vertices remain to describe an area.  Each trimmed spike removes one
   vertex from both the working hull view and the backing vector vs. */
bool
FlatTriangleFan::CommitPoints(bool closed)
{
  auto hull = GetHull(closed);
  while (hull.size > 3) {
    if (!IsWrappedSpike(hull))
      /* no spikes left: success! */
      return true;

    /* erase this spike */
    vs.pop_back();
    hull.pop_back();

    /* .. and continue searching */
  }

  /* not enough points: fail */
  return false;
}
/* Point-in-polygon test using the even-odd (crossing number) rule:
   toggle `inside` for every polygon edge that crosses the horizontal
   ray extending from p. */
bool
FlatTriangleFan::IsInside(FlatGeoPoint p, bool closed) const
{
  /* cheap bounding-box rejection before the O(n) edge walk */
  if (!bounding_box.IsInside(p))
    return false;

  bool inside = false;
  const auto hull = GetHull(closed);
  /* j trails i by one vertex; starts at the last vertex so the closing
     edge (last, first) is tested too */
  for (auto i = hull.begin(), end = hull.end(), j = std::prev(end);
       i != end; j = i++) {
    if ((i->y > p.y) == (j->y > p.y))
      /* edge does not straddle the horizontal line through p */
      continue;

    const FlatGeoPoint ji = *j - *i;
    const FlatGeoPoint pi = p - *i;

    /* edge straddles p's y, so ji.y != 0 and the division is safe.
       NOTE(review): integer division truncates, so points lying very
       close to an edge may land on either side — presumably acceptable
       at this integer flat-projection resolution; confirm. */
    if (0 < ji.x * pi.y / ji.y - pi.x)
      inside = !inside;
  }

  return inside;
}
| Exadios/XCSoar-the-library | src/Engine/Route/FlatTriangleFan.cpp | C++ | gpl-2.0 | 2,904 |
/* ==========================================================================
Normalize.scss settings
========================================================================== */
/**
* Includes legacy browser support IE6/7
*
* Set to false if you want to drop support for IE6 and IE7
*/
/* ==========================================================================
HTML5 display definitions
========================================================================== */
/*
* Corrects `block` display not defined in IE 8/9.
*/
article,
aside,
details,
figcaption,
figure,
footer,
header,
hgroup,
main,
nav,
section,
summary {
  /* stray "hello" debug comment removed from shipped stylesheet */
  display: block;
}
/**
* Correct `inline-block` display not defined in IE 6/7/8/9 and Firefox 3.
*/
audio,
canvas,
video {
display: inline-block;
}
/**
* Prevents modern browsers from displaying `audio` without controls.
* Remove excess height in iOS 5 devices.
*/
audio:not([controls]) {
display: none;
height: 0;
}
/**
* Address styling not present in IE 8/9.
*/
[hidden] {
display: none;
}
/* ==========================================================================
Base
========================================================================== */
/**
 * 1. Set default font family to sans-serif.
 * 2. Prevent iOS text size adjust after orientation change, without disabling
 *    user zoom.
 * 3. Correct text resizing oddly in IE 6/7 when body `font-size` is set using
 *    `em` units.
 */
html {
font-family: sans-serif;
/* 1 */
-ms-text-size-adjust: 100%;
/* 2 */
-webkit-text-size-adjust: 100%;
/* 2 */
}
/**
* Remove default margin.
*/
body {
margin: 0;
}
/* ==========================================================================
Links
========================================================================== */
/**
* Address `outline` inconsistency between Chrome and other browsers.
*/
/**
* Improves readability when focused and also mouse hovered in all browsers.
*/
a:focus {
outline: thin dotted;
}
a:active,
a:hover {
outline: 0;
}
/* ==========================================================================
Typography
========================================================================== */
/**
* Addresses font sizes and margins set differently in IE 6/7.
* Address variable `h1` font-size and margin within `section` and `article`
* contexts in Firefox 4+, Safari 5, and Chrome.
*/
/**
* Address styling not present in IE 8/9, Safari 5, and Chrome.
*/
abbr[title] {
border-bottom: 1px dotted;
}
/**
* Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome.
*/
b,
strong {
font-weight: bold;
}
/**
* Address styling not present in Safari 5 and Chrome.
*/
dfn {
font-style: italic;
}
/**
* Address differences between Firefox and other browsers.
*/
hr {
-moz-box-sizing: content-box;
box-sizing: content-box;
height: 0;
}
/**
* Addresses styling not present in IE 8/9.
*/
mark {
background: #ff0;
color: #000;
}
/**
* Addresses margins set differently in IE 6/7.
*/
/**
* Correct font family set oddly in Safari 5 and Chrome.
*/
code,
kbd,
pre,
samp {
font-family: monospace, serif;
font-size: 1em;
}
/**
* Improve readability of pre-formatted text in all browsers.
*/
pre {
white-space: pre-wrap;
}
/**
* Set consistent quote types.
*/
q {
quotes: "\201C" "\201D" "\2018" "\2019";
}
/**
* Address inconsistent and variable font size in all browsers.
*/
small {
font-size: 80%;
}
/**
* Prevent `sub` and `sup` affecting `line-height` in all browsers.
*/
sub,
sup {
font-size: 75%;
line-height: 0;
position: relative;
vertical-align: baseline;
}
sup {
top: -0.5em;
}
sub {
bottom: -0.25em;
}
/* ==========================================================================
Embedded content
========================================================================== */
/**
* 1. Remove border when inside `a` element in IE 8/9.
* 2. Improves image quality when scaled in IE 7.
*/
img {
border: 0;
}
/**
* Correct overflow displayed oddly in IE 9.
*/
svg:not(:root) {
overflow: hidden;
}
/* ==========================================================================
Figures
========================================================================== */
/**
* Address margin not present in IE 8/9 and Safari 5.
*/
figure {
margin: 0;
}
/* ==========================================================================
Forms
========================================================================== */
/**
* Define consistent border, margin, and padding.
*/
fieldset {
border: 1px solid #c0c0c0;
margin: 0 2px;
padding: 0.35em 0.625em 0.75em;
}
/**
* 1. Correct `color` not being inherited in IE 8/9.
* 2. Remove padding so people aren't caught out if they zero out fieldsets.
* 3. Corrects text not wrapping in Firefox 3.
* 4. Corrects alignment displayed oddly in IE 6/7.
*/
legend {
border: 0;
/* 1 */
padding: 0;
/* 2 */
}
/**
* 1. Correct font family not being inherited in all browsers.
* 2. Correct font size not being inherited in all browsers.
* 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome.
* 4. Improves appearance and consistency in all browsers.
*/
button,
input,
select,
textarea {
font-family: inherit;
/* 1 */
font-size: 100%;
/* 2 */
margin: 0;
/* 3 */
}
/**
* Address Firefox 4+ setting `line-height` on `input` using `!important` in
* the UA stylesheet.
*/
button,
input {
line-height: normal;
}
/**
* Address inconsistent `text-transform` inheritance for `button` and `select`.
* All other form control elements do not inherit `text-transform` values.
* Correct `button` style inheritance in Chrome, Safari 5+, and IE 8+.
* Correct `select` style inheritance in Firefox 4+ and Opera.
*/
button,
select {
text-transform: none;
}
/**
* 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio`
* and `video` controls.
* 2. Correct inability to style clickable `input` types in iOS.
* 3. Improve usability and consistency of cursor style between image-type
* `input` and others.
* 4. Removes inner spacing in IE 7 without affecting normal text inputs.
* Known issue: inner spacing remains in IE 6.
*/
button,
html input[type="button"],
input[type="reset"],
input[type="submit"] {
-webkit-appearance: button;
/* 2 */
cursor: pointer;
/* 3 */
}
/**
* Re-set default cursor for disabled elements.
*/
button[disabled],
html input[disabled] {
cursor: default;
}
/**
* 1. Address box sizing set to `content-box` in IE 8/9.
* 2. Remove excess padding in IE 8/9.
* 3. Removes excess padding in IE 7.
* Known issue: excess padding remains in IE 6.
*/
input[type="checkbox"],
input[type="radio"] {
-moz-box-sizing: border-box;
box-sizing: border-box;
/* 1 */
padding: 0;
/* 2 */
}
/**
* 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome.
* 2. Address `box-sizing` set to `border-box` in Safari 5 and Chrome
* (include `-moz` to future-proof).
*/
input[type="search"] {
-webkit-appearance: textfield;
/* 1 */
-moz-box-sizing: content-box;
/* 2 */
box-sizing: content-box;
}
/**
* Remove inner padding and search cancel button in Safari 5 and Chrome
* on OS X.
*/
input[type="search"]::-webkit-search-cancel-button,
input[type="search"]::-webkit-search-decoration {
-webkit-appearance: none;
}
/**
* Remove inner padding and border in Firefox 4+.
*/
button::-moz-focus-inner,
input::-moz-focus-inner {
border: 0;
padding: 0;
}
/**
* 1. Remove default vertical scrollbar in IE 8/9.
* 2. Improve readability and alignment in all browsers.
*/
textarea {
overflow: auto;
/* 1 */
vertical-align: top;
/* 2 */
}
/* ==========================================================================
Tables
========================================================================== */
/**
* Remove most spacing between table cells.
*/
table {
border-collapse: collapse;
border-spacing: 0;
}
/* Generated with FontPrep app http://fontprep.com/ */
@font-face {
font-family: 'AlternateGothic2 BT';
src: url("../fonts/alternate-gothic2.eot");
/* IE9 Compat Modes */
src: url("../fonts/alternate-gothic2.eot?#iefix") format("embedded-opentype"), url("../fonts/alternate-gothic2.woff") format("woff"), url("../fonts/alternate-gothic2.ttf") format("truetype"), url("../fonts/alternate-gothic2.svg#022c20d3c2b5208bc4ff771e4b5afcc9") format("svg");
/* Legacy iOS */
font-style: normal;
font-weight: 400;
}
/*
* Web Fonts from fontspring.com
*
* All OpenType features and all extended glyphs have been removed.
* Fully installable fonts can be purchased at http://www.fontspring.com
*
* The fonts included in this stylesheet are subject to the End User License you purchased
* from Fontspring. The fonts are protected under domestic and international trademark and
* copyright law. You are prohibited from modifying, reverse engineering, duplicating, or
* distributing this font software.
*
* (c) 2010-2012 Fontspring
*
*
*
*
* The fonts included are copyrighted by the vendor listed below.
*
* Vendor: exljbris Font Foundry
* License URL: http://www.fontspring.com/fflicense/exljbris
*
*
*/
@font-face {
font-family: 'museo_slab500';
src: url("../fonts/Museo_Slab_500_2-webfont.eot");
src: url("../fonts/Museo_Slab_500_2-webfont.eot?#iefix") format("embedded-opentype"), url("../fonts/Museo_Slab_500_2-webfont.woff") format("woff"), url("../fonts/Museo_Slab_500_2-webfont.ttf") format("truetype"), url("../fonts/Museo_Slab_500_2-webfont.svg#museo_slab500") format("svg");
font-weight: normal;
font-style: normal;
}
@font-face {
font-family: 'Museo300Regular';
src: url("../fonts/Museo300-Regular-webfont.eot");
src: url("../fonts/Museo300-Regular-webfont.eot?#iefix") format("embedded-opentype"), url("../fonts/Museo300-Regular-webfont.woff") format("woff"), url("../fonts/Museo300-Regular-webfont.ttf") format("truetype"), url("../fonts/Museo300-Regular-webfont.svg#Museo300Regular") format("svg");
font-weight: normal;
font-style: normal;
}
@font-face {
font-family: 'Museo500Regular';
src: url("../fonts/Museo500-Regular-webfont.eot");
src: url("../fonts/Museo500-Regular-webfont.eot?#iefix") format("embedded-opentype"), url("../fonts/Museo500-Regular-webfont.woff") format("woff"), url("../fonts/Museo500-Regular-webfont.ttf") format("truetype"), url("../fonts/Museo500-Regular-webfont.svg#Museo500Regular") format("svg");
font-weight: normal;
font-style: normal;
}
@font-face {
font-family: 'bills';
src: url("../fonts/bills.eot");
src: url("../fonts/bills.eot?#iefix") format("embedded-opentype"), url("../fonts/bills.woff") format("woff"), url("../fonts/bills.ttf") format("truetype"), url("../fonts/bills.svg#bills") format("svg");
font-weight: normal;
font-style: normal;
}
.icon-ignite-logo,
.icon-facebook,
.icon-twitter,
.icon-pinterest,
.icon-googleplus,
.icon-reorder,
.icon-pointer,
.icon-arrow-right,
.icon-arrow-left,
.icon-angle-down,
.icon-circle-arrow-down,
.icon-menus,
.icon-capacity,
.icon-hours,
.icon-search,
.icon-venue-stretch,
.icon-angle-up,
.icon-angle-right,
.icon-angle-left {
font-family: 'bills';
speak: none;
font-style: normal;
font-weight: normal;
font-variant: normal;
text-transform: none;
-webkit-font-smoothing: antialiased;
}
.icon-ignite-logo:before {
content: "\e000";
}
.icon-facebook:before {
content: "\e001";
}
.icon-twitter:before {
content: "\e002";
}
.icon-pinterest:before {
content: "\e003";
}
.icon-googleplus:before {
content: "\e004";
}
.icon-reorder:before {
content: "\e005";
}
.icon-pointer:before {
content: "\e006";
}
.icon-arrow-right:before {
content: "\e007";
}
.icon-arrow-left:before {
content: "\e008";
}
.icon-angle-down:before {
content: "\e009";
}
.icon-circle-arrow-down:before {
content: "\e010";
}
.icon-menus:before {
content: "\e011";
}
.icon-capacity:before {
content: "\e012";
}
.icon-hours:before {
content: "\e013";
}
.icon-search:before {
content: "\f002";
}
.icon-venue-stretch:before {
content: "\e00a";
}
.icon-angle-up:before {
content: "\f106";
}
.icon-angle-right:before {
content: "\f105";
}
.icon-angle-left:before {
content: "\f104";
}
/* TYPOGRAPHY */
/* COLOURS */
/* BORDERS */
/* SPACING */
/* RELATIVE SIZES */
/* BREAKPOINTS */
/* LOGO */
/* IE */
/*------------------------------------*\
$FONT SIZE
\*------------------------------------*/
/*------------------------------------*\
$MEDIA QUERY
\*------------------------------------*/
/*------------------------------------*\
$CLEARFIX
\*------------------------------------*/
.clearfix:after,
.media:after,
.wrapper:after,
.site-header:after,
.logo-wrapper:after,
.site-social:after,
.site-social__list:after,
.pagination:after,
.menu-location:after,
.venue:after,
.venue__row:after,
.page--hours-and-locations .booking-widget:after,
.booking-widget__form:after {
content: "";
display: table;
clear: both;
}
/*------------------------------------*\
$IMAGE REPLACEMENT
\*------------------------------------*/
.image-replacement {
border: 0;
font: 0/0 a;
text-shadow: none;
color: transparent;
background-color: transparent;
}
/*------------------------------------*\
$JAVASCRIPT MEDIA QUERIES
\*------------------------------------*/
/**
* Sync CSS media queries with perfectly using the following javascript:
*
var size = window.getComputedStyle(document.body,':after').getPropertyValue('content');
if (size.indexOf("lap-and-up") !=-1) {
} else if (size.indexOf("desk") !=-1) {
} else if (size.indexOf("desk-wide") !=-1) {
}
*
*/
/* Expose the active breakpoint to JavaScript as a hidden generated-content
   string, read via getComputedStyle(document.body, ':after') — see the
   usage snippet in the comment above. */
body:after {
  display: none;
}
/* lap-and-up: 569px and wider */
@media only screen and (min-width: 569px) {
  body:after {
    content: 'lap-and-up';
  }
}
/* desk: 1024px and wider (string accumulates the narrower labels too) */
@media only screen and (min-width: 1024px) {
  body:after {
    content: 'desk lap-and-up';
  }
}
/* desk-wide: 1200px and wider */
@media only screen and (min-width: 1200px) {
  body:after {
    content: 'desk-wide desk lap-and-up';
  }
}
/*------------------------------------*\
$LISTS
\*------------------------------------*/
/* List helpers: strip default bullet/indent chrome; .inline-list additionally
   lays its children out horizontally. */
.clear-list,
.inline-list {
  list-style-type: none;
  padding: 0;
  margin: 0;
}
.inline-list > * {
  display: inline-block;
}
/*------------------------------------*\
$MEDIA OBJECT
\*------------------------------------*/
/* OOCSS "media object": an image floated beside body content that never wraps
   underneath it. */
.media {
  display: block;
}
.media__img {
  float: left;
  margin-right: 22px;
}
/* --rev variant floats the image on the right instead. */
.media__img--rev {
  float: right;
  margin-left: 22px;
}
.media__img img,
.media__img--rev img {
  display: block;
}
/* overflow: hidden establishes a new block formatting context, so the body
   sits beside the float instead of wrapping under it. */
.media__body {
  overflow: hidden;
}
.media__body > * {
  margin-top: 0;
}
.media__body,
.media__body > :last-child {
  margin-bottom: 0;
}
/*------------------------------------*\
$BUTTONS
\*------------------------------------*/
.button,
.venue-menus a {
display: inline-block;
background-color: #ff8400;
color: white;
padding: 11px;
}
.button--big {
padding: 22px;
}
/*------------------------------------*\
$ICON
\*------------------------------------*/
.icon--left {
margin-right: 5.5px;
}
.icon--right {
margin-left: 5.5px;
}
.icon-rounded {
display: inline-block;
padding: 5.5px;
color: #b3b3b3;
border-radius: 44px;
background-color: white;
}
/*------------------------------------*\
$RESPONSIVE-NAV.JS v1.0.14 by @viljamis
\*------------------------------------*/
/**
 * Vendor code — kept as close to upstream as possible, but the selectors were
 * altered to remove IDs from the CSS:
 * everything changed from '#nav' to '.nav' and from '#nav-toggle' to '.nav-toggle'.
 */
.nav ul {
margin: 0;
padding: 0;
width: 100%;
display: block;
list-style: none;
}
.nav li {
width: 100%;
display: block;
}
.js .nav {
clip: rect(0 0 0 0);
max-height: 0;
position: absolute;
display: block;
overflow: hidden;
zoom: 1;
}
@media only screen and (min-width: 569px) {
.js .nav {
position: relative;
}
}
.js .nav.closed {
max-height: none;
}
.nav.opened {
max-height: 9999px;
}
@media only screen and (min-width: 569px) {
.nav-toggle {
display: none;
}
}
/* ----------------------------------------------------------------
MaxCycle (Fullscreen Slideshow for use with jQuery Cycle Plugin)
----------------------------------------------------------------
Demo at: http://www.aaronvanderzwan.com/maxcycle/
Download and Info at: http://github.com/akv2/MaxCycle---jQuery-Plugin/
Copyright (c) 2007-2011 Aaron Vanderzwan
Dual licensed under the MIT and GPL licenses.
*/
/* Version: 2.0.73 (12-Oct-2012) */
/* Applied to <body> while the fullscreen slideshow runs, to suppress page scrollbars. */
.mc-hide-scrolls {
  overflow: hidden;
}
/* Fixed, viewport-filling backdrop that hosts the cycling slides;
   z-index: -1 keeps it behind all page content. */
body .mc-cycle {
  height: 100%;
  left: 0;
  overflow: hidden;
  position: fixed;
  top: 0;
  width: 100%;
  z-index: -1;
}
div.mc-image {
  /* NOTE: Mozilla flickers when fading with 'transition: all', so the
     transitioned property must be named explicitly. For fading transitions,
     transition 'opacity': */
  -webkit-transition: opacity 1s ease-in-out;
  -o-transition: opacity 1s ease-in-out;
  transition: opacity 1s ease-in-out;
  /* For horizontal slide transitions, transition 'left'.
     NOTE(review): both declaration sets are present, so per the cascade these
     'left' declarations override the 'opacity' ones above — only 'left' is
     animated. If fading is the intended effect, keep the opacity set instead
     (or combine: "transition: opacity 1s ease-in-out, left 1s ease-in-out");
     confirm which transition the slideshow actually uses before changing. */
  -webkit-transition: left 1s ease-in-out;
  -o-transition: left 1s ease-in-out;
  transition: left 1s ease-in-out;
  /* Each slide is a background image scaled to cover the viewport. */
  -webkit-background-size: cover;
  background-size: cover;
  background-position: center center;
  background-repeat: no-repeat;
  height: 100%;
  overflow: hidden;
  width: 100%;
}
.mc-old-browser .mc-image {
  overflow: hidden;
}
/* Global border-box sizing: padding and borders count inside declared widths. */
* {
  -moz-box-sizing: border-box;
  box-sizing: border-box;
}
html {
  position: relative;
  min-height: 100%;
  overflow-x: hidden;
  /* Base type: 0.875em = 14px root font-size, 1.57143 ≈ 22px line-height —
     the 22px rhythm the spacing units below are built on. */
  font: 0.875em/1.57143 "Museo500Regular", sans-serif;
  color: white;
  background: #222222;
}
body {
  height: 100%;
}
h1,
h2,
h3,
h4,
h5,
h6,
ul,
ol,
dl,
blockquote,
p,
address,
table,
fieldset,
figure,
pre,
.main__content,
.news__item,
.post__header,
.post__aside,
.box-heading,
.menu-location,
.menu-nav,
.menu__item,
.venue,
#map_canvas,
#map_canvas_loc,
.media {
margin-bottom: 22px;
margin-bottom: 1.57143rem;
}
.menu-locations,
#map_accordion ul,
.lead,
.landmark {
margin-bottom: 44px;
margin-bottom: 3.14286rem;
}
.menu-nav__item > a {
margin-bottom: 11px;
margin-bottom: 0.78571rem;
}
/* Display face: all headings plus assorted nav/UI elements share the condensed
   uppercase gothic. */
h1,
h2,
h3,
h4,
h5,
h6,
.page-title--mobile,
.book-toggle,
.nav-toggle,
.site-nav,
.splash,
.pagination,
.social-bit,
.post__aside,
.post__back,
.menu-locations__item,
#map_accordion li,
.venue__footer,
.book-circle__inner {
  font-family: "AlternateGothic2 BT", sans-serif;
  font-weight: normal;
  text-transform: uppercase;
  text-rendering: optimizeLegibility;
}
/* Type scale. Each size is declared twice: a px fallback first, then the rem
   equivalent (relative to the 14px root) for browsers that support rem — the
   later declaration wins where supported. */
h1,
.alpha {
  font-size: 36px;
  font-size: 2.57143rem;
  line-height: 1.22222;
}
h2,
.beta {
  font-size: 30px;
  font-size: 2.14286rem;
  line-height: 1.46667;
}
h3,
.gamma {
  font-size: 24px;
  font-size: 1.71429rem;
  line-height: 1.83333;
}
h4,
.delta {
  font-size: 20px;
  font-size: 1.42857rem;
  line-height: 1.1;
}
h5,
.epsilon {
  font-size: 16px;
  font-size: 1.14286rem;
  line-height: 1.375;
}
h6,
.zeta {
  font-size: 14px;
  font-size: 1rem;
  line-height: 1.57143;
}
a {
text-decoration: none;
color: #ff8400;
}
.site-header a {
color: white;
}
.main__content img {
max-width: 100%;
}
/*------------------------------------*\
$WRAPPER
\*------------------------------------*/
.wrapper {
  min-height: 100%;
}
/* Palm: clip any horizontal overflow from edge-to-edge components. */
@media only screen and (max-width: 568px) {
  .wrapper {
    overflow-x: hidden;
  }
}
/* Below desk: dark fade from the top edge so header text stays legible over
   the fullscreen slideshow behind it. */
@media only screen and (max-width: 1023px) {
  .wrapper {
    background: -webkit-linear-gradient(top, rgba(0, 0, 0, 0.9) 0%, transparent 176px);
    background: -o-linear-gradient(top, rgba(0, 0, 0, 0.9) 0%, transparent 176px);
    background: linear-gradient(to bottom, rgba(0, 0, 0, 0.9) 0%, transparent 176px);
  }
}
/* lap-and-up: breathing room around the page. */
@media only screen and (min-width: 569px) {
  .wrapper {
    padding: 22px;
  }
}
/*------------------------------------*\
$HEADER
\*------------------------------------*/
@media only screen and (min-width: 569px) and (max-width: 1023px) {
.site-header {
border-bottom: white solid 5px;
margin-bottom: 22px;
position: relative;
}
}
@media only screen and (min-width: 1024px) {
.site-header {
position: fixed;
left: 0;
top: 0;
bottom: 0;
height: 100%;
padding: 22px;
z-index: 20;
width: 235px;
background: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.9) 0%, transparent 100%);
background: -o-linear-gradient(left, rgba(0, 0, 0, 0.9) 0%, transparent 100%);
background: linear-gradient(to right, rgba(0, 0, 0, 0.9) 0%, transparent 100%);
}
}
@media only screen and (min-width: 1024px) and (max-height: 600px) {
.site-header {
position: absolute;
}
}
/*------------------------------------*\
$MAIN
\*------------------------------------*/
@media only screen and (min-width: 1024px) {
.main,
#footer_nav {
margin-left: 235px;
}
}
@media only screen and (min-width: 1200px) {
.main,
#footer_nav {
margin-right: 191px;
}
}
@media only screen and (min-width: 1024px) {
.main__header {
display: table;
height: 198px;
}
}
.main__content {
padding: 22px 11px;
position: relative;
background: white;
background: rgba(255, 255, 255, 0.95);
color: #222222;
}
@media only screen and (min-width: 569px) {
.main__content {
padding: 33px;
}
}
/*------------------------------------*\
$SITE FOOTER
\*------------------------------------*/
/**
* The footer with the animated restaurant logos is a global ModX chunk for all the BR sites,
* below is the HTML structure of the chunk. The most outer div is not included in the chunk.
*
<div class="site-footer" id="footer_wrap">
<!-- this is where the chunk starts -->
<div id="footer">
<div class="footer_links simply-scroll-container">
<div class="simply-scroll-clip">
<div id="footer_links" class="simply-scroll-list">
<div class="footer_slide_img">
<a><img></a>
</div>
<div class="footer_slide_img">
<a><img></a>
</div>
<div class="footer_slide_img">
<a><img></a>
</div>
</div>
</div>
</div><!-- end #footer_links -->
<div id="footer_nav">
<div id="br_logo">
<a><img></a>
</div>
<div id="rest_green">
<img>
</div>
<div id="footer_nav_links">
<div id="address_ignite">
<a>Privacy, Terms & Conditions</a>
<a id="btmNav_copyright">Copyright 2013</a>
<a id="btmNav_ignite"><span>Restaurant Website Design & SEO by Ignite Hospitality Consultants</span> <img></a>
</div>
</div><!-- end #footer_nav_links -->
</div><!-- end #footer_nav -->
</div><!-- end #footer -->
<!-- this is where the chunk ends -->
</div>
*
*/
.site-footer {
font-size: 12px;
font-size: 0.85714rem;
line-height: 1.83333;
padding: 0 22px;
background: #222222;
}
.site-footer a {
color: white;
display: inline-block;
}
.page--bills-bar-burger .site-footer,
.page--gallery .site-footer {
display: none;
/* height: $unit * 6;
position: absolute;
top: 100%;
left: 0;
width: 100%; */
}
#footer_nav {
padding-bottom: 55px;
position: relative;
text-align: center;
margin-top: 22px;
}
#footer_nav #br_logo {
left: 22px;
}
@media only screen and (min-width: 1024px) {
#footer_nav #br_logo {
left: 0;
}
}
#footer_nav #rest_green {
right: 22px;
}
@media only screen and (min-width: 1024px) {
#footer_nav #rest_green {
right: 0;
}
}
#footer_nav #btmNav_privacy {
margin-top: 66px !important;
}
@media only screen and (min-width: 569px) {
#footer_nav #btmNav_privacy {
margin-top: 0 !important;
}
}
#footer_nav #br_logo,
#footer_nav #rest_green {
position: absolute;
top: 0;
}
#footer_nav #footer_nav_links a {
margin: 0 11px;
}
#footer_nav #btmNav_ignite {
display: block;
}
/*------------------------------------*\
$PAGE TITLE
\*------------------------------------*/
.page-title {
margin: 0;
padding-top: 11px;
padding-bottom: 11px;
font-size: 34px;
font-size: 2.42857rem;
line-height: 1.29412;
}
@media only screen and (max-width: 568px) {
.page-title {
padding-left: 22px;
}
}
@media only screen and (min-width: 569px) {
.page-title {
display: table-cell;
vertical-align: middle;
text-shadow: 10px 10px 50px black, 10px -10px 50px black;
font-size: 70px;
font-size: 5rem;
line-height: 1.25714;
}
}
.page--gallery .page-title,
.page--bills-bar-burger-rockefeller-center .page-title,
.page--bills-bar-burger-meatpacking-district .page-title,
.page--bills-bar-and-burger-atlantic-city .page-title {
font-size: 50px;
font-size: 3.57143rem;
}
.page-title--mobile {
font-size: 50px;
font-size: 3.57143rem;
line-height: 1.32;
}
@media only screen and (max-width: 568px) {
.page-title--mobile {
padding-left: 11px;
}
}
@media only screen and (min-width: 569px) and (max-width: 1023px) {
.page-title--mobile {
position: absolute;
top: 33px;
right: 0;
white-space: pre;
line-height: 77px;
}
}
@media only screen and (min-width: 1024px) {
.page-title--mobile {
display: none;
}
}
.page--bills-bar-burger .page-title--mobile {
display: none;
}
/*------------------------------------*\
$LOGO
\*------------------------------------*/
@media only screen and (max-width: 568px) {
.logo-wrapper {
border-top: 5px solid white;
border-bottom: 5px solid white;
margin-top: 11px;
}
}
@media only screen and (min-width: 569px) and (max-width: 1023px) {
.logo-wrapper {
float: left;
width: 20%;
height: 132px;
}
}
.site-logo {
display: block;
text-align: center;
}
@media only screen and (max-width: 1023px) {
.site-logo {
padding: 16px 11px 0;
}
}
@media only screen and (max-width: 568px) {
.site-logo {
width: 50%;
}
}
@media only screen and (min-width: 569px) and (max-width: 1023px) {
.site-logo {
vertical-align: middle;
}
}
@media only screen and (min-width: 1024px) {
.site-logo {
padding: 22px 0 44px;
border-top: 5px solid white;
}
}
.site-logo img {
vertical-align: bottom;
max-width: 100%;
}
@media only screen and (max-width: 568px) {
.site-logo img {
max-height: 88px;
}
}
.page--bills-bar-burger .site-logo {
border-bottom: 0;
}
/*------------------------------------*\
$TOGGLES
\*------------------------------------*/
.toggles {
border-left: 5px solid white;
float: right;
width: 50%;
}
@media only screen and (min-width: 569px) {
.toggles {
display: none;
border-left: none;
}
}
.book-toggle {
padding: 5.5px 11px;
position: relative;
text-align: center;
cursor: pointer;
font-size: 23px;
font-size: 1.64286rem;
}
.book-toggle.open {
background: white;
color: #ff8400;
}
.book-toggle .icon {
font-size: 30px;
font-size: 2.14286rem;
line-height: 1.46667;
font-weight: bold;
}
.book-drawer {
width: 100%;
max-height: 0;
overflow: hidden;
position: absolute;
right: 0;
color: #222222;
background: white;
-webkit-transition: max-height 400ms ease-in-out;
-o-transition: max-height 400ms ease-in-out;
transition: max-height 400ms ease-in-out;
z-index: 100;
}
.book-drawer.open {
max-height: 100%;
}
.nav-toggle {
padding: 5.5px 11px;
border-top: 5px solid white;
text-align: center;
cursor: pointer;
font-size: 23px;
font-size: 1.64286rem;
line-height: 1.91304;
}
.nav-toggle.open,
.nav-toggle:active {
background: white;
color: #ff8400;
}
/*------------------------------------*\
$NAVIGATION
\*------------------------------------*/
/**
* Some !important tags are needed to overwrite responsive-nav.js, see sass/vendor/_responsive-nav.scss
*/
.site-nav {
font-size: 22px;
font-size: 1.57143rem;
line-height: 1;
}
@media only screen and (max-width: 568px) {
.site-nav {
background: white;
z-index: 2;
}
.site-nav a {
color: #474747;
}
}
@media only screen and (min-width: 569px) {
.site-nav {
max-height: initial;
overflow: visible !important;
}
}
@media only screen and (min-width: 569px) and (max-width: 1023px) {
.site-nav {
border-bottom: white solid 5px;
}
}
@media only screen and (min-width: 1024px) {
.site-nav {
line-height: 33px;
margin-bottom: 22px;
font-size: 26px;
font-size: 1.85714rem;
}
}
@media only screen and (max-width: 568px) {
.site-nav__inner {
padding: 14.66667px 0;
}
}
@media only screen and (max-width: 568px) {
.site-nav__list {
position: relative;
border-top: 1px solid #b3b3b3;
padding: 0;
overflow: hidden;
}
.site-nav__list:before {
content: '';
position: absolute;
display: block;
height: 100%;
width: 50%;
top: 0;
border-right: 1px solid #b3b3b3;
}
}
@media only screen and (min-width: 1024px) {
.site-nav__list {
padding: 11px 0 !important;
border-top: 5px solid white;
border-bottom: 5px solid white;
}
}
@media only screen and (max-width: 568px) {
.site-nav__item {
padding: 22px 11px;
width: 50% !important;
position: relative;
float: left;
border-bottom: 1px solid #b3b3b3;
}
}
@media only screen and (min-width: 569px) and (max-width: 1023px) {
.site-nav__item {
display: table !important;
padding: 0 11px;
text-align: center;
width: 20% !important;
float: left;
border-top: 5px solid white;
border-right: 5px solid white;
height: 66px;
}
.site-nav__item:nth-child(5) {
border-right: 0;
}
}
.site-nav__item a {
display: table-cell;
vertical-align: middle;
}
.site-nav__item a:hover {
color: #ff8400;
}
@media only screen and (min-width: 569px) {
.site-nav__item--home {
display: none !important;
}
}
@media only screen and (min-width: 569px) and (max-width: 1023px) {
.site-nav__item--current,
.page--menus-and-food .site-nav__item--menus,
.page--special-events .site-nav__item--events,
.page--whats-happening .site-nav__item--news,
.page--gallery .site-nav__item--gallery,
.page--about-bills .site-nav__item--about,
.page--hours-and-locations .site-nav__item--locations,
.page--bills-bar-burger-rockefeller-center .site-nav__item--locations,
.page--bills-bar-burger-meatpacking-district .site-nav__item--locations,
.page--bills-bar-burger-atlantic-city .site-nav__item--locations,
.page--bills-bar-burger-downtown .site-nav__item--locations,
.page--book-here .site-nav__item--book {
background: white;
}
}
.site-nav__item--current a,
.page--menus-and-food .site-nav__item--menus a,
.page--special-events .site-nav__item--events a,
.page--whats-happening .site-nav__item--news a,
.page--gallery .site-nav__item--gallery a,
.page--about-bills .site-nav__item--about a,
.page--hours-and-locations .site-nav__item--locations a,
.page--bills-bar-burger-rockefeller-center .site-nav__item--locations a,
.page--bills-bar-burger-meatpacking-district .site-nav__item--locations a,
.page--bills-bar-burger-atlantic-city .site-nav__item--locations a,
.page--bills-bar-burger-downtown .site-nav__item--locations a,
.page--book-here .site-nav__item--book a {
color: #ff8400;
text-decoration: line-through;
}
/*------------------------------------*\
$EXPANDABLE NAVIGATION
\*------------------------------------*/
/*------------------------------------*\
$CURRENT NAVIGATION ITEM
\*------------------------------------*/
/**
 * At the moment the navigation is not being pulled in dynamically by ModX; it is static instead.
 */
/*------------------------------------*\
$INTRO COPY
\*------------------------------------*/
@media only screen and (max-width: 1023px) {
.intro-copy {
display: none;
}
.page--bills-bar-burger .intro-copy {
display: block;
position: absolute;
top: 121px;
padding: 22px;
}
}
@media only screen and (max-height: 700px) {
.intro-copy {
display: none;
}
}
/*------------------------------------*\
$SITE SOCIAL
\*------------------------------------*/
.site-social {
border-top: 5px solid white;
text-align: center;
text-transform: uppercase;
}
@media only screen and (max-width: 568px) {
.site-social {
display: none;
}
}
@media only screen and (min-width: 569px) and (max-width: 1023px) {
.site-social {
float: left;
width: 20%;
height: 66px;
overflow: hidden;
}
}
@media only screen and (min-width: 1024px) {
.site-social {
position: fixed;
width: 191px;
left: 22px;
bottom: 44px;
line-height: 44px;
border: 3px solid white;
font-size: 12px;
font-size: 0.85714rem;
line-height: 1.83333;
}
}
@media only screen and (max-height: 600px) {
.site-social {
position: relative;
left: auto;
bottom: auto;
}
}
.site-social__signup {
display: block;
border-bottom: 3px solid white;
line-height: 33px;
text-align: left;
text-indent: 11px;
}
.site-social__signup:hover {
color: #ff8400;
}
@media only screen and (max-width: 1023px) {
.site-social__signup {
display: none;
}
}
@media only screen and (max-width: 1023px) {
.site-social__title {
display: none;
}
}
@media only screen and (min-width: 1024px) {
.site-social__title {
float: left;
padding: 0 11px;
font-family: "museo_slab500", Verdana, sans-serif;
line-height: 33px;
}
}
@media only screen and (min-width: 569px) and (max-width: 1023px) {
.site-social__list {
line-height: 61px;
}
}
@media only screen and (min-width: 1024px) {
.site-social__list {
float: right;
line-height: 33px;
}
}
@media only screen and (min-width: 569px) {
.site-social__item {
width: 33.333%;
border-left: 5px solid white;
float: left;
}
}
@media only screen and (max-width: 1023px) {
.site-social__item:first-child {
border-left: none;
}
}
@media only screen and (min-width: 1024px) {
.site-social__item {
padding: 0 5.5px;
border-left-width: 3px;
}
}
.site-social__item .icon {
padding: 5.5px;
border-radius: 100%;
color: #222222;
background: white;
}
.site-social__item .icon:hover {
background: #ff8400;
}
/*------------------------------------*\
$GALLERY
\*------------------------------------*/
.gallery-nav {
display: none;
border: 3px solid white;
cursor: pointer;
}
@media only screen and (min-width: 569px) {
.gallery-nav {
display: inline-block;
position: fixed;
bottom: 44px;
right: 44px;
}
}
@media only screen and (max-height: 380px) {
.gallery-nav {
position: absolute;
}
}
.gallery-nav__item {
display: inline-block;
padding: 3.66667px 22px;
color: white;
font-size: 30px;
font-size: 2.14286rem;
line-height: 1.46667;
}
.gallery-nav__item:hover {
color: #ff8400;
}
.gallery-nav__item:first-child {
border-right: 3px solid white;
}
.gallery-select {
display: inline-block;
min-width: 180px;
}
@media only screen and (max-width: 568px) {
.gallery-select {
margin-left: 22px;
}
}
.mc-image[data-href] {
cursor: pointer;
}
.mc-image[data-href=""] {
cursor: default;
}
/*------------------------------------*\
$NEWS
\*------------------------------------*/
.news__item {
height: 220px;
}
@media only screen and (min-width: 569px) {
.news__item {
float: left;
}
}
@media only screen and (min-width: 569px) {
.news__item--small {
width: 33.333%;
}
}
@media only screen and (min-width: 569px) {
.news__item--medium {
width: 63.667%;
margin-left: 3%;
}
}
/*------------------------------------*\
$POST EXCERPT
\*------------------------------------*/
* > .post-excerpt:first-child {
height: 100%;
width: 100%;
}
* > .post-excerpt:first-child .post-excerpt__box,
* > .post-excerpt:first-child .splash {
height: 352px;
}
.post-excerpt__box,
.splash {
display: block;
height: 220px;
margin-bottom: 0;
position: relative;
overflow: hidden;
}
.post-excerpt__image {
max-width: initial !important;
min-width: 100%;
}
.post-excerpt__title {
position: absolute;
bottom: 11px;
left: 11px;
margin-right: 11px;
padding: 11px;
margin-top: 0;
margin-bottom: 0;
background: white;
background: rgba(255, 255, 255, 0.75);
color: #222222;
letter-spacing: .1em;
white-space: pre;
font-size: 18px;
font-size: 1.28571rem;
line-height: 1.22222;
}
/*
.post-excerpt__more{
@extend %heading;
color: $black;
line-height: $unit * 2;
@include font-size(15px, false);
&:hover{
color: $orange;
}
}
*/
/*------------------------------------*\
$SPLASH
\*------------------------------------*/
.splash {
display: none;
}
@media only screen and (min-width: 569px) {
.splash {
display: table;
text-align: center;
letter-spacing: .1em;
}
}
.splash--two {
margin-bottom: 22px;
}
.splash__inner {
display: table-cell;
vertical-align: middle;
}
.splash__title {
margin: 0;
line-height: 44px;
font-size: 37px;
font-size: 2.64286rem;
}
.splash__title__first-line {
display: block;
line-height: 88px;
font-size: 120px;
font-size: 8.57143rem;
}
.splash__subline {
margin: 0;
color: #474747;
line-height: 22px;
font-size: 22px;
font-size: 1.57143rem;
}
/*------------------------------------*\
$PAGINATION
\*------------------------------------*/
.pagination {
clear: both;
color: #222222;
line-height: 44px;
background: url("../images/hr.png") repeat-x, url("../images/hr.png") repeat-x bottom;
}
.pagination a {
width: 49%;
color: #b3b3b3;
font-size: 22px;
font-size: 1.57143rem;
}
.pagination a:last-child {
float: right;
text-align: right;
}
/*------------------------------------*\
$SOCIAL BIT
\*------------------------------------*/
.social-bit {
clear: both;
text-align: center;
line-height: 66px;
background: url("../images/social-bit.png") center center no-repeat;
font-size: 18px;
font-size: 1.28571rem;
}
.social-bit .icon {
padding: 5.5px;
border-radius: 100%;
background: #474747;
color: #e1e1e1;
}
.social-bit .icon:hover {
background: #ff8400;
}
/*------------------------------------*\
$POST
\*------------------------------------*/
.post__header {
min-height: 132px;
max-height: 396px;
position: relative;
overflow: hidden;
}
.post__title {
padding: 11px;
margin-top: 0;
position: absolute;
top: 22px;
left: 22px;
background: #222222;
background: rgba(34, 34, 34, 0.9);
color: white;
}
.post__image {
width: 100%;
}
.post__copy {
overflow: hidden;
}
.post__aside {
letter-spacing: .1em;
font-size: 18px;
font-size: 1.28571rem;
line-height: 1.22222;
}
@media only screen and (min-width: 569px) {
.post__aside {
float: right;
margin-left: 22px;
}
}
.post__back {
display: inline-block;
background: #ff8400;
padding: 11px;
white-space: pre;
}
.post__back a {
color: white;
}
/*------------------------------------*\
$POST SOCIAL
\*------------------------------------*/
.post-social__title {
margin: 0;
line-height: 44px;
font-size: 20px;
font-size: 1.42857rem;
}
.post-social__item {
display: block;
padding: 11px;
margin-bottom: 11px;
color: white;
}
.post-social__item:after {
content: '>';
margin-left: 22px;
}
.post-social__item .icon {
display: inline-block;
padding: 3.66667px;
margin-right: 11px;
background: white;
border-radius: 50px;
}
.post-social__item--twitter {
background-color: #4099ff;
}
.post-social__item--twitter .icon {
color: #4099ff;
}
.post-social__item--facebook {
background-color: #3b5998;
}
.post-social__item--facebook .icon {
color: #3b5998;
}
/*------------------------------------*\
$NEWS BREAKOUT
\*------------------------------------*/
.news-breakout {
display: block;
margin-top: 22px;
text-align: center;
}
/*------------------------------------*\
$EVENT VENUE
\*------------------------------------*/
.events {
margin: 44px 0;
text-align: center;
}
.event-venue {
padding: 0 11px;
margin-bottom: 22px;
}
@media only screen and (min-width: 700px) {
.event-venue {
display: inline-block;
width: 33%;
margin-bottom: 0;
vertical-align: top;
}
}
.event-venue__image {
display: inline-block;
}
.event-venue__title {
font-family: "Museo500Regular", sans-serif;
letter-spacing: .1em;
white-space: pre;
font-size: 16px;
font-size: 1.14286rem;
line-height: 1.375;
}
.event-venue__title a {
color: #222222;
}
.event-venue__link {
display: inline-block;
padding: 11px;
color: white;
background-color: #ff8400;
}
.event-venue__link i {
display: inline-block;
margin-left: 11px;
}
/*------------------------------------*\
$BOX HEADING
\*------------------------------------*/
.box-heading {
padding-bottom: 3px;
margin-top: 0;
background: url("../images/hr-2.png") repeat-x bottom;
font-size: 18px;
font-size: 1.28571rem;
line-height: 1.22222;
}
.box-heading span {
display: inline-block;
padding: 11px 22px;
margin-left: 11px;
background-color: #2b2b2b;
color: white;
}
/*------------------------------------*\
$MENU LOCATIONS
\*------------------------------------*/
.menu-locations,
#map_accordion ul {
background: url("../images/hr-2.png") top repeat-x, url("../images/hr-2.png") bottom repeat-x;
padding: 11px 0;
}
@media only screen and (min-width: 569px) {
.menu-locations,
#map_accordion ul {
text-align: center;
}
}
.menu-locations__item,
#map_accordion li {
letter-spacing: .1em;
font-size: 22px;
font-size: 1.57143rem;
line-height: 1;
}
@media only screen and (min-width: 569px) {
.menu-locations__item,
#map_accordion li {
display: inline-block;
margin: 0 22px;
}
}
.menu-locations__item a,
#map_accordion li a {
display: block;
color: #b3b3b3;
line-height: 44px;
}
@media only screen and (min-width: 1024px) {
.menu-locations__item--active {
position: relative;
}
.menu-locations__item--active::after {
display: block;
content: '';
position: absolute;
bottom: -8px;
border: transparent 11px solid;
border-bottom-color: #2b2b2b;
left: 50%;
margin-left: -5.5px;
}
}
.menu-locations__item--active a {
color: #2b2b2b;
}
/*------------------------------------*\
$MENU LOCATION
\*------------------------------------*/
.menu-location {
font-family: "museo_slab500", Verdana, sans-serif;
}
/*------------------------------------*\
$MENU NAV
\*------------------------------------*/
.menu-nav {
float: left;
margin-right: 22px;
}
@media only screen and (min-width: 569px) {
.menu-nav {
margin-right: 44px;
}
}
.menu-nav__list {
text-transform: uppercase;
}
.menu-nav__item > a {
display: block;
color: #474747;
}
.menu-nav--sub__list {
text-transform: capitalize;
list-style: none;
padding-left: 0;
margin-top: 11px;
}
@media only screen and (min-width: 569px) {
.menu-nav--sub__list {
padding-left: 22px;
}
}
.menu-nav--sub__list a {
color: #b3b3b3;
}
.menu-nav--sub__item--active a {
color: #ff8400;
}
/*------------------------------------*\
$MENU
\*------------------------------------*/
.menu {
overflow: hidden;
position: relative;
}
.menu__title {
margin-top: 0;
font-family: "Museo500Regular", sans-serif;
font-size: 28px;
font-size: 2rem;
}
.menu__item__title {
text-transform: uppercase;
}
/*------------------------------------*\
$DELIVERY BUTTON
\*------------------------------------*/
.delivery-button {
margin-right: 11px;
margin-bottom: 11px;
font-size: 13px;
font-size: 0.92857rem;
line-height: 1.69231;
}
/*------------------------------------*\
$VENUE
\*------------------------------------*/
.venue__header {
height: 308px;
margin-bottom: 33px;
position: relative;
overflow: hidden;
}
.venue__header .button,
.venue__header .venue-menus a,
.venue-menus .venue__header a {
position: absolute;
right: 0;
top: 0;
}
.venue__header img {
max-width: initial;
min-width: 100%;
}
.venue__footer {
padding: 22px 0 22px;
margin: 0;
position: absolute;
bottom: 0;
left: 0;
width: 100%;
background: url("../images/text-footer.png") left bottom repeat-x;
color: white;
text-align: center;
font-size: 20px;
font-size: 1.42857rem;
line-height: 1.1;
}
.venue__footer a {
color: white;
text-decoration: underline;
}
.venue__caption {
width: 100%;
padding: 22px;
margin-bottom: 0;
position: absolute;
bottom: 0;
left: 0;
background-color: #222222;
background-color: rgba(0, 0, 0, 0.7);
font-family: "museo_slab500", Verdana, sans-serif;
color: white;
font-size: 17px;
font-size: 1.21429rem;
line-height: 1.29412;
}
.venue__row {
padding-bottom: 33px;
margin-bottom: 33px;
background: url("../images/hr-2.png") bottom repeat-x;
}
.venue__row > :last-child {
margin-right: 0;
}
* > .venue__row:last-child {
background: none;
padding-bottom: 0;
}
@media only screen and (max-width: 568px) {
.venue__item {
margin-bottom: 44px;
}
}
@media only screen and (min-width: 569px) {
.venue__item {
width: 31.3%;
float: left;
margin-right: 3%;
}
}
.venue__item .title {
font-size: 22px;
font-size: 1.57143rem;
line-height: 1;
}
.venue__item > :first-child {
margin-top: 0;
}
@media only screen and (min-width: 569px) {
.venue__item--double {
width: 65.6%;
}
}
.venue-menus {
padding-right: 22px !important;
}
.venue-menus li {
margin-bottom: 11px;
text-align: center;
font-family: "museo_slab500", Verdana, sans-serif;
font-size: 12px;
font-size: 0.85714rem;
line-height: 1.83333;
}
.venue-menus a {
display: block;
background-color: #b3b3b3;
}
.venue-menus a:after {
content: ' >';
}
.venue-opening p:last-child {
margin-bottom: 0;
}
.venue-quote {
background: url("../images/venue-quote.png") 11px 44px no-repeat;
}
.venue-quote blockquote {
padding: 33px 22px;
}
.venue-contact {
color: #ff8400;
}
.venue-contact img {
margin-top: 22px;
}
.venue-contact .title {
font-size: 20px;
font-size: 1.42857rem;
line-height: 1.1;
margin-top: 0;
}
.venue-link {
overflow: hidden;
}
.venue-link a {
display: block;
position: relative;
text-align: center;
}
.venue-link h2 {
position: relative;
z-index: 1;
display: inline-block;
margin: 44px 0;
padding: 11px;
background: #222222;
background-color: rgba(34, 34, 34, 0.7);
color: white;
}
.venue-link img {
min-width: 100%;
position: absolute;
top: 0;
left: 0;
}
.venue-link--floorplan {
position: relative;
text-align: center;
}
.venue-link--floorplan > a:first-child {
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
z-index: 10;
}
.venue-link--floorplan h2 {
font-size: 22px;
font-size: 1.57143rem;
line-height: 1;
}
.venue-link--ad a {
text-align: left;
}
.venue-link--ad .title {
margin: 0 0 88px;
background-color: transparent;
text-shadow: 10px 10px 50px black;
}
/*------------------------------------*\
$LOCATION BOX
\*------------------------------------*/
.location-box {
padding: 44px 0;
margin-bottom: 0;
border-bottom: 2px dotted #b3b3b3;
}
.location-box:last-child {
border-bottom: none;
padding-bottom: 0;
}
.location-box__image {
height: 198px;
width: 198px;
overflow: hidden;
float: none;
margin: 0 auto 44px;
}
@media only screen and (max-width: 568px) {
.location-box__image {
display: block;
margin-bottom: 22px;
}
}
@media only screen and (min-width: 569px) {
.location-box__image {
float: left;
margin-right: 33px;
margin-bottom: 0;
}
}
.location-box__image img {
border-radius: 198px;
min-height: 100%;
min-width: 100%;
}
.location-box__title {
margin: 0;
font-family: "museo_slab500", Verdana, sans-serif;
font-size: 16px;
font-size: 1.14286rem;
line-height: 1.375;
}
@media only screen and (min-width: 1200px) {
.location-box__left,
.location-box__right {
display: inline-block;
width: 48%;
padding-right: 22px;
vertical-align: top;
}
}
.location-box__right {
padding-right: 66px;
}
.foot-note {
padding: 11px 0;
border-top: 2px dotted #b3b3b3;
border-bottom: 2px dotted #b3b3b3;
margin-bottom: 0;
}
/*------------------------------------*\
$BOOKING WIDGET
\*------------------------------------*/
.booking-widget {
width: 360px;
height: 242px;
padding: 91px 35px 34px 24px;
font-size: 14px !important;
color: #222222 !important;
background: url("../images/booking-widget.png") no-repeat;
}
@media only screen and (max-width: 360px) {
.booking-widget {
position: relative;
left: -33px;
}
}
@media only screen and (max-width: 568px) {
.booking-widget {
margin: 22px auto 0;
}
}
@media only screen and (min-width: 569px) and (max-width: 1023px) {
.booking-widget {
margin: 0 auto 22px;
}
}
@media only screen and (min-width: 569px) and (max-width: 1023px) {
.page--bills-bar-burger .main .booking-widget,
.page--bills-bar-burger #footer_nav .booking-widget {
margin-top: 55px;
}
}
@media only screen and (max-width: 568px) {
.page--bills-bar-burger .main .booking-widget,
.page--bills-bar-burger #footer_nav .booking-widget,
.page--gallery .booking-widget {
display: none;
}
}
@media only screen and (min-width: 569px) {
.page--bills-bar-burger .main .booking-widget,
.page--bills-bar-burger #footer_nav .booking-widget,
.page--gallery .booking-widget {
margin-left: 0;
}
}
@media only screen and (min-width: 1024px) {
.page--bills-bar-burger .main .booking-widget,
.page--bills-bar-burger #footer_nav .booking-widget,
.page--gallery .booking-widget {
left: auto;
position: fixed;
top: 24px;
right: 24px;
}
}
.page--hours-and-locations .booking-widget,
.page--book-here .booking-widget,
.venue-page .booking-widget {
background: url("../images/booking-widget-dark.png") no-repeat;
}
.page--hours-and-locations .booking-widget {
margin: 22px auto 0;
}
@media only screen and (min-width: 1024px) {
.page--hours-and-locations .booking-widget {
width: 726px;
height: auto;
padding: 23px 0 20px 20px;
background: url("../images/booking-widget-vertical.png") top left no-repeat;
}
.page--hours-and-locations .booking-widget .rowElem,
.page--hours-and-locations .booking-widget .submit {
width: 23%;
}
.page--hours-and-locations .booking-widget .submit {
width: auto;
}
.page--hours-and-locations .booking-widget .submit .icon {
float: none;
}
}
.booking-widget .rowElem,
.booking-widget .submit {
display: block;
margin: 0 11px 11px 0;
width: 45%;
float: left;
clear: none !important;
}
.booking-widget .rowElem {
position: relative;
}
.booking-widget .rowElem .jqTransformInputInner div {
margin: 0;
}
.booking-widget .rowElem .icon {
position: absolute;
z-index: 100;
font-size: 18px;
top: 20%;
right: 8%;
color: #222222;
}
.booking-widget .submit .button,
.booking-widget .submit .venue-menus a,
.venue-menus .booking-widget .submit a {
display: block;
padding-top: 0;
padding-bottom: 0;
line-height: 34px;
}
.booking-widget .submit .icon {
font-size: 12px;
float: right;
}
.booking-widget .startdate,
.booking-widget .resttime {
height: 35px;
background: #fff;
border: 1px solid #dfdfdf;
}
.booking-widget .startdate input,
.booking-widget .resttime input {
text-indent: 12px;
outline: none;
}
@media only screen and (max-width: 720px) {
.page--book-here .media__img--rev,
.page--bills-bar-burger-rockefeller-center .media__img--rev,
.page--bills-bar-burger-meatpacking-district .media__img--rev,
.page--bills-bar-and-burger-atlantic-city .media__img--rev {
float: none;
margin-left: 0;
}
}
/*------------------------------------*\
$BOOK BUTTON
\*------------------------------------*/
.book-circle {
font-size: 22px;
font-size: 1.57143rem;
line-height: 1;
display: table;
width: 154px;
height: 154px;
border-radius: 100%;
background: #ee3124;
color: white;
text-align: center;
}
@media only screen and (max-width: 1023px) {
/* 1commentsdif sdf */
.book-circle {
display: none; /* another comment */
}
}
.main__content .book-circle {
position: absolute;
top: -176px;
right: 22px;
}
.page--gallery .book-circle {
position: absolute;
top: 44px;
right: 44px;
display: none;
}
@media only screen and (min-width: 1024px) {
.page--gallery .book-circle.is-visible {
display: table;
}
}
.book-circle__inner {
display: table-cell;
vertical-align: middle;
}
.book-circle__inner .icon {
display: block;
margin: 0 auto 11px;
}
/*------------------------------------*\
$LEAD
\*------------------------------------*/
.lead {
font-size: 16px;
font-size: 1.14286rem;
line-height: 1.375;
}
.venue-lead {
padding-bottom: 33px;
margin-bottom: 33px;
background: url("../images/hr-2.png") repeat-x bottom;
line-height: 33px;
font-size: 20px;
font-size: 1.42857rem;
}
/*------------------------------------*\
$PAGE BACKGROUNDS
\*------------------------------------*/
/**
* Maybe create a mixin for this? Something like:
*
@mixin background-cover($url, $attachment:false){
background: url('$url');
background-size: cover;
filter: progid:DXImageTransform.Microsoft.AlphaImageLoader( src='$url', sizingMethod='scale');
-ms-filter: "progid:DXImageTransform.Microsoft.AlphaImageLoader( src='$url', sizingMethod='scale')";
@if $attachment == fixed{
background-attachment: $attachment;
}
}
@include background-cover('../images/gallery/meatpacking/large/image2.jpg', fixed); // optional 'background-attachment: fixed'?
*
*/
.page--hours-and-locations,
.page--menus-and-food,
.page--bills-bar-burger-rockefeller-center,
.page--bills-bar-burger-meatpacking-district,
.page--bills-bar-burger-atlantic-city,
.page--bills-bar-burger-downtown {
background: url("../images/backgrounds/menus-and-food.jpg");
background-attachment: fixed;
background-size: cover;
}
.page--special-events {
background: url("../images/backgrounds/special-events.jpg");
background-attachment: fixed;
background-size: cover;
}
.page--about-bills,
.page--whats-happening {
background: url("../images/backgrounds/news.jpg");
background-attachment: fixed;
background-size: cover;
}
.page--book-here {
background: url("../images/backgrounds/book-here.jpg");
background-attachment: fixed;
background-size: cover;
}
/*------------------------------------*\
$JQUERY UI
\*------------------------------------*/
/**
* Jquery UI widgets are not build with the new box-model, so we need to reset it just for Jquery UI
*/
.ui-widget * {
-moz-box-sizing: content-box !important;
box-sizing: content-box !important;
}
/*------------------------------------*\
$JQTRANSFORM
\*------------------------------------*/
/**
* The CSS below is used to style form elements through a Jquery plugin called Jqtransform.
* See _jqtransform.scss in the vendor folder.
*/
.jqTransformSelectWrapper {
width: 100% !important;
}
.jqTransformSelectWrapper div span {
display: block !important;
width: 135px;
color: #222222 !important;
text-transform: none;
}
#opentableRezervtionForm .jqTransformSelectWrapper {
color: #222222;
}
#opentableRezervtionForm .jqTransformSelectWrapper a.jqTransformSelectOpen,
#navSelect .jqTransformSelectWrapper a.jqTransformSelectOpen {
  /* z-index takes a unitless integer; `999px` was invalid and the whole
     declaration was being dropped by browsers, leaving the control at
     z-index auto. */
  z-index: 999;
  font-size: 18px;
  width: auto;
  height: auto;
  top: 12%;
  right: 7%;
  color: #222222;
}
.jqTransformSelectWrapper a.jqTransformSelectOpen::after {
font-family: 'bills';
speak: none;
font-style: normal;
font-weight: normal;
font-variant: normal;
text-transform: none;
-webkit-font-smoothing: antialiased;
content: "\e009";
}
.jqTransformSelectWrapper ul {
position: relative;
top: 100%;
overflow: auto;
margin: 0;
padding: 0;
width: 100% !important;
height: 140px;
border-right: 1px solid #CCC;
border-bottom: 1px solid #CCC;
border-left: 1px solid #CCC;
background-color: #FFF;
list-style-type: none;
text-align: left;
position: absolute;
}
.jqTransformSelectWrapper ul a.selected {
background: #fff;
}
.jqTransformSelectWrapper ul a:hover {
background: #e4e0e0;
}
.divElem {
position: relative;
}
#show_datepicker,
#show_timepicker {
position: absolute;
top: 4px;
right: 9px;
}
#show_datepicker:hover,
#show_timepicker:hover {
cursor: pointer;
}
#ui-timepicker-div {
position: relative;
margin-top: 35px;
}
.jqTransformInputInner div input {
width: 100% !important;
line-height: 22px !important;
position: relative !important;
z-index: 100 !important;
border-radius: 0 !important;
}
#map_canvas,
#map_canvas_loc {
height: 400px;
}
#map_tabs {
display: none;
/* I could not disable this in the snippet so I had to hide it like this */
}
#map_accordion {
margin-top: -25px;
}
#map_accordion ul {
padding: 11px 0;
margin: 0 0 22px;
list-style: none;
background: url("../images/hr-2.png") bottom repeat-x;
}
#map_accordion li {
margin: 0 11px !important;
}
#map_accordion li a {
color: #222222;
}
.map_print {
display: none;
}
/*------------------------------------*\
$VENUE BOX (OLD)
\*------------------------------------*/
/**
* All of the'.venue_'... CSS is copied from the old website
* and is used to style the popup boxes in the google maps widget
*/
.venue_box {
width: 272px;
display: inline;
float: left;
margin: 0 17px;
color: #000;
height: 220px !important;
/* I had to add a hight with !important to keep it from breaking */
}
.venue_wrap {
width: 252px;
text-align: center;
background: #fff;
font: 0.8em Arial, Helvetica, sans-serif;
padding: 10px;
}
.venue_image {
height: 115px;
width: 252px;
overflow: hidden;
}
.venue_image img {
height: 115px;
}
.venue_text {
border-bottom: 1px solid #000;
padding: 5px;
overflow: hidden;
}
.venue_info {
height: 50px;
margin-top: 3px;
}
.venue_mealtime {
display: block;
clear: both;
}
.venue_contact {
margin-top: 5px;
}
.venue_contact a {
color: #000;
text-decoration: none;
margin-left: 5px;
}
.venue_contact a:hover {
color: #000;
text-decoration: underline;
}
.venue_cost {
display: none;
margin: 2px auto;
width: 68px;
}
.venue_cuisine {
display: none;
}
.venue_address {
clear: both;
}
.venue_actions {
background: #ff0066;
padding: 2px 0;
}
.venue_actions a {
color: #fff;
margin-left: 8px;
text-decoration: none;
}
.venue_actions a:hover {
text-decoration: underline;
color: #fff;
}
.venue_social {
margin: 15px 0 10px 0;
}
.venue_social a {
margin-right: 10px;
}
@media only screen {
/* hihihih */
html{
color:red; /* hsdfjsdjhf */
line-height: 200;
}
}
@media print {
html{
color:red;
line-height: 200;
}
}
@-webkit-keyframes NAME-YOUR-ANIMATION {
/* hahahah */
0% { opacity: 0; /* helphelp */ }
100% { opacity: 1; }
}
@-moz-keyframes NAME-YOUR-ANIMATION {
0% { opacity: 0; }
100% { opacity: 1; }
}
@-o-keyframes NAME-YOUR-ANIMATION {
0% { opacity: 0; }
100% { opacity: 1; }
}
@keyframes NAME-YOUR-ANIMATION {
0% { opacity: 0; }
100% { opacity: 1; }
} | alexsmander/alexmattorr | wp-content/themes/portfolio/node_modules/grunt-combine-media-queries/test/test3.css | CSS | gpl-2.0 | 59,443 |
<?php
// This file is part of Moodle - http://moodle.org/
//
// Moodle is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Moodle is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Moodle. If not, see <http://www.gnu.org/licenses/>.
/**
* The library file for the MongoDB store plugin.
*
* This file is part of the MongoDB store plugin, it contains the API for interacting with an instance of the store.
*
* @package cachestore_mongodb
* @copyright 2012 Sam Hemelryk
* @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
*/
defined('MOODLE_INTERNAL') || die();
/**
* The MongoDB Cache store.
*
* This cache store uses the MongoDB Native Driver.
* For installation instructions have a look at the following two links:
* - {@link http://www.php.net/manual/en/mongo.installation.php}
* - {@link http://www.mongodb.org/display/DOCS/PHP+Language+Center}
*
* @copyright 2012 Sam Hemelryk
* @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
*/
class cachestore_mongodb extends cache_store implements cache_is_configurable {
/**
* The name of the store
* @var string
*/
protected $name;
/**
* The server connection string. Comma separated values.
* @var string
*/
protected $server = 'mongodb://127.0.0.1:27017';
/**
* The database connection options
* @var array
*/
protected $options = array();
/**
* The name of the database to use.
* @var string
*/
protected $databasename = 'mcache';
/**
* The Connection object
* @var Mongo
*/
protected $connection = false;
/**
* The Database Object
* @var MongoDB
*/
protected $database;
/**
* The Collection object
* @var MongoCollection
*/
protected $collection;
/**
* Determines if and what safe setting is to be used.
* @var bool|int
*/
protected $usesafe = false;
/**
* If set to true then multiple identifiers will be requested and used.
* @var bool
*/
protected $extendedmode = false;
/**
     * The definition hash which is used in the construction of the collection.
* @var string
*/
protected $definitionhash = null;
/**
* Set to true once this store is ready to be initialised and used.
* @var bool
*/
protected $isready = false;
/**
* Set to true if the Mongo extension is < version 1.3.
* If this is the case we must use the legacy Mongo class instead of MongoClient.
* Mongo is backwards compatible, although obviously deprecated.
* @var bool
*/
protected $legacymongo = false;
/**
* Constructs a new instance of the Mongo store.
*
* Noting that this function is not an initialisation. It is used to prepare the store for use.
* The store will be initialised when required and will be provided with a cache_definition at that time.
*
* @param string $name
* @param array $configuration
*/
public function __construct($name, array $configuration = array()) {
$this->name = $name;
if (array_key_exists('server', $configuration)) {
$this->server = $configuration['server'];
}
if (array_key_exists('replicaset', $configuration)) {
$this->options['replicaSet'] = (string)$configuration['replicaset'];
}
if (array_key_exists('username', $configuration) && !empty($configuration['username'])) {
$this->options['username'] = (string)$configuration['username'];
}
if (array_key_exists('password', $configuration) && !empty($configuration['password'])) {
$this->options['password'] = (string)$configuration['password'];
}
if (array_key_exists('database', $configuration)) {
$this->databasename = (string)$configuration['database'];
}
if (array_key_exists('usesafe', $configuration)) {
$this->usesafe = $configuration['usesafe'];
}
if (array_key_exists('extendedmode', $configuration)) {
$this->extendedmode = $configuration['extendedmode'];
}
// Test if the MongoClient class exists, if not we need to switch to legacy classes.
$this->legacymongo = (!class_exists('MongoClient'));
// MongoClient from Mongo 1.3 onwards. Mongo for earlier versions.
$class = ($this->legacymongo) ? 'Mongo' : 'MongoClient';
try {
$this->connection = new $class($this->server, $this->options);
$this->isready = true;
} catch (MongoConnectionException $e) {
// We only want to catch MongoConnectionExceptions here.
}
}
/**
* Returns true if the requirements of this store have been met.
* @return bool
*/
public static function are_requirements_met() {
return class_exists('MongoClient') || class_exists('Mongo');
}
/**
* Returns the supported features.
* @param array $configuration
* @return int
*/
public static function get_supported_features(array $configuration = array()) {
$supports = self::SUPPORTS_DATA_GUARANTEE;
if (array_key_exists('extendedmode', $configuration) && $configuration['extendedmode']) {
$supports += self::SUPPORTS_MULTIPLE_IDENTIFIERS;
}
return $supports;
}
/**
* Returns an int describing the supported modes.
* @param array $configuration
* @return int
*/
public static function get_supported_modes(array $configuration = array()) {
return self::MODE_APPLICATION;
}
/**
* Initialises the store instance for use.
*
* Once this has been done the cache is all set to be used.
*
* @param cache_definition $definition
* @throws coding_exception
*/
public function initialise(cache_definition $definition) {
if ($this->is_initialised()) {
throw new coding_exception('This mongodb instance has already been initialised.');
}
$this->database = $this->connection->selectDB($this->databasename);
$this->definitionhash = 'm'.$definition->generate_definition_hash();
$this->collection = $this->database->selectCollection($this->definitionhash);
$options = array('name' => 'idx_key');
if ($this->legacymongo) {
$options['safe'] = $this->usesafe;
} else {
$options['w'] = $this->usesafe ? 1 : 0;
}
$this->collection->ensureIndex(array('key' => 1), $options);
}
/**
* Returns true if this store instance has been initialised.
* @return bool
*/
public function is_initialised() {
return ($this->database instanceof MongoDB);
}
/**
* Returns true if this store instance is ready to use.
* @return bool
*/
public function is_ready() {
return $this->isready;
}
/**
* Returns true if the given mode is supported by this store.
* @param int $mode
* @return bool
*/
public static function is_supported_mode($mode) {
return ($mode == self::MODE_APPLICATION || $mode == self::MODE_SESSION);
}
/**
* Returns true if this store is making use of multiple identifiers.
* @return bool
*/
public function supports_multiple_identifiers() {
return $this->extendedmode;
}
    /**
     * Retrieves an item from the cache store given its key.
     *
     * @param string|array $key The key to retrieve, or (in extended mode) a criteria array.
     * @return mixed The data that was associated with the key, or false if the key did not exist.
     */
    public function get($key) {
        // Extended-mode callers already pass a criteria array; a plain key is
        // wrapped so it can be used directly as a findOne() query document.
        if (!is_array($key)) {
            $key = array('key' => $key);
        }
        $result = $this->collection->findOne($key);
        // findOne() returns null on a miss; also guard against a document
        // that exists but has no 'data' field.
        if ($result === null || !array_key_exists('data', $result)) {
            return false;
        }
        // NOTE(review): @unserialize() returns false both for corrupt data
        // and for a legitimately cached serialized `false`, so those two
        // cases are indistinguishable to the caller.
        $data = @unserialize($result['data']);
        return $data;
    }
/**
* Retrieves several items from the cache store in a single transaction.
*
* If not all of the items are available in the cache then the data value for those that are missing will be set to false.
*
* @param array $keys The array of keys to retrieve
* @return array An array of items from the cache.
*/
public function get_many($keys) {
if ($this->extendedmode) {
$query = $this->get_many_extendedmode_query($keys);
$keyarray = array();
foreach ($keys as $key) {
$keyarray[] = $key['key'];
}
$keys = $keyarray;
$query = array('key' => array('$in' => $keys));
} else {
$query = array('key' => array('$in' => $keys));
}
$cursor = $this->collection->find($query);
$results = array();
foreach ($cursor as $result) {
$id = (string)$result['key'];
$results[$id] = unserialize($result['data']);
}
foreach ($keys as $key) {
if (!array_key_exists($key, $results)) {
$results[$key] = false;
}
}
return $results;
}
    /**
     * Sets an item in the cache given its key and data value.
     *
     * @param string|array $key The key to use, or (in extended mode) a criteria array.
     * @param mixed $data The data to set.
     * @return bool True if the operation was a success false otherwise.
     */
    public function set($key, $data) {
        // Accept either a plain key or (extended mode) a criteria array.
        if (!is_array($key)) {
            $record = array(
                'key' => $key
            );
        } else {
            $record = $key;
        }
        $record['data'] = serialize($data);
        $options = array('upsert' => true);
        // The pre-1.3 driver expresses write concern via 'safe'; newer
        // drivers use 'w' (1 = acknowledged, 0 = fire-and-forget).
        if ($this->legacymongo) {
            $options['safe'] = $this->usesafe;
        } else {
            $options['w'] = $this->usesafe ? 1 : 0;
        }
        // Delete-then-insert rather than a single update: these are two
        // separate server operations, so a concurrent reader may briefly
        // observe a miss between them.
        $this->delete($key);
        $result = $this->collection->insert($record, $options);
        if ($result === true) {
            // Safe mode is off.
            return true;
        } else if (is_array($result)) {
            // With write concern enabled the driver returns a status array.
            if (empty($result['ok']) || isset($result['err'])) {
                return false;
            }
            return true;
        }
        // Who knows?
        return false;
    }
/**
* Sets many items in the cache in a single transaction.
*
* @param array $keyvaluearray An array of key value pairs. Each item in the array will be an associative array with two
* keys, 'key' and 'value'.
* @return int The number of items successfully set. It is up to the developer to check this matches the number of items
* sent ... if they care that is.
*/
public function set_many(array $keyvaluearray) {
$count = 0;
foreach ($keyvaluearray as $pair) {
$result = $this->set($pair['key'], $pair['value']);
if ($result === true) {
$count++;
}
}
return $count;
}
/**
* Deletes an item from the cache store.
*
* @param string $key The key to delete.
* @return bool Returns true if the operation was a success, false otherwise.
*/
public function delete($key) {
if (!is_array($key)) {
$criteria = array(
'key' => $key
);
} else {
$criteria = $key;
}
$options = array('justOne' => false);
if ($this->legacymongo) {
$options['safe'] = $this->usesafe;
} else {
$options['w'] = $this->usesafe ? 1 : 0;
}
$result = $this->collection->remove($criteria, $options);
if ($result === true) {
// Safe mode.
return true;
} else if (is_array($result)) {
if (empty($result['ok']) || isset($result['err'])) {
return false;
} else if (empty($result['n'])) {
// Nothing was removed.
return false;
}
return true;
}
// Who knows?
return false;
}
/**
* Deletes several keys from the cache in a single action.
*
* @param array $keys The keys to delete
* @return int The number of items successfully deleted.
*/
public function delete_many(array $keys) {
$count = 0;
foreach ($keys as $key) {
if ($this->delete($key)) {
$count++;
}
}
return $count;
}
/**
* Purges the cache deleting all items within it.
*
* @return boolean True on success. False otherwise.
*/
public function purge() {
if ($this->isready) {
$this->collection->drop();
$this->collection = $this->database->selectCollection($this->definitionhash);
}
return true;
}
/**
* Takes the object from the add instance store and creates a configuration array that can be used to initialise an instance.
*
* @param stdClass $data
* @return array
*/
public static function config_get_configuration_array($data) {
$return = array(
'server' => $data->server,
'database' => $data->database,
'extendedmode' => (!empty($data->extendedmode))
);
if (!empty($data->username)) {
$return['username'] = $data->username;
}
if (!empty($data->password)) {
$return['password'] = $data->password;
}
if (!empty($data->replicaset)) {
$return['replicaset'] = $data->replicaset;
}
if (!empty($data->usesafe)) {
$return['usesafe'] = true;
if (!empty($data->usesafevalue)) {
$return['usesafe'] = (int)$data->usesafevalue;
$return['usesafevalue'] = $return['usesafe'];
}
}
return $return;
}
/**
* Allows the cache store to set its data against the edit form before it is shown to the user.
*
* @param moodleform $editform
* @param array $config
*/
public static function config_set_edit_form_data(moodleform $editform, array $config) {
$data = array();
if (!empty($config['server'])) {
$data['server'] = $config['server'];
}
if (!empty($config['database'])) {
$data['database'] = $config['database'];
}
if (isset($config['extendedmode'])) {
$data['extendedmode'] = (bool)$config['extendedmode'];
}
if (!empty($config['username'])) {
$data['username'] = $config['username'];
}
if (!empty($config['password'])) {
$data['password'] = $config['password'];
}
if (!empty($config['replicaset'])) {
$data['replicaset'] = $config['replicaset'];
}
if (isset($config['usesafevalue'])) {
$data['usesafe'] = true;
$data['usesafevalue'] = (int)$data['usesafe'];
} else if (isset($config['usesafe'])) {
$data['usesafe'] = (bool)$config['usesafe'];
}
$editform->set_data($data);
}
/**
* Performs any necessary clean up when the store instance is being deleted.
*/
public function instance_deleted() {
// We can't use purge here that acts upon a collection.
// Instead we must drop the named database.
if ($this->connection) {
$connection = $this->connection;
} else {
try {
// MongoClient from Mongo 1.3 onwards. Mongo for earlier versions.
$class = ($this->legacymongo) ? 'Mongo' : 'MongoClient';
$connection = new $class($this->server, $this->options);
} catch (MongoConnectionException $e) {
// We only want to catch MongoConnectionExceptions here.
// If the server cannot be connected to we cannot clean it.
return;
}
}
$database = $connection->selectDB($this->databasename);
$database->drop();
$connection = null;
$database = null;
// Explicitly unset things to cause a close.
$this->collection = null;
$this->database = null;
$this->connection = null;
}
/**
* Generates an instance of the cache store that can be used for testing.
*
* @param cache_definition $definition
* @return false
*/
public static function initialise_test_instance(cache_definition $definition) {
if (!self::are_requirements_met()) {
return false;
}
$config = get_config('cachestore_mongodb');
if (empty($config->testserver)) {
return false;
}
$configuration = array();
$configuration['server'] = $config->testserver;
if (!empty($config->testreplicaset)) {
$configuration['replicaset'] = $config->testreplicaset;
}
if (!empty($config->testusername)) {
$configuration['username'] = $config->testusername;
}
if (!empty($config->testpassword)) {
$configuration['password'] = $config->testpassword;
}
if (!empty($config->testdatabase)) {
$configuration['database'] = $config->testdatabase;
}
$configuration['usesafe'] = 1;
if (!empty($config->testextendedmode)) {
$configuration['extendedmode'] = (bool)$config->testextendedmode;
}
$store = new cachestore_mongodb('Test mongodb', $configuration);
if (!$store->is_ready()) {
return false;
}
$store->initialise($definition);
return $store;
}
/**
* Returns the name of this instance.
* @return string
*/
public function my_name() {
return $this->name;
}
}
| sameertechworks/wpmoodle | moodle/cache/stores/mongodb/lib.php | PHP | gpl-2.0 | 18,430 |
*Please include a short description of the problem here*
**I hereby confirm that I have:**
- [ ] Tried to solve the issue on my own
- [ ] Retried to run my code with the latest version of The COBRA Toolbox
- [ ] Checked that a similar issue has not already been opened
*(Note: You may replace [ ] with [X] to check the box)*
| shjchan/cobratoolbox | .github/ISSUE_TEMPLATE.md | Markdown | gpl-3.0 | 324 |
<head>
<link href="/assets/frappe/css/c3.min.css" rel="stylesheet" type="text/css">
<script type="text/javascript" src="/assets/frappe/js/lib/d3.min.js"></script>
<script type="text/javascript" src="/assets/frappe/js/lib/c3.min.js"></script>
<script type="text/javascript">
onReady("#chart_div", function() {
var chartData = [];
{% var q = 0; %}
{% for(var j=0, m=data.length+1; j<m; j++) { %}
var tempData{%=j%} = [];
{% for(var i=1, l=report.columns.length; i<l; i++) { %}
{% if(__(report.columns[i].label) != __("Quotation")) { %}
{% if(j == 0) { %}
{% if(i == 1) { %}
tempData{%=j%}[{%=i%}-1] = \"x\";
{% } else { %}
tempData{%=j%}[{%=i%}-1] = Math.log(parseInt(\"{%= report.columns[i].label %}\".replace(\"Qty: \",\"\"))) / Math.LN10;
{% } %}
{% } else { %}
{% if(i == 1) { %}
tempData{%=j%}[{%=i%}-1] = \"{%= data[j-1][report.columns[i].fieldname] %} \";
{% } else { %}
tempData{%=j%}[{%=i%}-1] = {% if(data[j-1][report.columns[i].fieldname] == "") { if (i > 2) { %}
tempData{%=j%}[{%=i%}-2]
{% } else { %}
0
{% } } else { %}
{%= data[j-1][report.columns[i].fieldname] %}
{% } %};
{% } %};
{% } %}
{% } else { %}
{% if(j == 0) { %}
{% if(i < l-1) { %}
tempData{%=j%}[{%=i%}-1] = Math.log(parseInt(\"{%= report.columns[i+1].label %}\".replace(\"Qty: \",\"\"))-1) / Math.LN10;
{% } else { %}
tempData{%=j%}[{%=i%}-1] = Math.log(2*parseInt(\"{%= report.columns[i-1].label %}\".replace(\"Qty: \",\"\"))) / Math.LN10;
{% } %}
{% } else { %}
tempData{%=j%}[{%=i%}-1] = tempData{%=j%}[{%=i%}-2];
{% } %}
{% } %}
{% } %}
chartData[{%=j%}] = tempData{%=j%};
{% } %}
console.log(chartData);
hold = {
bindto: "#chart_div" ,data: {
x: "x",
columns: chartData
},
axis: {
x: {
tick: {
format: function (x22) { return Math.pow(10,x22).toFixed(0); },
culling: {
max: {%=report.columns.length%} / 2
}
}
}
},
point: {
show: false
}
};
console.log(hold);
var chart = c3.generate(hold);
});
function onReady(selector, callback) {
var intervalID = window.setInterval(function() {
if (document.querySelector(selector) !== undefined) {
window.clearInterval(intervalID);
callback.call(this);
}
}, 500);}
</script>
</head>
<div style="margin-bottom: 7px;" class="text-center">
{%= frappe.boot.letter_heads[frappe.defaults.get_default("letter_head")] %}
</div>
<h2 class="text-center">{%= __(report.report_name) %}</h2>
<h4 class="text-center">{%= filters.item %} </h4>
<hr>
<table class="table table-bordered">
<thead>
<tr>
{% for(var i=0, l=report.columns.length; i<l; i++) { %}
<th style="width: 15%">{%= report.columns[i].label %}</th>
{% } %}
</tr>
</thead>
<tbody>
{% for(var i=0, l=data.length; i<l; i++) { %}
<tr>
{% for(var j=0,m=report.columns.length; j<m; j++) { %}
<td style="width: 15%">{%= data[i][report.columns[j].fieldname] %}</td>
{% } %}
</tr>
{% } %}
</tbody>
</table>
<h4 class="text-center"> Analysis Chart </h4>
<div id="chart_div"></div>
<p class="text-right text-muted">Printed On {%= frappe.datetime.str_to_user(frappe.datetime.get_datetime_as_string()) %}</p> | saurabh6790/erpnext | erpnext/buying/report/supplier_quotation_comparison/supplier_quotation_comparison.html | HTML | gpl-3.0 | 3,453 |
/*
* Copyright (c) 2009 by David Gräff <david.graeff@web.de>
* Copyright (c) 2011 by Maximilian Güntner <maximilian.guentner@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* For more information on the GPL, please go to:
* http://www.gnu.org/copyleft/gpl.html
*/
#include <stdlib.h>
#include <avr/io.h>
#include <avr/pgmspace.h>
#include "config.h"
#include "core/debug.h"
#include "core/eeprom.h"
#include "services/cron/cron.h"
#include "protocols/ecmd/ecmd-base.h"
#include "stella.h"
#ifndef TEENSY_SUPPORT
/* ECMD handler "stella store": persist the current channel values to EEPROM.
 * cmd, output and len are unused -- the command takes no arguments and
 * returns only the OK status. */
int16_t
parse_cmd_stella_eeprom_store(char *cmd, char *output, uint16_t len)
{
  stella_storeToEEROM();
  return ECMD_FINAL_OK;
}
/* ECMD handler "stella load": restore channel values from EEPROM.
 * cmd, output and len are unused -- the command takes no arguments and
 * returns only the OK status. */
int16_t
parse_cmd_stella_eeprom_load(char *cmd, char *output, uint16_t len)
{
  stella_loadFromEEROM();
  return ECMD_FINAL_OK;
}
#endif /* not TEENSY_SUPPORT */
/* ECMD handler "fadestep": with an argument, set the global fade step;
 * without one, print the current fade step into the output buffer. */
int16_t
parse_cmd_stella_fadestep(char *cmd, char *output, uint16_t len)
{
  /* No argument given: report the current value. */
  if (!cmd[0])
  {
    itoa(stella_fade_step, output, 10);
    return ECMD_FINAL(strlen(output));
  }

  /* Argument given: adopt it as the new fade step. */
  stella_fade_step = atoi(cmd);
  return ECMD_FINAL_OK;
}
/* ECMD handler "channels": print the number of PWM channels compiled into
 * this firmware. cmd is unused. */
int16_t
parse_cmd_stella_channels(char *cmd, char *output, uint16_t len)
{
  /* avr-libc itoa() returns its destination buffer, so the decimal
   * rendering and the length computation fold into one expression. */
  return ECMD_FINAL(strlen(itoa(STELLA_CHANNELS, output, 10)));
}
/* ECMD handler "channel": get or set a single PWM channel.
 *
 * Usage:
 *   "channel"                  -> dump channel count plus all channel values
 *                                 (multi-part reply, see below)
 *   "channel CH"               -> print the value of channel CH
 *   "channel CH VALUE [FUNC]"  -> set channel CH to VALUE; FUNC is one of
 *                                 's' (set), 'f' (fade) or 'y' (flashy).
 */
int16_t
parse_cmd_stella_channel(char *cmd, char *output, uint16_t len)
{
  char f = 0;                   /* NOTE(review): 0 is passed to
                                   stella_setValue() when no FUNC argument is
                                   given -- confirm it selects the plain set
                                   function. */
  uint8_t ch = 0;
  uint8_t value = 0;
  // following lines same as: sscanf_P(cmd, PSTR("%u %u %c"), &ch, &value, &f);
  while (*cmd && *cmd == ' ')
    cmd++;                      // skip whitespace
  if (!*cmd)
  {
    /* not first argument == return all channels */
    /* One channel is emitted per call; `chan` is static so the parser can
     * return ECMD_AGAIN and be re-invoked until every channel is sent. */
    static uint8_t chan = 0;
    uint8_t ret = 0;
    // First return amount of channels with three bytes
    if (chan == 0)
    {
      output[ret++] = ((uint8_t) STELLA_CHANNELS) / 10 + 48;
      output[ret++] = ((uint8_t) STELLA_CHANNELS) % 10 + 48;
      output[ret++] = '\n';
    }
    // return channel values
    /* Render the value as exactly three zero-padded ASCII digits,
     * least significant digit last. */
    value = stella_getValue(chan);
    output[ret + 2] = value % 10 + 48;
    value /= 10;
    output[ret + 1] = value % 10 + 48;
    value /= 10;
    output[ret + 0] = value % 10 + 48;
    ret += 3;
    if (chan < STELLA_CHANNELS - 1)
    {
      chan++;
      return ECMD_AGAIN(ret);   // more channels pending: request re-entry
    }
    else
    {
      chan = 0;                 // reset for the next invocation of the command
      return ECMD_FINAL(ret);
    }
  }
  ch = atoi(cmd);               // save first argument == channel
  while (*cmd && *cmd != ' ')
    cmd++;                      // skip value
  while (*cmd && *cmd == ' ')
    cmd++;                      // skip whitespace
  if (!*cmd)
  {
    /* no second argument -> get value */
    if (ch >= STELLA_CHANNELS)
      return ECMD_ERR_PARSE_ERROR;
    itoa(stella_getValue(ch), output, 10);
    return ECMD_FINAL(strlen(output));
  }
  value = atoi(cmd);
  while (*cmd && *cmd != ' ')
    cmd++;                      // skip value
  while (*cmd && *cmd == ' ')
    cmd++;                      // skip whitespace
  /* third argument == fade step */
  if (*cmd)
  {
    f = *cmd;
    if (f == 's')
      f = STELLA_SET_IMMEDIATELY;
    else if (f == 'f')
      f = STELLA_SET_FADE;
    else if (f == 'y')
      f = STELLA_SET_FLASHY;
  }
  if (ch >= STELLA_CHANNELS)
    return ECMD_ERR_PARSE_ERROR;
  stella_setValue(f, ch, value);
  return ECMD_FINAL_OK;
}
/*
-- Ethersex META --
block([[Stella_Light]] commands)
ecmd_ifndef(TEENSY_SUPPORT)
ecmd_feature(stella_eeprom_store, "stella store",, Store values in eeprom)
ecmd_feature(stella_eeprom_load, "stella load",, Load values from eeprom)
ecmd_endif()
ecmd_feature(stella_channels, "channels",, Return stella channel size)
ecmd_feature(stella_channel, "channel", CHANNEL VALUE FUNCTION,Get/Set stella channel to value. Second and third parameters are optional. Function: You may use 's' for instant set, 'f' for fade and 'y' for flashy fade. )
ecmd_feature(stella_fadestep, "fadestep", FADESTEP, Get/Set stella fade step)
*/
| alezz/ethersex | services/stella/stella_ecmd.c | C | gpl-3.0 | 4,518 |
// Copyright (C) 2010-2016 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
// { dg-options "-std=gnu++11" }
// { dg-do compile { xfail uclibc } }
// { dg-excess-errors "" { target uclibc } }
#include <cmath>
// Each definition below deliberately reuses the name of a C99
// classification/comparison facility.  This test only needs to compile
// ({ dg-do compile } above): if <cmath> left the C library's
// function-like macros (fpclassify(x), isnan(x), ...) visible in the
// global namespace, these plain function definitions would fail to parse.
void fpclassify() { }
void isfinite() { }
void isinf() { }
void isnan() { }
void isnormal() { }
void signbit() { }
void isgreater() { }
void isgreaterequal() { }
void isless() { }
void islessequal() { }
void islessgreater() { }
void isunordered() { }
#if _GLIBCXX_USE_C99_MATH
// Exercise every C99 classification/comparison function through its
// std::-qualified name, with (possibly mixed) floating point argument
// types.  This is a compile-only check: the results are stored but never
// inspected.
template <typename _Tp, typename _Up = _Tp>
void test_c99_classify()
{
bool test __attribute__((unused)) = true;
typedef _Tp fp_type_one;
typedef _Up fp_type_two;
fp_type_one f1 = 1.0;
fp_type_two f2 = 3.0;
int resi;
bool res;
resi = std::fpclassify(f1);
res = std::isfinite(f2);
res = std::isinf(f1);
res = std::isnan(f2);
res = std::isnormal(f1);
res = std::signbit(f2);
res = std::isgreater(f1, f2);
res = std::isgreaterequal(f1, f2);
res = std::isless(f1, f2);
res = std::islessequal(f1,f2);
res = std::islessgreater(f1, f2);
res = std::isunordered(f1, f2);
resi = resi; // Suppress unused warning.
res = res; // Likewise.
}
#endif
// Instantiate the classification test for each floating-point type and
// for every mixed-type combination.  Since the test is compile-only,
// successfully compiling these instantiations is the whole point.
int main()
{
#if _GLIBCXX_USE_C99_MATH
test_c99_classify<float>();
test_c99_classify<double>();
test_c99_classify<long double>();
test_c99_classify<float, double>();
test_c99_classify<float, long double>();
test_c99_classify<double, float>();
test_c99_classify<double, long double>();
test_c99_classify<long double, float>();
test_c99_classify<long double, double>();
#endif
return 0;
}
| selmentdev/selment-toolchain | source/gcc-latest/libstdc++-v3/testsuite/26_numerics/headers/cmath/c99_classification_macros_c++11.cc | C++ | gpl-3.0 | 2,311 |
<?php
// This file is part of Moodle - http://moodle.org/
//
// Moodle is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Moodle is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Moodle. If not, see <http://www.gnu.org/licenses/>.
namespace core\navigation\views;
/**
* Class core_primary_testcase
*
* Unit test for the primary nav view.
*
* @package core
* @category navigation
* @copyright 2021 onwards Peter Dias
* @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
*/
class primary_test extends \advanced_testcase {
    /**
     * Test the initialise in different contexts
     *
     * @param string $usertype The user to setup for - admin, guest, regular user
     * @param array $expected The expected node keys
     * @dataProvider setting_initialise_provider
     */
    public function test_setting_initialise($usertype, $expected) {
        global $PAGE;
        $PAGE->set_url("/");
        $this->resetAfterTest();
        if ($usertype == 'admin') {
            $this->setAdminUser();
        } else if ($usertype == 'guest') {
            $this->setGuestUser();
        } else {
            $user = $this->getDataGenerator()->create_user();
            $this->setUser($user);
        }
        $node = new primary($PAGE);
        $node->initialise();
        $children = $node->get_children_key_list();
        $this->assertEquals($expected, $children);
    }
    /**
     * Data provider for test_setting_initialise().
     *
     * Renamed from test_setting_initialise_provider: PHPUnit runs every
     * public method whose name starts with "test" as a test case, so the
     * provider itself was being executed as a (useless, risky) test.
     *
     * @return array
     */
    public function setting_initialise_provider(): array {
        return [
            'Testing as a guest user' => ['guest', ['home', 'courses']],
            'Testing as an admin' => ['admin', ['home', 'myhome', 'courses', 'siteadminnode']],
            'Testing as a regular user' => ['user', ['home', 'myhome', 'courses']]
        ];
    }
}
| marinaglancy/moodle | lib/tests/navigation/views/primary_test.php | PHP | gpl-3.0 | 2,299 |
{% load alert_tags %}
<div id="editSubscriptionsDialog">
<div class="modal-header">
<h4>Edit Subscriptions</h4>
</div>
<div class="modal-body">
<h6>What pings would you like to get and where should they go?</h6>
<form id="editAlertSubsForm" action="/alerts/subscriptions/" method="POST">
<table class="table">
<tr>
<th>Group</th>
{% for method in all_methods %}
<th>{{method}}</th>
{% endfor %}
</tr>
{% for group in available_groups %}
<tr>
<td>{{group.name}}</td>
{% for method in all_methods %}
<td><input name="{{group.pk}}_{{method}}" type="checkbox" {% if method in current_subs|keyvalue:group.name %} checked="checked" {% endif %} /></td>
{% endfor %}
</tr>
{% empty %}
<tr>
<td>You are not eligible for any alert groups!</td>
</tr>
{% endfor %}
</table>
</form>
</div>
<div class="modal-footer">
<button class="btn" onclick="$('#modalHolder').modal('hide').empty();">Cancel</button>
<button class="btn btn-primary" onclick="EditAlertSubscriptions();">Save</button>
</div>
  <script type="text/javascript">
$('#editAlertSubsForm').submit(function(e){
e.preventDefault();
return false;
});
</script>
</div>
| SaintDrage/eve-wspace | evewspace/Alerts/templates/edit_subscriptions.html | HTML | gpl-3.0 | 1,602 |
#
# Copyright (C) 2011 Instructure, Inc.
#
# This file is part of Canvas.
#
# Canvas is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, version 3 of the License.
#
# Canvas is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# A question bank: a named container of AssessmentQuestions owned by a
# Course or Account context.  Banks are soft-deleted (workflow_state),
# can be bookmarked per user, aligned with learning outcomes, and feed
# random question selections into quiz groups.
class AssessmentQuestionBank < ActiveRecord::Base
  include Workflow
  attr_accessible :context, :title, :user, :alignments
  EXPORTABLE_ATTRIBUTES = [:id, :context_id, :context_type, :title, :workflow_state, :deleted_at, :created_at, :updated_at]
  EXPORTABLE_ASSOCIATIONS = [:context, :assessment_questions, :assessment_question_bank_users, :learning_outcome_alignments, :quiz_groups]
  belongs_to :context, :polymorphic => true
  validates_inclusion_of :context_type, :allow_nil => true, :in => ['Account', 'Course']
  has_many :assessment_questions, :order => 'assessment_questions.name, assessment_questions.position, assessment_questions.created_at'
  has_many :assessment_question_bank_users
  has_many :learning_outcome_alignments, :as => :content, :class_name => 'ContentTag', :conditions => ['content_tags.tag_type = ? AND content_tags.workflow_state != ?', 'learning_outcome', 'deleted'], :include => :learning_outcome
  has_many :quiz_groups, class_name: 'Quizzes::QuizGroup'
  before_save :infer_defaults
  after_save :update_alignments
  validates_length_of :title, :maximum => maximum_string_length, :allow_nil => true
  # Soft-delete state machine: banks are only ever marked deleted.
  workflow do
    state :active
    state :deleted
  end
  # Managers of the context get full access; bookmarked users read-only.
  set_policy do
    given{|user, session| self.context.grants_right?(user, session, :manage_assignments) }
    can :read and can :create and can :update and can :delete and can :manage
    given{|user| user && self.assessment_question_bank_users.where(:user_id => user).exists? }
    can :read
  end
  def self.default_imported_title
    t :default_imported_title, 'Imported Questions'
  end
  def self.default_unfiled_title
    t :default_unfiled_title, 'Unfiled Questions'
  end
  # The per-context catch-all bank for questions without a bank.
  # NOTE(review): the trailing `rescue nil` swallows any creation
  # failure and returns nil -- confirm callers handle a nil bank.
  def self.unfiled_for_context(context)
    context.assessment_question_banks.where(title: default_unfiled_title, workflow_state: 'active').first_or_create rescue nil
  end
  # Context short name, memoized per instance and cached in Rails.cache
  # under the context code.
  def cached_context_short_name
    @cached_context_name ||= Rails.cache.fetch(['short_name_lookup', self.context_code].cache_key) do
      self.context.short_name rescue ""
    end
  end
  def assessment_question_count
    self.assessment_questions.active.count
  end
  def context_code
    "#{self.context_type.underscore}_#{self.context_id}"
  end
  # before_save: give untitled banks a context-derived default title.
  def infer_defaults
    self.title = t(:default_title, "No Name - %{course}", :course => self.context.name) if self.title.blank?
  end
  # Replace the bank's learning-outcome alignments with the given
  # {outcome_id => mastery_score} hash (empty string/hash clears all).
  def alignments=(alignments)
    # empty string from controller or empty hash
    if alignments.empty?
      outcomes = []
    else
      outcomes = context.linked_learning_outcomes.where(id: alignments.keys.map(&:to_i)).to_a
    end
    # delete alignments that aren't in the list anymore
    if outcomes.empty?
      learning_outcome_alignments.update_all(:workflow_state => 'deleted')
    else
      learning_outcome_alignments.
        where("learning_outcome_id NOT IN (?)", outcomes).
        update_all(:workflow_state => 'deleted')
    end
    # add/update current alignments
    unless outcomes.empty?
      alignments.each do |outcome_id, mastery_score|
        outcome = outcomes.detect{ |outcome| outcome.id == outcome_id.to_i }
        next unless outcome
        outcome.align(self, context, :mastery_score => mastery_score)
      end
    end
  end
  # after_save: when the bank transitions into the deleted state, clear
  # its outcome alignments as well.
  def update_alignments
    return unless workflow_state_changed? && deleted?
    LearningOutcome.update_alignments(self, context, [])
  end
  # Add (default) or remove a user's bookmark on this bank.
  def bookmark_for(user, do_bookmark=true)
    if do_bookmark
      question_bank_user = self.assessment_question_bank_users.where(user_id: user).first
      question_bank_user ||= self.assessment_question_bank_users.create(:user => user)
    else
      AssessmentQuestionBankUser.where(:user_id => user, :assessment_question_bank_id => self).delete_all
    end
  end
  def bookmarked_for?(user)
    user && self.assessment_question_bank_users.where(user_id: user).exists?
  end
  # Randomly pick up to +count+ active questions (excluding the given
  # ids) and materialize them as quiz questions for +quiz_id+.
  def select_for_submission(quiz_id, count, exclude_ids=[], exclude_qq_ids=[])
    ids = self.assessment_questions.active.pluck(:id)
    ids = (ids - exclude_ids).shuffle[0...count]
    questions = ids.empty? ? [] : AssessmentQuestion.where(id: ids).shuffle
    questions.map do |aq|
      aq.find_or_create_quiz_question(quiz_id, exclude_qq_ids)
    end
  end
  # Soft delete: keep the hard-delete available as #destroy!.
  alias_method :destroy!, :destroy
  def destroy
    self.workflow_state = 'deleted'
    self.save
  end
  # clear out all questions so that the bank can be replaced. this is currently
  # used by the respondus API.
  def clear_for_replacement
    assessment_questions.destroy_all
    quiz_groups.destroy_all
  end
  scope :active, -> { where("assessment_question_banks.workflow_state<>'deleted'") }
end
| Rvor/canvas-lms | app/models/assessment_question_bank.rb | Ruby | agpl-3.0 | 5,275 |
<?php declare(strict_types=1);
namespace PhpParser\Node\Stmt;
use PhpParser\Node;
class Interface_ extends ClassLike
{
    /** @var Node\Name[] Extended interfaces */
    public $extends;
    /**
     * Constructs an interface node.
     *
     * @param string|Node\Identifier $name Name
     * @param array $subNodes Array of the following optional subnodes:
     *                        'extends'    => array(): Name of extended interfaces
     *                        'stmts'      => array(): Statements
     *                        'attrGroups' => array(): PHP attribute groups
     * @param array $attributes Additional attributes
     */
    public function __construct($name, array $subNodes = [], array $attributes = []) {
        $this->attributes = $attributes;
        $this->name = \is_string($name) ? new Node\Identifier($name) : $name;
        $this->extends = $subNodes['extends'] ?? [];
        $this->stmts = $subNodes['stmts'] ?? [];
        $this->attrGroups = $subNodes['attrGroups'] ?? [];
    }
    public function getSubNodeNames() : array {
        return ['attrGroups', 'name', 'extends', 'stmts'];
    }
    public function getType() : string {
        return 'Stmt_Interface';
    }
}
| aydancoskun/timetrex-community-edition | vendor/nikic/php-parser/lib/PhpParser/Node/Stmt/Interface_.php | PHP | agpl-3.0 | 1,229 |
/*
* @(#)TreeModel.java 1.27 10/03/23
*
* Copyright (c) 2006, Oracle and/or its affiliates. All rights reserved.
* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
*/
package org.jaudiotagger.utils.tree;
/**
* The model used by <code>JTree</code>.
* <p>
* <code>JTree</code> and its related classes make extensive use of
 * <code>TreePath</code>s for identifying nodes in the <code>TreeModel</code>.
* If a <code>TreeModel</code> returns the same object, as compared by
* <code>equals</code>, at two different indices under the same parent
* than the resulting <code>TreePath</code> objects will be considered equal
* as well. Some implementations may assume that if two
* <code>TreePath</code>s are equal, they identify the same node. If this
* condition is not met, painting problems and other oddities may result.
* In other words, if <code>getChild</code> for a given parent returns
* the same Object (as determined by <code>equals</code>) problems may
* result, and it is recommended you avoid doing this.
* <p>
* Similarly <code>JTree</code> and its related classes place
* <code>TreePath</code>s in <code>Map</code>s. As such if
* a node is requested twice, the return values must be equal
* (using the <code>equals</code> method) and have the same
* <code>hashCode</code>.
* <p>
* For further information on tree models,
* including an example of a custom implementation,
* see <a
href="http://java.sun.com/docs/books/tutorial/uiswing/components/tree.html">How to Use Trees</a>
* in <em>The Java Tutorial.</em>
*
* @see TreePath
*
* @version 1.27 03/23/10
* @author Rob Davis
* @author Ray Ryan
*/
public interface TreeModel
{
    /**
     * Returns the root of the tree.  Returns <code>null</code>
     * only if the tree has no nodes.
     *
     * @return the root of the tree
     */
    public Object getRoot();
    /**
     * Returns the child of <code>parent</code> at index <code>index</code>
     * in the parent's
     * child array.  <code>parent</code> must be a node previously obtained
     * from this data source. This should not return <code>null</code>
     * if <code>index</code>
     * is a valid index for <code>parent</code> (that is <code>index >= 0 &&
     * index < getChildCount(parent)</code>).
     *
     * @param   parent  a node in the tree, obtained from this data source
     * @return  the child of <code>parent</code> at index <code>index</code>
     */
    public Object getChild(Object parent, int index);
    /**
     * Returns the number of children of <code>parent</code>.
     * Returns 0 if the node
     * is a leaf or if it has no children.  <code>parent</code> must be a node
     * previously obtained from this data source.
     *
     * @param   parent  a node in the tree, obtained from this data source
     * @return  the number of children of the node <code>parent</code>
     */
    public int getChildCount(Object parent);
    /**
     * Returns <code>true</code> if <code>node</code> is a leaf.
     * It is possible for this method to return <code>false</code>
     * even if <code>node</code> has no children.
     * A directory in a filesystem, for example,
     * may contain no files; the node representing
     * the directory is not a leaf, but it also has no children.
     *
     * @param   node  a node in the tree, obtained from this data source
     * @return  true if <code>node</code> is a leaf
     */
    public boolean isLeaf(Object node);
    /**
     * Messaged when the user has altered the value for the item identified
     * by <code>path</code> to <code>newValue</code>.
     * If <code>newValue</code> signifies a truly new value
     * the model should post a <code>treeNodesChanged</code> event.
     *
     * @param path path to the node that the user has altered
     * @param newValue the new value from the TreeCellEditor
     */
    public void valueForPathChanged(TreePath path, Object newValue);
    /**
     * Returns the index of child in parent.  If either <code>parent</code>
     * or <code>child</code> is <code>null</code>, returns -1.
     * If either <code>parent</code> or <code>child</code> don't
     * belong to this tree model, returns -1.
     *
     * @param parent a node in the tree, obtained from this data source
     * @param child the node we are interested in
     * @return the index of the child in the parent, or -1 if either
     *    <code>child</code> or <code>parent</code> are <code>null</code>
     *    or don't belong to this tree model
     */
    public int getIndexOfChild(Object parent, Object child);
//
//  Change Events
//
    /**
     * Adds a listener for the <code>TreeModelEvent</code>
     * posted after the tree changes.
     *
     * @param   l       the listener to add
     * @see     #removeTreeModelListener
     */
    void addTreeModelListener(TreeModelListener l);
    /**
     * Removes a listener previously added with
     * <code>addTreeModelListener</code>.
     *
     * @see     #addTreeModelListener
     * @param   l       the listener to remove
     */
    void removeTreeModelListener(TreeModelListener l);
}
| craigpetchell/Jaudiotagger | src/org/jaudiotagger/utils/tree/TreeModel.java | Java | lgpl-2.1 | 5,159 |
/* gcc -g -Wall -O2 -o dialog-test dialog-test.c `pkg-config --cflags --libs gtk+-3.0` */
#include <gtk/gtk.h>
static GtkWidget *window;
static GtkWidget *width_chars_spin;
static GtkWidget *max_width_chars_spin;
static GtkWidget *default_width_spin;
static GtkWidget *default_height_spin;
static GtkWidget *resizable_check;
/* Keep *label* showing the dialog's current geometry.  Connected to the
 * dialog's "configure-event"; always returns FALSE so the event keeps
 * propagating. */
static gboolean
configure_event_cb (GtkWidget *window, GdkEventConfigure *event, GtkLabel *label)
{
  gint w, h;
  gchar *text;

  gtk_window_get_size (GTK_WINDOW (window), &w, &h);

  text = g_strdup_printf ("%d x %d", w, h);
  gtk_label_set_label (label, text);
  g_free (text);

  return FALSE;
}
/* Build and run a modal dialog containing one long wrapping label,
 * configured from the width-chars / max-width-chars / default-size /
 * resizable controls in the main window, so their effect on the
 * dialog's natural size can be observed. */
static void
show_dialog (void)
{
  GtkWidget *dialog;
  GtkWidget *label;
  gint width_chars, max_width_chars, default_width, default_height;
  gboolean resizable;
  /* Snapshot the values currently selected in the control window. */
  width_chars = gtk_spin_button_get_value_as_int (GTK_SPIN_BUTTON (width_chars_spin));
  max_width_chars = gtk_spin_button_get_value_as_int (GTK_SPIN_BUTTON (max_width_chars_spin));
  default_width = gtk_spin_button_get_value_as_int (GTK_SPIN_BUTTON (default_width_spin));
  default_height = gtk_spin_button_get_value_as_int (GTK_SPIN_BUTTON (default_height_spin));
  resizable = gtk_toggle_button_get_active (GTK_TOGGLE_BUTTON (resizable_check));
  dialog = gtk_dialog_new_with_buttons ("Test", GTK_WINDOW (window),
                                        GTK_DIALOG_MODAL,
                                        "_Close", GTK_RESPONSE_CANCEL,
                                        NULL);
  label = gtk_label_new ("Lorem ipsum dolor sit amet, consectetur adipiscing elit. "
                         "Nulla innn urna ac dui malesuada ornare. Nullam dictum "
                         "tempor mi et tincidunt. Aliquam metus nulla, auctor "
                         "vitae pulvinar nec, egestas at mi. Class aptent taciti "
                         "sociosqu ad litora torquent per conubia nostra, per "
                         "inceptos himenaeos. Aliquam sagittis, tellus congue "
                         "cursus congue, diam massa mollis enim, sit amet gravida "
                         "magna turpis egestas sapien. Aenean vel molestie nunc. "
                         "In hac habitasse platea dictumst. Suspendisse lacinia"
                         "mi eu ipsum vestibulum in venenatis enim commodo. "
                         "Vivamus non malesuada ligula.")
;
  gtk_label_set_line_wrap (GTK_LABEL (label), TRUE);
  gtk_label_set_width_chars (GTK_LABEL (label), width_chars);
  gtk_label_set_max_width_chars (GTK_LABEL (label), max_width_chars);
  gtk_window_set_default_size (GTK_WINDOW (dialog), default_width, default_height);
  gtk_window_set_resizable (GTK_WINDOW (dialog), resizable);
  /* NOTE(review): args here are expand=0, fill=TRUE, padding=TRUE(=1px)
   * -- padding=TRUE looks accidental; confirm against the intended
   * gtk_box_pack_start(box, child, expand, fill, padding) call. */
  gtk_box_pack_start (GTK_BOX (gtk_dialog_get_content_area (GTK_DIALOG (dialog))),
                      label, 0, TRUE, TRUE);
  gtk_widget_show (label);
  /* A live "W x H" readout placed in the action area; left hidden here
   * (see the commented-out show below) -- confirm whether it should be
   * visible. */
  label = gtk_label_new ("? x ?");
  //gtk_widget_show (label);
  gtk_dialog_add_action_widget (GTK_DIALOG (dialog), label, GTK_RESPONSE_HELP);
  g_signal_connect (dialog, "configure-event",
                    G_CALLBACK (configure_event_cb), label);
  gtk_dialog_run (GTK_DIALOG (dialog));
  gtk_widget_destroy (dialog);
}
/* Build the non-resizable main control window: spin buttons for
 * width-chars / max-width-chars / default size, a resizable toggle and
 * a "Show" button that opens the test dialog. */
static void
create_window (void)
{
  GtkWidget *grid;
  GtkWidget *label;
  GtkWidget *button;
  window = gtk_window_new (GTK_WINDOW_TOPLEVEL);
  gtk_window_set_title (GTK_WINDOW (window), "Window size");
  gtk_container_set_border_width (GTK_CONTAINER (window), 12);
  gtk_window_set_resizable (GTK_WINDOW (window), FALSE);
  grid = gtk_grid_new ();
  gtk_grid_set_row_spacing (GTK_GRID (grid), 12);
  gtk_grid_set_column_spacing (GTK_GRID (grid), 12);
  gtk_container_add (GTK_CONTAINER (window), grid);
  /* Row 0: label width in characters. */
  label = gtk_label_new ("Width chars");
  gtk_widget_set_halign (label, GTK_ALIGN_START);
  width_chars_spin = gtk_spin_button_new_with_range (-1, 1000, 1);
  gtk_widget_set_halign (width_chars_spin, GTK_ALIGN_START);
  gtk_grid_attach (GTK_GRID (grid), label, 0, 0, 1, 1);
  gtk_grid_attach (GTK_GRID (grid), width_chars_spin, 1, 0, 1, 1);
  /* Row 1: maximum label width in characters. */
  label = gtk_label_new ("Max width chars");
  gtk_widget_set_halign (label, GTK_ALIGN_START);
  max_width_chars_spin = gtk_spin_button_new_with_range (-1, 1000, 1);
  /* BUG FIX: this call used to re-align width_chars_spin (copy/paste),
   * leaving max_width_chars_spin at its default alignment. */
  gtk_widget_set_halign (max_width_chars_spin, GTK_ALIGN_START);
  gtk_grid_attach (GTK_GRID (grid), label, 0, 1, 1, 1);
  gtk_grid_attach (GTK_GRID (grid), max_width_chars_spin, 1, 1, 1, 1);
  /* Row 2: default dialog width and height. */
  label = gtk_label_new ("Default size");
  gtk_widget_set_halign (label, GTK_ALIGN_START);
  default_width_spin = gtk_spin_button_new_with_range (-1, 1000, 1);
  gtk_widget_set_halign (default_width_spin, GTK_ALIGN_START);
  default_height_spin = gtk_spin_button_new_with_range (-1, 1000, 1);
  gtk_widget_set_halign (default_height_spin, GTK_ALIGN_START);
  gtk_grid_attach (GTK_GRID (grid), label, 0, 2, 1, 1);
  gtk_grid_attach (GTK_GRID (grid), default_width_spin, 1, 2, 1, 1);
  gtk_grid_attach (GTK_GRID (grid), default_height_spin, 2, 2, 1, 1);
  /* Row 3: whether the dialog may be resized by the user. */
  label = gtk_label_new ("Resizable");
  gtk_widget_set_halign (label, GTK_ALIGN_START);
  resizable_check = gtk_check_button_new ();
  gtk_widget_set_halign (resizable_check, GTK_ALIGN_START);
  gtk_grid_attach (GTK_GRID (grid), label, 0, 3, 1, 1);
  gtk_grid_attach (GTK_GRID (grid), resizable_check, 1, 3, 1, 1);
  /* Row 4: open the dialog with the chosen settings. */
  button = gtk_button_new_with_label ("Show");
  g_signal_connect (button, "clicked", G_CALLBACK (show_dialog), NULL);
  gtk_grid_attach (GTK_GRID (grid), button, 2, 4, 1, 1);
  gtk_widget_show_all (window);
}
/* Entry point: build the control window and enter the GTK main loop.
 * gtk_main() returns only when the application is told to quit. */
int
main (int argc, char *argv[])
{
  gtk_init (NULL, NULL);
  create_window ();
  gtk_main ();
  return 0;
}
| Sidnioulz/SandboxGtk | tests/testwindowsize.c | C | lgpl-2.1 | 5,611 |
/**
* Vosao CMS. Simple CMS for Google App Engine.
*
* Copyright (C) 2009-2010 Vosao development team.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* email: vosao.dev@gmail.com
*/
package org.vosao.dao.impl;
import static com.google.appengine.api.datastore.Query.FilterOperator.EQUAL;
import java.util.Date;
import java.util.List;
import org.vosao.dao.BaseDaoImpl;
import org.vosao.dao.FileChunkDao;
import org.vosao.dao.FileDao;
import org.vosao.entity.FileEntity;
import com.google.appengine.api.datastore.Query;
/**
 * Datastore DAO for {@link FileEntity}.
 *
 * File metadata lives in FileEntity rows while the binary payload is
 * stored separately as chunks managed by {@link FileChunkDao}; the two
 * must stay in sync, which is why remove/save/removeAll always touch
 * both.
 */
public class FileDaoImpl extends BaseDaoImpl<FileEntity>
		implements FileDao {
	public FileDaoImpl() {
		super(FileEntity.class);
	}
	/**
	 * Delete one file. Removes the content chunks first, then the
	 * metadata row. A null id is silently ignored.
	 */
	@Override
	public void remove(final Long fileId) {
		if (fileId == null) {
			return;
		}
		getFileChunkDao().removeByFile(fileId);
		super.remove(fileId);
	}
	/**
	 * Delete several files one by one (via {@link #remove(Long)}) so the
	 * per-file chunk cleanup is applied to each of them.
	 */
	@Override
	public void remove(final List<Long> ids) {
		for (Long fileId : ids) {
			remove(fileId);
		}
	}
	/** List all files stored directly in the given folder. */
	@Override
	public List<FileEntity> getByFolder(Long folderId) {
		Query q = newQuery();
		q.addFilter("folderId", EQUAL, folderId);
		// "getByFolder" is the query-cache key used by BaseDaoImpl.
		return select(q, "getByFolder", params(folderId));
	}
	/** Look a file up by folder and file name; null when absent. */
	@Override
	public FileEntity getByName(Long folderId, String name) {
		Query q = newQuery();
		q.addFilter("folderId", EQUAL, folderId);
		q.addFilter("filename", EQUAL, name);
		return selectOne(q, "getByName", params(folderId, name));
	}
	/**
	 * Persist a file together with its content: refreshes the
	 * last-modified timestamp and size from the payload, saves the
	 * metadata row, then (re)writes the content chunks.
	 */
	@Override
	public void save(FileEntity file, byte[] content) {
		file.setLastModifiedTime(new Date());
		file.setSize(content.length);
		save(file);
		getFileChunkDao().save(file, content);
	}
	/** Delete every file (and its chunks) contained in a folder. */
	@Override
	public void removeByFolder(Long folderId) {
		List<FileEntity> files = getByFolder(folderId);
		for (FileEntity file : files) {
			remove(file.getId());
		}
	}
	/** Wipe all file rows and all content chunks. */
	@Override
	public void removeAll() {
		super.removeAll();
		getFileChunkDao().removeAll();
	}
	// Chunk DAO is resolved lazily through the DAO registry.
	public FileChunkDao getFileChunkDao() {
		return getDao().getFileChunkDao();
	}
	/** Reassemble and return the binary content of a file. */
	@Override
	public byte[] getFileContent(FileEntity file) {
		return getFileChunkDao().getFileContent(file);
	}
}
| vosaocms/vosao | kernel/src/org/vosao/dao/impl/FileDaoImpl.java | Java | lgpl-2.1 | 2,730 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 Martine Lenders <mail@martine-lenders.eu>
#
# Distributed under terms of the MIT license.
from __future__ import print_function
import os
import sys
import random
import subprocess
import time
import types
import pexpect
DEFAULT_TIMEOUT = 5
class Strategy(object):
    """Base class for pluggable actions.

    If *func* is given, it is installed as the ``execute`` method on the
    instance's class.  Note this mutates the *class* (all instances see
    the new ``execute``), which mirrors the original behavior.
    """

    def __init__(self, func=None):
        if func is None:
            return
        # Python 2 requires the three-argument MethodType form.
        if sys.version_info < (3,):
            bound = types.MethodType(func, self, self.__class__)
        else:
            bound = types.MethodType(func, self)
        self.__class__.execute = bound

    def execute(self, *args, **kwargs):
        """Placeholder; subclasses (or *func*) must provide the action."""
        raise NotImplementedError()
class ApplicationStrategy(Strategy):
    """Strategy bound to a RIOT application directory.

    :param app_dir: directory of the application to operate on; defaults
        to the current working directory.
    :param func: optional callable installed as ``execute`` (see Strategy).
    """

    def __init__(self, app_dir=None, func=None):
        super(ApplicationStrategy, self).__init__(func)
        # BUG FIX: ``app_dir=os.getcwd()`` as the default froze the working
        # directory at module-import time; resolve it lazily per call.
        self.app_dir = os.getcwd() if app_dir is None else app_dir
class BoardStrategy(Strategy):
    """Strategy that runs ``make`` targets for a specific board."""

    def __init__(self, board, func=None):
        super(BoardStrategy, self).__init__(func)
        self.board = board

    def _run_make(self, application, make_targets, env=None):
        """Run ``make -C application <targets>`` in the board's environment.

        *env* may supply extra environment variables; the board's own
        BOARD/PORT/SERIAL settings take precedence over them.

        Renamed from ``__run_make``: the double underscore made the
        subclasses' ``super(X, self).__run_make(...)`` calls mangle to
        ``_X__run_make`` and fail with AttributeError at runtime.
        """
        run_env = os.environ.copy()
        if env is not None:
            # BUG FIX: the original shadowed the *env* parameter with the
            # os.environ copy, so ``env.update(env)`` was a no-op and the
            # caller's variables were silently dropped.
            run_env.update(env)
        run_env.update(self.board.to_env())
        cmd = ("make", "-C", application) + make_targets
        print(' '.join(cmd))
        print(subprocess.check_output(cmd, env=run_env))

    def execute(self, application):
        super(BoardStrategy, self).execute(application)


class CleanStrategy(BoardStrategy):
    """Remove all build artifacts of *application* for the board."""

    def execute(self, application, env=None):
        self._run_make(application, ("-B", "clean"), env)


class BuildStrategy(BoardStrategy):
    """Build *application* for the board."""

    def execute(self, application, env=None):
        self._run_make(application, ("all",), env)


class FlashStrategy(BoardStrategy):
    """Flash *application* to the board.

    NOTE(review): this runs the ``all`` target, exactly as the original
    did; RIOT also provides a dedicated ``flash`` target -- confirm which
    is intended.
    """

    def execute(self, application, env=None):
        self._run_make(application, ("all",), env)


class ResetStrategy(BoardStrategy):
    """Reset the board."""

    def execute(self, application, env=None):
        self._run_make(application, ("reset",), env)
class Board(object):
    """A target board plus the make strategies used to drive it.

    Iterating a Board yields nothing (it is its own, empty iterator) but
    ``len(board) == 1`` so a single Board can stand in for a group of one.
    """

    def __init__(self, name, port=None, serial=None, clean=None,
                 build=None, flash=None,
                 reset=None, term=None):
        # ``native`` boards are processes, not hardware: resetting is a no-op.
        def _reset_native_execute(obj, application, env=None, *args, **kwargs):
            pass

        if (name == "native") and (reset is None):
            reset = _reset_native_execute

        self.name = name
        self.port = port
        self.serial = serial
        self.clean_strategy = CleanStrategy(self, clean)
        self.build_strategy = BuildStrategy(self, build)
        self.flash_strategy = FlashStrategy(self, flash)
        self.reset_strategy = ResetStrategy(self, reset)

    def __len__(self):
        return 1

    def __iter__(self):
        return self

    def next(self):
        raise StopIteration()

    # BUG FIX: the original only defined the Python 2 ``next`` name, so
    # iterating a Board on Python 3 raised TypeError instead of stopping.
    __next__ = next

    def __repr__(self):
        return ("<Board %s,port=%s,serial=%s>" %
                (repr(self.name), repr(self.port), repr(self.serial)))

    def to_env(self):
        """Return the BOARD/PORT/SERIAL environment variables for make."""
        env = {}
        if self.name:
            env['BOARD'] = self.name
        if self.port:
            env['PORT'] = self.port
        if self.serial:
            env['SERIAL'] = self.serial
        return env

    def clean(self, application=os.getcwd(), env=None):
        # BUG FIX: used to delegate to build_strategy (copy/paste error),
        # so ``clean`` actually built the application.
        self.clean_strategy.execute(application, env)

    def build(self, application=os.getcwd(), env=None):
        self.build_strategy.execute(application, env)

    def flash(self, application=os.getcwd(), env=None):
        self.flash_strategy.execute(application, env)

    def reset(self, application=os.getcwd(), env=None):
        self.reset_strategy.execute(application, env)
class BoardGroup(object):
    """A fixed collection of boards that are operated on together."""

    def __init__(self, boards):
        self.boards = boards

    def __len__(self):
        return len(self.boards)

    def __iter__(self):
        return iter(self.boards)

    def __repr__(self):
        return str(self.boards)

    def _apply(self, action, application, env):
        # Dispatch the named board method to every member in turn.
        for member in self.boards:
            getattr(member, action)(application, env)

    def clean(self, application=os.getcwd(), env=None):
        self._apply("clean", application, env)

    def build(self, application=os.getcwd(), env=None):
        self._apply("build", application, env)

    def flash(self, application=os.getcwd(), env=None):
        self._apply("flash", application, env)

    def reset(self, application=os.getcwd(), env=None):
        self._apply("reset", application, env)
def default_test_case(board_group, application, env=None):
    """Run ``make term`` for each board and wait for "TEST: SUCCESS".

    *env* may carry extra environment variables for the make invocation;
    the board's own variables take precedence.
    """
    for board in board_group:
        run_env = os.environ.copy()
        # BUG FIX: the original rebound the *env* parameter to the
        # os.environ copy, making ``env.update(env)`` a no-op and
        # discarding the caller-supplied environment.
        if env is not None:
            run_env.update(env)
        run_env.update(board.to_env())
        with pexpect.spawnu("make", ["-C", application, "term"], env=run_env,
                            timeout=DEFAULT_TIMEOUT,
                            logfile=sys.stdout) as spawn:
            spawn.expect("TEST: SUCCESS")
class TestStrategy(ApplicationStrategy):
    """Reset each board group and run every test case against it."""

    def execute(self, board_groups, test_cases=(default_test_case,),
                timeout=DEFAULT_TIMEOUT, env=None):
        """Run all *test_cases* once per *board_group*.

        ``timeout`` is currently unused here; individual test cases apply
        their own pexpect timeouts.
        """
        for board_group in board_groups:
            print("Testing for %s: " % board_group)
            for test_case in test_cases:
                board_group.reset()
                # BUG FIX: the original passed ``env=None`` here, silently
                # ignoring the *env* argument supplied by the caller.
                test_case(board_group, self.app_dir, env=env)
                sys.stdout.write('.')
                sys.stdout.flush()
            print()
def get_ipv6_address(spawn):
    """Return the node's link-local IPv6 address.

    Sends ``ifconfig`` to the shell on *spawn* and extracts the first
    ``fe80::...`` address from the interface listing.
    """
    spawn.sendline(u"ifconfig")
    spawn.expect(u"[A-Za-z0-9]{2}_[0-9]+: inet6 (fe80::[0-9a-f:]+)")
    return spawn.match.group(1)
def test_ipv6_send(board_group, application, env=None):
    """Send 8 bytes over raw IPv6 (random next-header) from board 0 to
    board 1 and check both sides report/print the payload."""
    env_sender = os.environ.copy()
    if env is not None:
        env_sender.update(env)
    env_sender.update(board_group.boards[0].to_env())
    env_receiver = os.environ.copy()
    if env is not None:
        env_receiver.update(env)
    env_receiver.update(board_group.boards[1].to_env())
    with pexpect.spawnu("make", ["-C", application, "term"], env=env_sender,
                        timeout=DEFAULT_TIMEOUT) as sender, \
        pexpect.spawnu("make", ["-C", application, "term"], env=env_receiver,
                       timeout=DEFAULT_TIMEOUT) as receiver:
        ipprot = random.randint(0x00, 0xff)
        receiver_ip = get_ipv6_address(receiver)
        receiver.sendline(u"ip server start %d" % ipprot)
        # wait for neighbor discovery to be done
        time.sleep(5)
        sender.sendline(u"ip send %s %d 01:23:45:67:89:ab:cd:ef" % (receiver_ip, ipprot))
        sender.expect_exact(u"Success: send 8 byte over IPv6 to %s (next header: %d)" %
                            (receiver_ip, ipprot))
        receiver.expect(u"00000000 01 23 45 67 89 AB CD EF")
def test_udpv6_send(board_group, application, env=None):
    """Send 3 bytes over UDP/IPv6 (random port) from board 0 to board 1
    and check both sides report/print the payload."""
    env_sender = os.environ.copy()
    if env is not None:
        env_sender.update(env)
    env_sender.update(board_group.boards[0].to_env())
    env_receiver = os.environ.copy()
    if env is not None:
        env_receiver.update(env)
    env_receiver.update(board_group.boards[1].to_env())
    with pexpect.spawnu("make", ["-C", application, "term"], env=env_sender,
                        timeout=DEFAULT_TIMEOUT) as sender, \
        pexpect.spawnu("make", ["-C", application, "term"], env=env_receiver,
                       timeout=DEFAULT_TIMEOUT) as receiver:
        port = random.randint(0x0000, 0xffff)
        receiver_ip = get_ipv6_address(receiver)
        receiver.sendline(u"udp server start %d" % port)
        # wait for neighbor discovery to be done
        time.sleep(5)
        sender.sendline(u"udp send %s %d ab:cd:ef" % (receiver_ip, port))
        sender.expect_exact(u"Success: send 3 byte over UDP to [%s]:%d" %
                            (receiver_ip, port))
        receiver.expect(u"00000000 AB CD EF")
def test_tcpv6_send(board_group, application, env=None):
    """Open a TCP/IPv6 connection from board 0 (client) to board 1
    (server), send 4 bytes, then verify sending after disconnect fails."""
    env_client = os.environ.copy()
    if env is not None:
        env_client.update(env)
    env_client.update(board_group.boards[0].to_env())
    env_server = os.environ.copy()
    if env is not None:
        env_server.update(env)
    env_server.update(board_group.boards[1].to_env())
    with pexpect.spawnu("make", ["-C", application, "term"], env=env_client,
                        timeout=DEFAULT_TIMEOUT) as client, \
        pexpect.spawnu("make", ["-C", application, "term"], env=env_server,
                       timeout=DEFAULT_TIMEOUT) as server:
        port = random.randint(0x0000, 0xffff)
        server_ip = get_ipv6_address(server)
        client_ip = get_ipv6_address(client)
        server.sendline(u"tcp server start %d" % port)
        # wait for neighbor discovery to be done
        time.sleep(5)
        client.sendline(u"tcp connect %s %d" % (server_ip, port))
        server.expect(u"TCP client \\[%s\\]:[0-9]+ connected" % client_ip)
        client.sendline(u"tcp send affe:abe")
        client.expect_exact(u"Success: send 4 byte over TCP to server")
        # payload is nibble-padded: "affe:abe" arrives as AF FE AB E0
        server.expect(u"00000000 AF FE AB E0")
        client.sendline(u"tcp disconnect")
        client.sendline(u"tcp send affe:abe")
        client.expect_exact(u"could not send")
def test_triple_send(board_group, application, env=None):
    """Exercise UDP, raw IPv6 and TCP transfers between the two boards in
    a single session, with all three servers running concurrently.

    Spawns one ``make term`` shell per board; ``env`` (if given) is merged
    into each shell's environment before the board-specific variables.
    """
    env_sender = os.environ.copy()
    if env is not None:
        env_sender.update(env)
    env_sender.update(board_group.boards[0].to_env())
    env_receiver = os.environ.copy()
    if env is not None:
        env_receiver.update(env)
    env_receiver.update(board_group.boards[1].to_env())
    with pexpect.spawnu("make", ["-C", application, "term"], env=env_sender,
                        timeout=DEFAULT_TIMEOUT) as sender, \
        pexpect.spawnu("make", ["-C", application, "term"], env=env_receiver,
                       timeout=DEFAULT_TIMEOUT) as receiver:
        # Port 0 is reserved and cannot be bound, so draw from 1..0xffff.
        udp_port = random.randint(0x0001, 0xffff)
        tcp_port = random.randint(0x0001, 0xffff)
        # Exclude next-header values used by the protocols that are active
        # in this test (TCP=6, UDP=17) and by ICMPv6/ND (58); drawing one
        # of these would make the raw-IP server clash with the running
        # TCP/UDP servers and cause intermittent failures.
        ipprot = random.choice(
            [p for p in range(0x100) if p not in (6, 17, 58)])
        receiver_ip = get_ipv6_address(receiver)
        sender_ip = get_ipv6_address(sender)
        receiver.sendline(u"ip server start %d" % ipprot)
        receiver.sendline(u"udp server start %d" % udp_port)
        receiver.sendline(u"tcp server start %d" % tcp_port)
        # wait for neighbor discovery to be done
        time.sleep(5)
        sender.sendline(u"udp send %s %d 01:23" % (receiver_ip, udp_port))
        sender.expect_exact(u"Success: send 2 byte over UDP to [%s]:%d" %
                            (receiver_ip, udp_port))
        receiver.expect(u"00000000 01 23")
        sender.sendline(u"ip send %s %d 01:02:03:04" % (receiver_ip, ipprot))
        sender.expect_exact(u"Success: send 4 byte over IPv6 to %s (next header: %d)" %
                            (receiver_ip, ipprot))
        receiver.expect(u"00000000 01 02 03 04")
        sender.sendline(u"tcp connect %s %d" % (receiver_ip, tcp_port))
        receiver.expect(u"TCP client \\[%s\\]:[0-9]+ connected" % sender_ip)
        sender.sendline(u"tcp send dead:beef")
        sender.expect_exact(u"Success: send 4 byte over TCP to server")
        receiver.expect(u"00000000 DE AD BE EF")
# When run directly, execute all four tests against a pair of native
# boards bridged via the tap0/tap1 interfaces.
# NOTE(review): test_ipv6_send is defined earlier in this file, outside
# this excerpt.
if __name__ == "__main__":
    TestStrategy().execute([BoardGroup((Board("native", "tap0"),
                                        Board("native", "tap1")))],
                           [test_ipv6_send, test_udpv6_send, test_tcpv6_send,
                            test_triple_send])
| BytesGalore/RIOT | tests/lwip/tests/01-run.py | Python | lgpl-2.1 | 11,453 |
<!DOCTYPE html >
<html>
<head>
<title>Application - io.gearpump.cluster.Application</title>
<meta name="description" content="Application - io.gearpump.cluster.Application" />
<meta name="keywords" content="Application io.gearpump.cluster.Application" />
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
<link href="../../../lib/template.css" media="screen" type="text/css" rel="stylesheet" />
<link href="../../../lib/diagrams.css" media="screen" type="text/css" rel="stylesheet" id="diagrams-css" />
<script type="text/javascript" src="../../../lib/jquery.js" id="jquery-js"></script>
<script type="text/javascript" src="../../../lib/jquery-ui.js"></script>
<script type="text/javascript" src="../../../lib/template.js"></script>
<script type="text/javascript" src="../../../lib/tools.tooltip.js"></script>
      <script type="text/javascript">
         // If this page is loaded outside the Scaladoc frameset, redirect
         // into the frameset index, preserving any member anchor as an
         // '@'-suffixed fragment so the target entity stays selected.
         if(top === self) {
            var url = '../../../index.html';
            var hash = 'io.gearpump.cluster.Application';
            var anchor = window.location.hash;
            var anchor_opt = '';
            if (anchor.length >= 1)
              anchor_opt = '@' + anchor.substring(1);
            window.location.href = url + '#' + hash + anchor_opt;
         }
      </script>
</head>
<body class="type">
<div id="definition">
<a href="Application$.html" title="Go to companion"><img src="../../../lib/trait_to_object_big.png" /></a>
<p id="owner"><a href="../../package.html" class="extype" name="io">io</a>.<a href="../package.html" class="extype" name="io.gearpump">gearpump</a>.<a href="package.html" class="extype" name="io.gearpump.cluster">cluster</a></p>
<h1><a href="Application$.html" title="Go to companion">Application</a></h1><h3><span class="morelinks"><div>
Related Docs:
<a href="Application$.html" title="See companion">object Application</a>
| <a href="package.html" class="extype" name="io.gearpump.cluster">package cluster</a>
</div></span></h3><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
</div>
<h4 id="signature" class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">trait</span>
</span>
<span class="symbol">
<span class="name">Application</span><span class="result"> extends <span class="extype" name="scala.AnyRef">AnyRef</span></span>
</span>
</h4>
<div id="comment" class="fullcommenttop"><div class="toggleContainer block">
<span class="toggle">Linear Supertypes</span>
<div class="superTypes hiddenContent"><span class="extype" name="scala.AnyRef">AnyRef</span>, <span class="extype" name="scala.Any">Any</span></div>
</div><div class="toggleContainer block">
<span class="toggle">Known Subclasses</span>
<div class="subClasses hiddenContent"><a href="Application$$DefaultApplication.html" class="extype" name="io.gearpump.cluster.Application.DefaultApplication">DefaultApplication</a>, <a href="../streaming/javaapi/StreamApplication.html" class="extype" name="io.gearpump.streaming.javaapi.StreamApplication">StreamApplication</a>, <a href="../streaming/StreamApplication.html" class="extype" name="io.gearpump.streaming.StreamApplication">StreamApplication</a></div>
</div></div>
<div id="mbrsel">
<div id="textfilter"><span class="pre"></span><span class="input"><input id="mbrsel-input" type="text" accesskey="/" /></span><span class="post"></span></div>
<div id="order">
<span class="filtertype">Ordering</span>
<ol>
<li class="alpha in"><span>Alphabetic</span></li>
<li class="inherit out"><span>By inheritance</span></li>
</ol>
</div>
<div id="ancestors">
<span class="filtertype">Inherited<br />
</span>
<ol id="linearization">
<li class="in" name="io.gearpump.cluster.Application"><span>Application</span></li><li class="in" name="scala.AnyRef"><span>AnyRef</span></li><li class="in" name="scala.Any"><span>Any</span></li>
</ol>
</div><div id="ancestors">
<span class="filtertype"></span>
<ol>
<li class="hideall out"><span>Hide All</span></li>
<li class="showall in"><span>Show all</span></li>
</ol>
<a href="http://docs.scala-lang.org/overviews/scaladoc/usage.html#members" target="_blank">Learn more about member selection</a>
</div>
<div id="visbl">
<span class="filtertype">Visibility</span>
<ol><li class="public in"><span>Public</span></li><li class="all out"><span>All</span></li></ol>
</div>
</div>
<div id="template">
<div id="allMembers">
<div id="values" class="values members">
<h3>Abstract Value Members</h3>
<ol><li name="io.gearpump.cluster.Application#appMaster" visbl="pub" data-isabs="true" fullComment="no" group="Ungrouped">
<a id="appMaster:Class[_<:io.gearpump.cluster.ApplicationMaster]"></a>
<a id="appMaster:Class[_<:ApplicationMaster]"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">abstract </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">appMaster</span><span class="result">: <span class="extype" name="scala.Predef.Class">Class</span>[_ <: <a href="ApplicationMaster.html" class="extype" name="io.gearpump.cluster.ApplicationMaster">ApplicationMaster</a>]</span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@appMaster:Class[_<:io.gearpump.cluster.ApplicationMaster]" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
</li><li name="io.gearpump.cluster.Application#name" visbl="pub" data-isabs="true" fullComment="no" group="Ungrouped">
<a id="name:String"></a>
<a id="name:String"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">abstract </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">name</span><span class="result">: <span class="extype" name="scala.Predef.String">String</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@name:String" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
</li><li name="io.gearpump.cluster.Application#userConfig" visbl="pub" data-isabs="true" fullComment="no" group="Ungrouped">
<a id="userConfig(implicitsystem:akka.actor.ActorSystem):io.gearpump.cluster.UserConfig"></a>
<a id="userConfig(ActorSystem):UserConfig"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">abstract </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">userConfig</span><span class="params">(<span class="implicit">implicit </span><span name="system">system: <span class="extype" name="akka.actor.ActorSystem">ActorSystem</span></span>)</span><span class="result">: <a href="UserConfig.html" class="extype" name="io.gearpump.cluster.UserConfig">UserConfig</a></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@userConfig(implicitsystem:akka.actor.ActorSystem):io.gearpump.cluster.UserConfig" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
</li></ol>
</div>
<div id="values" class="values members">
<h3>Concrete Value Members</h3>
<ol><li name="scala.AnyRef#!=" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="!=(x$1:Any):Boolean"></a>
<a id="!=(Any):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $bang$eq" class="name">!=</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@!=(x$1:Any):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.AnyRef###" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="##():Int"></a>
<a id="##():Int"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $hash$hash" class="name">##</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Int">Int</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@##():Int" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.AnyRef#==" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="==(x$1:Any):Boolean"></a>
<a id="==(Any):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $eq$eq" class="name">==</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@==(x$1:Any):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.Any#asInstanceOf" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="asInstanceOf[T0]:T0"></a>
<a id="asInstanceOf[T0]:T0"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">asInstanceOf</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="result">: <span class="extype" name="scala.Any.asInstanceOf.T0">T0</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@asInstanceOf[T0]:T0" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div>
</li><li name="scala.AnyRef#clone" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="clone():Object"></a>
<a id="clone():AnyRef"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">clone</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.AnyRef">AnyRef</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@clone():Object" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected[<a href="../../../java$lang.html" class="extype" name="java.lang">java.lang</a>] </dd><dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="defval" name="classOf[java.lang.CloneNotSupportedException]">...</span>
</span>)</span>
</dd></dl></div>
</li><li name="scala.AnyRef#eq" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="eq(x$1:AnyRef):Boolean"></a>
<a id="eq(AnyRef):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">eq</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@eq(x$1:AnyRef):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#equals" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="equals(x$1:Any):Boolean"></a>
<a id="equals(Any):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">equals</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@equals(x$1:Any):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.AnyRef#finalize" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="finalize():Unit"></a>
<a id="finalize():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">finalize</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@finalize():Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected[<a href="../../../java$lang.html" class="extype" name="java.lang">java.lang</a>] </dd><dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="symbol">classOf[java.lang.Throwable]</span>
</span>)</span>
</dd></dl></div>
</li><li name="scala.AnyRef#getClass" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="getClass():Class[_]"></a>
<a id="getClass():Class[_]"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">getClass</span><span class="params">()</span><span class="result">: <span class="extype" name="java.lang.Class">Class</span>[_]</span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@getClass():Class[_]" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.AnyRef#hashCode" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="hashCode():Int"></a>
<a id="hashCode():Int"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">hashCode</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Int">Int</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@hashCode():Int" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.Any#isInstanceOf" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="isInstanceOf[T0]:Boolean"></a>
<a id="isInstanceOf[T0]:Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">isInstanceOf</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@isInstanceOf[T0]:Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div>
</li><li name="scala.AnyRef#ne" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="ne(x$1:AnyRef):Boolean"></a>
<a id="ne(AnyRef):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">ne</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@ne(x$1:AnyRef):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#notify" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="notify():Unit"></a>
<a id="notify():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">notify</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@notify():Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#notifyAll" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="notifyAll():Unit"></a>
<a id="notifyAll():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">notifyAll</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@notifyAll():Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#synchronized" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="synchronized[T0](x$1:=>T0):T0"></a>
<a id="synchronized[T0](⇒T0):T0"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">synchronized</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="params">(<span name="arg0">arg0: ⇒ <span class="extype" name="java.lang.AnyRef.synchronized.T0">T0</span></span>)</span><span class="result">: <span class="extype" name="java.lang.AnyRef.synchronized.T0">T0</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@synchronized[T0](x$1:=>T0):T0" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#toString" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="toString():String"></a>
<a id="toString():String"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">toString</span><span class="params">()</span><span class="result">: <span class="extype" name="java.lang.String">String</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@toString():String" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="wait():Unit"></a>
<a id="wait():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">wait</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@wait():Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="defval" name="classOf[java.lang.InterruptedException]">...</span>
</span>)</span>
</dd></dl></div>
</li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="wait(x$1:Long,x$2:Int):Unit"></a>
<a id="wait(Long,Int):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">wait</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Long">Long</span></span>, <span name="arg1">arg1: <span class="extype" name="scala.Int">Int</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@wait(x$1:Long,x$2:Int):Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="defval" name="classOf[java.lang.InterruptedException]">...</span>
</span>)</span>
</dd></dl></div>
</li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="wait(x$1:Long):Unit"></a>
<a id="wait(Long):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">wait</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Long">Long</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.Application@wait(x$1:Long):Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="defval" name="classOf[java.lang.InterruptedException]">...</span>
</span>)</span>
</dd></dl></div>
</li></ol>
</div>
</div>
<div id="inheritedMembers">
<div class="parent" name="scala.AnyRef">
<h3>Inherited from <span class="extype" name="scala.AnyRef">AnyRef</span></h3>
</div><div class="parent" name="scala.Any">
<h3>Inherited from <span class="extype" name="scala.Any">Any</span></h3>
</div>
</div>
<div id="groupedMembers">
<div class="group" name="Ungrouped">
<h3>Ungrouped</h3>
</div>
</div>
</div>
<div id="tooltip"></div>
<div id="footer"> </div>
</body>
</html> | gearpump/gearpump.github.io | releases/0.7.6/api/scala/io/gearpump/cluster/Application.html | HTML | apache-2.0 | 28,251 |
<div class="unit one-fifth hide-on-mobiles">
<aside>
<ul>
<li class="{% if page.title == 'News' %}current{% endif %}">
<a href="{{ site.baseurl }}/news/">All News</a>
</li>
<li class="{% if page.title == 'Releases' %}current{% endif %}">
<a href="{{ site.baseurl }}/news/releases/">Calcite Releases</a>
</li>
</ul>
<h4>Recent Releases</h4>
<ul>
{% for post in site.categories.release limit:5 %}
<li class="{% if page.title == post.title %}current{% endif %}">
<a href="{{ site.baseurl }}{{ post.url }}">{{ post.version }}</a>
</li>
{% endfor %}
</ul>
<h4>Other News</h4>
<ul>
{% for post in site.posts %}{% comment %}
{% endcomment %}{% unless post.categories contains 'release' %}
<li class="{% if page.title == post.title %}current{% endif %}">
<a href="{{ site.baseurl }}{{ post.url }}">{{ post.title }}</a>
</li>
{% endunless %}{% comment %}
{% endcomment %}{% endfor %}
</ul>
</aside>
</div>
| dindin5258/calcite | site/_includes/news_contents.html | HTML | apache-2.0 | 1,067 |
/*
* ProGuard -- shrinking, optimization, obfuscation, and preverification
* of Java bytecode.
*
* Copyright (c) 2002-2017 Eric Lafortune @ GuardSquare
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package proguard.evaluation.value;
import proguard.classfile.*;
/**
 * This value factory creates particular (i.e. concrete, known) values.
 * Frequently used values are shared as singletons to avoid allocating
 * a new object for every occurrence.
 *
 * @author Eric Lafortune
 */
public class ParticularValueFactory
extends      ValueFactory
{
    // Shared copies of Value objects, to avoid creating a lot of objects.
    static final IntegerValue INTEGER_VALUE_M1 = new ParticularIntegerValue(-1);
    static final IntegerValue INTEGER_VALUE_0  = new ParticularIntegerValue(0);
    static final IntegerValue INTEGER_VALUE_1  = new ParticularIntegerValue(1);
    static final IntegerValue INTEGER_VALUE_2  = new ParticularIntegerValue(2);
    static final IntegerValue INTEGER_VALUE_3  = new ParticularIntegerValue(3);
    static final IntegerValue INTEGER_VALUE_4  = new ParticularIntegerValue(4);
    static final IntegerValue INTEGER_VALUE_5  = new ParticularIntegerValue(5);
    static final LongValue    LONG_VALUE_0     = new ParticularLongValue(0);
    static final LongValue    LONG_VALUE_1     = new ParticularLongValue(1);
    static final FloatValue   FLOAT_VALUE_0    = new ParticularFloatValue(0.0f);
    static final FloatValue   FLOAT_VALUE_1    = new ParticularFloatValue(1.0f);
    static final FloatValue   FLOAT_VALUE_2    = new ParticularFloatValue(2.0f);
    static final DoubleValue  DOUBLE_VALUE_0   = new ParticularDoubleValue(0.0);
    static final DoubleValue  DOUBLE_VALUE_1   = new ParticularDoubleValue(1.0);

    // Bit patterns of positive zero, used to tell +0.0 and -0.0 apart,
    // since the two compare equal under '=='.  These are constants, so
    // they are declared final (they were mutable static fields before).
    private static final int  POS_ZERO_FLOAT_BITS  = Float.floatToIntBits(0.0f);
    private static final long POS_ZERO_DOUBLE_BITS = Double.doubleToLongBits(0.0);


    // Implementations for ValueFactory.

    /**
     * Creates an IntegerValue with the given concrete value, returning a
     * shared instance for the common values -1..5.
     */
    public IntegerValue createIntegerValue(int value)
    {
        switch (value)
        {
            case -1: return INTEGER_VALUE_M1;
            case  0: return INTEGER_VALUE_0;
            case  1: return INTEGER_VALUE_1;
            case  2: return INTEGER_VALUE_2;
            case  3: return INTEGER_VALUE_3;
            case  4: return INTEGER_VALUE_4;
            case  5: return INTEGER_VALUE_5;
            default: return new ParticularIntegerValue(value);
        }
    }


    /**
     * Creates a LongValue with the given concrete value, returning a
     * shared instance for 0 and 1.
     */
    public LongValue createLongValue(long value)
    {
        return value == 0L ? LONG_VALUE_0 :
               value == 1L ? LONG_VALUE_1 :
                             new ParticularLongValue(value);
    }


    /**
     * Creates a FloatValue with the given concrete value, returning a
     * shared instance for +0.0, 1.0, and 2.0.
     */
    public FloatValue createFloatValue(float value)
    {
        // Make sure to distinguish between +0.0 and -0.0.
        return value == 0.0f && Float.floatToIntBits(value) == POS_ZERO_FLOAT_BITS
                             ? FLOAT_VALUE_0 :
               value == 1.0f ? FLOAT_VALUE_1 :
               value == 2.0f ? FLOAT_VALUE_2 :
                               new ParticularFloatValue(value);
    }


    /**
     * Creates a DoubleValue with the given concrete value, returning a
     * shared instance for +0.0 and 1.0.
     */
    public DoubleValue createDoubleValue(double value)
    {
        // Make sure to distinguish between +0.0 and -0.0.
        return value == 0.0 && Double.doubleToLongBits(value) == POS_ZERO_DOUBLE_BITS
                            ? DOUBLE_VALUE_0 :
               value == 1.0 ? DOUBLE_VALUE_1 :
                              new ParticularDoubleValue(value);
    }


    /**
     * Creates an ArrayReferenceValue of the given element type and length,
     * or the shared null reference if the type is null.
     */
    public ReferenceValue createArrayReferenceValue(String       type,
                                                    Clazz        referencedClass,
                                                    IntegerValue arrayLength)
    {
        return type == null ?
            REFERENCE_VALUE_NULL :
            new ArrayReferenceValue(ClassConstants.TYPE_ARRAY + type,
                                    referencedClass,
                                    arrayLength);
    }
}
| damienmg/bazel | third_party/java/proguard/proguard5.3.3/src/proguard/evaluation/value/ParticularValueFactory.java | Java | apache-2.0 | 4,464 |
module Fog
  module Compute
    class Ecloud
      class Real
        # Declares a #get_tasks request method via the basic_request
        # macro.  NOTE(review): basic_request is defined elsewhere in
        # fog's Ecloud code; presumably it generates a simple GET
        # against the tasks collection -- confirm against the Ecloud
        # service definition.
        basic_request :get_tasks
      end
    end
  end
end
| jreichhold/chef-repo | vendor/ruby/2.0.0/gems/fog-1.20.0/lib/fog/ecloud/requests/compute/get_tasks.rb | Ruby | apache-2.0 | 125 |
/**
* Copyright 2016 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {setStyle} from '../src/style';
import {validateData, writeScript} from '../3p/3p';
/**
* @param {!Window} global
* @param {!Object} data
*/
/**
 * Renders an InMobi ad into the 3p frame.
 *
 * Requires data.siteid and data.slotid. On a no-fill response ('nfr') it
 * reports noContentAvailable to the AMP runtime and hides the slot;
 * otherwise it signals renderStart once the ad is delivered.
 *
 * @param {!Window} global
 * @param {!Object} data
 */
export function inmobi(global, data) {
  validateData(data, ['siteid', 'slotid'], []);
  const inmobiConf = {
    siteid: data.siteid,
    slot: data.slotid,
    manual: true,
    onError: code => {
      if (code == 'nfr') {
        // No fill: tell AMP there is no ad and collapse the placeholder.
        global.context.noContentAvailable();
        // Consistently use the 3p frame's own document (the slot div is
        // written into global.document below).
        setStyle(
          global.document.getElementById('my-ad-slot'),
          'display',
          'none'
        );
      }
    },
    onSuccess: () => {
      global.context.renderStart();
    },
  };
  writeScript(global, 'https://cf.cdn.inmobi.com/ad/inmobi.secure.js', () => {
    global.document.write("<div id='my-ad-slot'></div>");
    global._inmobi.getNewAd(
      global.document.getElementById('my-ad-slot'),
      inmobiConf
    );
  });
}
| dotandads/amphtml | ads/inmobi.js | JavaScript | apache-2.0 | 1,441 |
// Copyright 2014 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.util;
import java.net.InetAddress;
import java.net.UnknownHostException;
/**
* Various utility methods for network related stuff.
*/
public final class NetUtil {

  // Must be volatile: the double-checked locking below is only safe under the
  // Java memory model if the read outside the synchronized block is a
  // volatile read.
  private static volatile String hostname;

  private NetUtil() {
  }

  /**
   * Returns the *cached* short hostname (computed at most once per the lifetime of a server). Can
   * take seconds to complete when the cache is cold.
   */
  public static String getCachedShortHostName() {
    if (hostname == null) {
      synchronized (NetUtil.class) {
        if (hostname == null) {
          hostname = computeShortHostName();
        }
      }
    }
    return hostname;
  }

  /**
   * Returns the short hostname or <code>unknown</code> if the host name could not be determined.
   * Performs reverse DNS lookup and can take seconds to complete.
   */
  private static String computeShortHostName() {
    try {
      return InetAddress.getLocalHost().getHostName();
    } catch (UnknownHostException e) {
      // Fall back to a sentinel rather than propagating a checked exception.
      return "unknown";
    }
  }
}
| damienmg/bazel | src/main/java/com/google/devtools/build/lib/util/NetUtil.java | Java | apache-2.0 | 1,655 |
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
v1beta1 "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
scheme "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme"
)
// DeploymentsGetter has a method to return a DeploymentInterface.
// A group's client should implement this interface.
type DeploymentsGetter interface {
	// Deployments returns a DeploymentInterface scoped to the given namespace.
	Deployments(namespace string) DeploymentInterface
}
// DeploymentInterface has methods to work with Deployment resources.
type DeploymentInterface interface {
Create(*v1beta1.Deployment) (*v1beta1.Deployment, error)
Update(*v1beta1.Deployment) (*v1beta1.Deployment, error)
UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error)
Delete(name string, options *v1.DeleteOptions) error
DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
Get(name string, options v1.GetOptions) (*v1beta1.Deployment, error)
List(opts v1.ListOptions) (*v1beta1.DeploymentList, error)
Watch(opts v1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error)
DeploymentExpansion
}
// deployments implements DeploymentInterface
type deployments struct {
	client rest.Interface // REST client used to issue all requests
	ns     string         // namespace every request is scoped to
}
// newDeployments constructs a deployments client for the given namespace,
// backed by the parent client's REST client.
func newDeployments(c *AppsV1beta1Client, namespace string) *deployments {
	d := &deployments{}
	d.client = c.RESTClient()
	d.ns = namespace
	return d
}
// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
func (d *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) {
	result = &v1beta1.Deployment{}
	req := d.client.Get().
		Namespace(d.ns).
		Resource("deployments").
		Name(name).
		VersionedParams(&options, scheme.ParameterCodec)
	err = req.Do().Into(result)
	return result, err
}
// List takes label and field selectors, and returns the list of Deployments that match those selectors.
func (d *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) {
	result = &v1beta1.DeploymentList{}
	req := d.client.Get().
		Namespace(d.ns).
		Resource("deployments").
		VersionedParams(&opts, scheme.ParameterCodec)
	err = req.Do().Into(result)
	return result, err
}
// Watch returns a watch.Interface that watches the requested deployments.
func (d *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) {
	// Force watch semantics regardless of what the caller passed in.
	opts.Watch = true
	req := d.client.Get().
		Namespace(d.ns).
		Resource("deployments").
		VersionedParams(&opts, scheme.ParameterCodec)
	return req.Watch()
}
// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any.
func (d *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
	result = &v1beta1.Deployment{}
	req := d.client.Post().
		Namespace(d.ns).
		Resource("deployments").
		Body(deployment)
	err = req.Do().Into(result)
	return result, err
}
// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
func (d *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
	result = &v1beta1.Deployment{}
	req := d.client.Put().
		Namespace(d.ns).
		Resource("deployments").
		Name(deployment.Name).
		Body(deployment)
	err = req.Do().Into(result)
	return result, err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (d *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
	result = &v1beta1.Deployment{}
	req := d.client.Put().
		Namespace(d.ns).
		Resource("deployments").
		Name(deployment.Name).
		SubResource("status").
		Body(deployment)
	err = req.Do().Into(result)
	return result, err
}
// Delete takes name of the deployment and deletes it. Returns an error if one occurs.
func (d *deployments) Delete(name string, options *v1.DeleteOptions) error {
	req := d.client.Delete().
		Namespace(d.ns).
		Resource("deployments").
		Name(name).
		Body(options)
	return req.Do().Error()
}
// DeleteCollection deletes a collection of objects.
func (d *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
	req := d.client.Delete().
		Namespace(d.ns).
		Resource("deployments").
		VersionedParams(&listOptions, scheme.ParameterCodec).
		Body(options)
	return req.Do().Error()
}
// Patch applies the patch and returns the patched deployment.
func (d *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) {
	result = &v1beta1.Deployment{}
	req := d.client.Patch(pt).
		Namespace(d.ns).
		Resource("deployments").
		SubResource(subresources...).
		Name(name).
		Body(data)
	err = req.Do().Into(result)
	return result, err
}
| kedgeproject/kedge | vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/deployment.go | GO | apache-2.0 | 5,581 |
/*
* Copyright 2018 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.crypto;
import org.keycloak.jose.jwe.JWEConstants;
import org.keycloak.models.KeycloakSession;
/**
 * Factory for the content-encryption-key management provider implementing
 * RSA-OAEP with SHA-256 (the JWE {@code RSA-OAEP-256} algorithm).
 */
public class RsaesOaep256CekManagementProviderFactory implements CekManagementProviderFactory {

    /** Provider id: the JWE "alg" identifier for RSA-OAEP with SHA-256. */
    public static final String ID = JWEConstants.RSA_OAEP_256;

    @Override
    public CekManagementProvider create(KeycloakSession session) {
        // Delegate to the generic RSA CEK provider, parameterized by algorithm id.
        return new RsaCekManagementProvider(session, ID);
    }

    @Override
    public String getId() {
        return ID;
    }
}
| keycloak/keycloak | services/src/main/java/org/keycloak/crypto/RsaesOaep256CekManagementProviderFactory.java | Java | apache-2.0 | 1,171 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket;
import org.elasticsearch.search.aggregations.BaseAggregationTestCase;
import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder;
/**
 * Randomized serialization/equality tests for {@link AutoDateHistogramAggregationBuilder},
 * driven by the {@link BaseAggregationTestCase} harness.
 */
public class AutoDateHistogramTests extends BaseAggregationTestCase<AutoDateHistogramAggregationBuilder> {

    @Override
    protected AutoDateHistogramAggregationBuilder createTestAggregatorBuilder() {
        final AutoDateHistogramAggregationBuilder factory =
                new AutoDateHistogramAggregationBuilder(randomAlphaOfLengthBetween(1, 10));
        factory.field(INT_FIELD_NAME);
        factory.setNumBuckets(randomIntBetween(1, 100000));
        // Each optional property is toggled independently; the order of the
        // randomBoolean() calls matches the original so the random stream is unchanged.
        if (randomBoolean()) {
            factory.format("###.##");
        }
        if (randomBoolean()) {
            factory.missing(randomIntBetween(0, 10));
        }
        if (randomBoolean()) {
            factory.timeZone(randomDateTimeZone());
        }
        return factory;
    }
}
| gfyoung/elasticsearch | server/src/test/java/org/elasticsearch/search/aggregations/bucket/AutoDateHistogramTests.java | Java | apache-2.0 | 1,748 |
package com.twitter.finagle.memcached.unit
import com.twitter.finagle.memcached._
import com.twitter.finagle.memcached.protocol.Value
import scala.collection.immutable
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
@RunWith(classOf[JUnitRunner])
class GetResultTest extends FunSuite with MockitoSugar {
class Context {
val value1 = mock[Value]
val value2 = mock[Value]
val ex1 = mock[Exception]
val ex2 = mock[Exception]
val empty = GetResult()
val left = GetResult(
hits = Map("h1" -> value1),
misses = immutable.Set("m1"),
failures = Map("f1" -> ex1))
val right = GetResult(
hits = Map("h2" -> value2),
misses = immutable.Set("m2"),
failures = Map("f2" -> ex2))
}
test("add together hits/misses/failures with ++") {
val context = new Context
import context._
info("both empty")
assert(empty ++ empty === empty)
info("non-empty left, empty right")
assert(left ++ empty === left)
info("Empty left, non-empty right")
assert(empty ++ right === right)
info("non-empty left, non-empty right")
assert(left ++ right === GetResult(
hits = Map("h1" -> value1, "h2" -> value2),
misses = immutable.Set("m1", "m2"),
failures = Map("f1" -> ex1, "f2" -> ex2)
))
}
test("merged of empty seq produces empty GetResult") {
val context = new Context
import context._
assert(GetResult.merged(Seq[GetResult]()) === GetResult())
}
test("merged of single item produces that item") {
val context = new Context
import context._
val getResult = GetResult()
assert(GetResult.merged(Seq(getResult)) === getResult)
}
test("merge is the same as ++") {
val context = new Context
import context._
val subResults = (1 to 10) map { i =>
GetResult(
hits = Map("h" + i -> mock[Value]),
misses = immutable.Set("m" + i),
failures = Map("f" + i -> mock[Exception]))
}
assert(GetResult.merged(subResults) === (subResults.reduceLeft { _ ++ _ }))
}
}
| suls/finagle | finagle-memcached/src/test/scala/com/twitter/finagle/memcached/unit/GetResultTest.scala | Scala | apache-2.0 | 2,150 |
/**
* Copyright 2005-2015 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ecl2.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.rice.krad.datadictionary;
import org.apache.commons.lang.StringUtils;
import org.kuali.rice.krad.bo.BusinessObject;
import org.kuali.rice.krad.datadictionary.parse.BeanTag;
import org.kuali.rice.krad.datadictionary.parse.BeanTagAttribute;
import org.kuali.rice.krad.datadictionary.validator.ValidationTrace;
/**
* A single BusinessObject entry in the DataDictionary, which contains information relating to the display, validation,
* and general maintenance of a BusinessObject and its attributes.
*
* Note: the setters do copious amounts of validation, to facilitate generating errors during the parsing process
*
* @author Kuali Rice Team (rice.collab@kuali.org)
*/
@BeanTag(name = "businessObjectEntry")
public class BusinessObjectEntry extends DataObjectEntry {
    private static final org.apache.log4j.Logger LOG = org.apache.log4j.Logger.getLogger(BusinessObjectEntry.class);
    /**
     * Sets the business object class and propagates it as the source class
     * of any already-configured relationship definitions.
     *
     * @throws IllegalArgumentException if the class is null
     */
    public void setBusinessObjectClass(Class<? extends BusinessObject> businessObjectClass) {
        super.setDataObjectClass(businessObjectClass);
        // Note: the null check runs after the super call; the superclass is
        // presumably tolerant of null here — TODO confirm.
        if (businessObjectClass == null) {
            throw new IllegalArgumentException("invalid (null) dataObjectClass");
        }
        if (getRelationships() != null) {
            for (RelationshipDefinition rd : getRelationships()) {
                rd.setSourceClass(businessObjectClass);
            }
        }
    }
    /** Returns the data object class narrowed to BusinessObject. */
    public Class<? extends BusinessObject> getBusinessObjectClass() {
        return (Class<? extends BusinessObject>) super.getDataObjectClass();
    }
    /**
     * The baseBusinessObjectClass is an optional parameter for specifying a base class
     * for the dataObjectClass, allowing the data dictionary to index by the base class
     * in addition to the current class.
     */
    public void setBaseBusinessObjectClass(Class<? extends BusinessObject> baseBusinessObjectClass) {
        super.setBaseDataObjectClass(baseBusinessObjectClass);
    }
    @BeanTagAttribute(name = "baseBusinessObjectClass")
    public Class<? extends BusinessObject> getBaseBusinessObjectClass() {
        return (Class<? extends BusinessObject>) super.getBaseDataObjectClass();
    }
    /**
     * Directly validate simple fields, call completeValidation on Definition fields.
     */
    @Override
    public void completeValidation() {
        completeValidation(new ValidationTrace());
    }
    @Override
    public void completeValidation(ValidationTrace tracer) {
        super.completeValidation(tracer);
        try {
            if (inactivationBlockingDefinitions != null && !inactivationBlockingDefinitions.isEmpty()) {
                for (InactivationBlockingDefinition inactivationBlockingDefinition : inactivationBlockingDefinitions) {
                    // Each definition validates against a copy of the trace so
                    // sibling definitions do not pollute each other's context.
                    inactivationBlockingDefinition.completeValidation(getDataObjectClass(), null, tracer.getCopy());
                }
            }
        } catch (Exception ex) {
            // Broad catch by design: a validation failure is recorded as a
            // dictionary error rather than aborting dictionary loading.
            String currentValues[] =
                    {"BO Class = " + getBusinessObjectClass(), "Exception = " + ex.getMessage()};
            tracer.createError("Unable to validate BO Entry", currentValues);
            LOG.error("Exception while validating BusinessObjectEntry: " + getBusinessObjectClass(), ex );
        }
    }
    @Override
    public void dataDictionaryPostProcessing() {
        super.dataDictionaryPostProcessing();
        if (inactivationBlockingDefinitions != null) {
            for (InactivationBlockingDefinition ibd : inactivationBlockingDefinitions) {
                ibd.setBusinessObjectClass(getBusinessObjectClass());
                if (StringUtils.isNotBlank(ibd.getBlockedReferencePropertyName())
                        && ibd.getBlockedBusinessObjectClass() == null) {
                    // if the user didn't specify a class name for the blocked reference, determine it here
                    ibd.setBlockedBusinessObjectClass(DataDictionary.getAttributeClass(getDataObjectClass(),
                            ibd.getBlockedReferencePropertyName()));
                }
                ibd.setBlockingReferenceBusinessObjectClass(getBusinessObjectClass());
            }
        }
    }
}
| bhutchinson/rice | rice-framework/krad-web-framework/src/main/java/org/kuali/rice/krad/datadictionary/BusinessObjectEntry.java | Java | apache-2.0 | 4,888 |
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2015 - ROLI Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#ifndef JUCE_COLOURSELECTOR_H_INCLUDED
#define JUCE_COLOURSELECTOR_H_INCLUDED
//==============================================================================
/**
A component that lets the user choose a colour.
This shows RGB sliders and a colourspace that the user can pick colours from.
This class is also a ChangeBroadcaster, so listeners can register to be told
when the colour changes.
*/
class JUCE_API ColourSelector : public Component,
                                public ChangeBroadcaster,
                                protected SliderListener
{
public:
    //==============================================================================
    /** Options for the type of selector to show. These are passed into the constructor. */
    enum ColourSelectorOptions
    {
        showAlphaChannel = 1 << 0, /**< if set, the colour's alpha channel can be changed as well as its RGB. */
        showColourAtTop = 1 << 1, /**< if set, a swatch of the colour is shown at the top of the component. */
        showSliders = 1 << 2, /**< if set, RGB sliders are shown at the bottom of the component. */
        showColourspace = 1 << 3 /**< if set, a big HSV selector is shown. */
    };
    //==============================================================================
    /** Creates a ColourSelector object.
        The flags are a combination of values from the ColourSelectorOptions enum, specifying
        which of the selector's features should be visible.
        The edgeGap value specifies the amount of space to leave around the edge.
        gapAroundColourSpaceComponent indicates how much of a gap to put around the
        colourspace and hue selector components.
    */
    ColourSelector (int flags = (showAlphaChannel | showColourAtTop | showSliders | showColourspace),
                    int edgeGap = 4,
                    int gapAroundColourSpaceComponent = 7);
    /** Destructor. */
    ~ColourSelector();
    //==============================================================================
    /** Returns the colour that the user has currently selected.
        The ColourSelector class is also a ChangeBroadcaster, so listeners can
        register to be told when the colour changes.
        @see setCurrentColour
    */
    Colour getCurrentColour() const;
    /** Changes the colour that is currently being shown. */
    void setCurrentColour (Colour newColour);
    //==============================================================================
    /** Tells the selector how many preset colour swatches you want to have on the component.
        To enable swatches, you'll need to override getNumSwatches(), getSwatchColour(), and
        setSwatchColour(), to return the number of colours you want, and to set and retrieve
        their values.
    */
    virtual int getNumSwatches() const;
    /** Called by the selector to find out the colour of one of the swatches.
        Your subclass should return the colour of the swatch with the given index.
        To enable swatches, you'll need to override getNumSwatches(), getSwatchColour(), and
        setSwatchColour(), to return the number of colours you want, and to set and retrieve
        their values.
    */
    virtual Colour getSwatchColour (int index) const;
    /** Called by the selector when the user puts a new colour into one of the swatches.
        Your subclass should change the colour of the swatch with the given index.
        To enable swatches, you'll need to override getNumSwatches(), getSwatchColour(), and
        setSwatchColour(), to return the number of colours you want, and to set and retrieve
        their values.
    */
    virtual void setSwatchColour (int index, const Colour& newColour) const;
    //==============================================================================
    /** A set of colour IDs to use to change the colour of various aspects of the keyboard.
        These constants can be used either via the Component::setColour(), or LookAndFeel::setColour()
        methods.
        @see Component::setColour, Component::findColour, LookAndFeel::setColour, LookAndFeel::findColour
    */
    enum ColourIds
    {
        backgroundColourId = 0x1007000, /**< the colour used to fill the component's background. */
        labelTextColourId = 0x1007001 /**< the colour used for the labels next to the sliders. */
    };
private:
    //==============================================================================
    // Private child components and markers; defined in the implementation file.
    class ColourSpaceView;
    class HueSelectorComp;
    class SwatchComponent;
    class ColourComponentSlider;
    class ColourSpaceMarker;
    class HueSelectorMarker;
    friend class ColourSpaceView;
    friend struct ContainerDeletePolicy<ColourSpaceView>;
    friend class HueSelectorComp;
    friend struct ContainerDeletePolicy<HueSelectorComp>;
    Colour colour;      // the currently selected colour (see getCurrentColour())
    float h, s, v;      // HSV components kept in sync with 'colour' via updateHSV()
    ScopedPointer<Slider> sliders[4];   // NOTE(review): presumably R, G, B and alpha — confirm in the .cpp
    ScopedPointer<ColourSpaceView> colourSpace;
    ScopedPointer<HueSelectorComp> hueSelector;
    OwnedArray<SwatchComponent> swatchComponents;
    const int flags;    // combination of ColourSelectorOptions bits passed to the constructor
    int edgeGap;
    Rectangle<int> previewArea;
    void setHue (float newH);
    void setSV (float newS, float newV);
    void updateHSV();
    void update();
    void sliderValueChanged (Slider*) override;
    void paint (Graphics&) override;
    void resized() override;
    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (ColourSelector)
   #if JUCE_CATCH_DEPRECATED_CODE_MISUSE
    // This constructor is here temporarily to prevent old code compiling, because the parameters
    // have changed - if you get an error here, update your code to use the new constructor instead..
    ColourSelector (bool);
   #endif
};
#endif // JUCE_COLOURSELECTOR_H_INCLUDED
| nepholi/ScoringTable | third_party/JUCE/modules/juce_gui_extra/misc/juce_ColourSelector.h | C | apache-2.0 | 6,753 |
#region Copyright notice and license
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#endregion
using System;
using System.Runtime.InteropServices;
using BenchmarkDotNet.Attributes;
using Grpc.Core.Internal;
namespace Grpc.Microbenchmarks
{
public class ScalabilityExampleBenchmark : CommonThreadedBase
{
protected override bool NeedsEnvironment => false;
// An example of testing scalability of a method that scales perfectly.
// This method provides a baseline for how well can CommonThreadedBase
// measure scalability.
const int Iterations = 50 * 1000 * 1000; // High number to make the overhead of RunConcurrent negligible.
[Benchmark(OperationsPerInvoke = Iterations)]
public void PerfectScalingExample()
{
RunConcurrent(() => { RunBody(); });
}
private int RunBody()
{
int result = 0;
for (int i = 0; i < Iterations; i++)
{
// perform some operation that is completely independent from
// other threads and therefore should scale perfectly if given
// a dedicated thread.
for (int j = 0; j < 100; j++)
{
result = result ^ i ^ j ;
}
}
return result;
}
}
}
| ctiller/grpc | src/csharp/Grpc.Microbenchmarks/ScalabityExampleBenchmark.cs | C# | apache-2.0 | 1,906 |
# frozen_string_literal: true
require 'selenium-webdriver'
require 'headless'
# WebdriverHelpers contains helper functions to create the browser to use in the UI tests.
module WebdriverHelpers
  # Boots a headless X display (via the Headless gem) and returns a Chrome
  # driver configured with relaxed certificate/sandbox settings and a fixed
  # 1920x1080 window.
  def self.start_webdriver
    @headless = Headless.new
    @headless.start
    options = Selenium::WebDriver::Chrome::Options.new
    %w[
      --ignore-certificate-errors
      --disable-popup-blocking
      --disable-translate
      --disable-gpu
      --no-sandbox
    ].each { |arg| options.add_argument(arg) }
    # options.add_argument('--headless')
    driver = Selenium::WebDriver.for :chrome, options: options
    driver.manage.window.size = Selenium::WebDriver::Dimension.new(1920, 1080)
    driver.manage.timeouts.implicit_wait = 20 # seconds
    driver.manage.timeouts.page_load = 60 # seconds
    driver
  end

  # Quits the driver and tears down the headless display started above.
  def self.stop_webdriver(driver)
    driver.quit
    @headless.destroy
  end
end
| kyoto/tectonic-installer | tests/rspec/lib/webdriver_helpers.rb | Ruby | apache-2.0 | 987 |
/*
* Copyright 2000-2013 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.openapi.project;
import com.intellij.openapi.diagnostic.Attachment;
import com.intellij.openapi.diagnostic.ExceptionWithAttachments;
import com.intellij.openapi.util.Computable;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
/**
* Thrown on accessing indices when they're not ready, in so-called dumb mode. Possible fixes:
* <ul>
* <li> If {@link com.intellij.openapi.actionSystem.AnAction#actionPerformed(com.intellij.openapi.actionSystem.AnActionEvent)} is in stack trace,
* consider making the action not implement {@link DumbAware}.
*
* <li> A {@link DumbAware} action, having got this exception, may just notify the user that the requested activity is not possible while
* indexing is in progress. It can be done via a dialog (see {@link com.intellij.openapi.ui.Messages}) or a status bar balloon
* (see {@link DumbService#showDumbModeNotification(String)}, {@link com.intellij.openapi.actionSystem.ex.ActionUtil#showDumbModeWarning(com.intellij.openapi.actionSystem.AnActionEvent...)}).
*
* <li> If index access is performed from some non-urgent invokeLater activity, consider replacing it with
* {@link DumbService#smartInvokeLater(Runnable)}. Note that this 'later' can be very late, several minutes may pass. So if that code
* involves user interaction, {@link DumbService#smartInvokeLater(Runnable)} should probably not be used to avoid dialogs popping out of the blue.
*
* <li> If it's a non-urgent background process (e.g. compilation, usage search), consider replacing topmost read-action with
* {@link DumbService#runReadActionInSmartMode(Computable)}.
*
* <li> If the exception comes from within Java's findClass call, and the IDE is currently performing a user-initiated action or a
* task when skipping findClass would lead to very negative consequences (e.g. not stopping at a breakpoint), then it might be possible
* to avoid index query by using alternative resolve (and findClass) strategy, which is significantly slower and might return null. To do this,
* use {@link DumbService#setAlternativeResolveEnabled(boolean)}.
*
* <li> It's preferable to avoid the exception entirely by adding {@link DumbService#isDumb()} checks where necessary.
* </ul>
*
* @author peter
* @see DumbService
* @see DumbAware
*/
public class IndexNotReadyException extends RuntimeException implements ExceptionWithAttachments {
  // Optional trace captured when dumb mode started; used for error reporting.
  @Nullable private final Throwable myStartTrace;

  public IndexNotReadyException() {
    this(null);
  }

  public IndexNotReadyException(@Nullable Throwable startTrace) {
    super("Please change caller according to " + IndexNotReadyException.class.getName() + " documentation");
    myStartTrace = startTrace;
  }

  /**
   * Exposes the recorded dumb-mode start trace (if any) as an attachment so
   * error reports show where indexing began.
   */
  @NotNull
  @Override
  public Attachment[] getAttachments() {
    if (myStartTrace == null) {
      return Attachment.EMPTY_ARRAY;
    }
    return new Attachment[]{new Attachment("indexingStart", myStartTrace)};
  }
}
| hurricup/intellij-community | platform/core-api/src/com/intellij/openapi/project/IndexNotReadyException.java | Java | apache-2.0 | 3,551 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras Applications are canned architectures with pre-trained weights."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.applications import inception_v3
from tensorflow.python.keras.applications import mobilenet
from tensorflow.python.keras.applications import resnet50
from tensorflow.python.keras.applications import vgg16
from tensorflow.python.keras.applications import vgg19
from tensorflow.python.keras.applications import xception
from tensorflow.python.keras.applications.inception_v3 import InceptionV3
from tensorflow.python.keras.applications.mobilenet import MobileNet
from tensorflow.python.keras.applications.resnet50 import ResNet50
from tensorflow.python.keras.applications.vgg16 import VGG16
from tensorflow.python.keras.applications.vgg19 import VGG19
from tensorflow.python.keras.applications.xception import Xception
# Remove the __future__ compatibility names from the module namespace so they
# are not exposed as part of the public keras.applications API.
del absolute_import
del division
del print_function
| tornadozou/tensorflow | tensorflow/python/keras/applications/__init__.py | Python | apache-2.0 | 1,675 |
//===--- TransEmptyStatements.cpp - Transformations to ARC mode -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// removeEmptyStatementsAndDealloc:
//
// Removes empty statements that are leftovers from previous transformations.
// e.g for
//
// [x retain];
//
// removeRetainReleaseDealloc will leave an empty ";" that removeEmptyStatements
// will remove.
//
//===----------------------------------------------------------------------===//
#include "Transforms.h"
#include "Internals.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/SourceManager.h"
using namespace clang;
using namespace arcmt;
using namespace trans;
/// Returns true if the null statement \p S is the leftover semicolon of an
/// expanded ARCMT marker macro, i.e. one of the macro expansions recorded in
/// \p MacroLocs is immediately followed by this semicolon in the same file.
static bool isEmptyARCMTMacroStatement(NullStmt *S,
                                       std::vector<SourceLocation> &MacroLocs,
                                       ASTContext &Ctx) {
  if (!S->hasLeadingEmptyMacro())
    return false;
  SourceLocation SemiLoc = S->getSemiLoc();
  // Only handle semicolons written directly in a file, not ones produced by
  // a macro expansion.
  if (SemiLoc.isInvalid() || SemiLoc.isMacroID())
    return false;
  if (MacroLocs.empty())
    return false;
  SourceManager &SM = Ctx.getSourceManager();
  // MacroLocs is in source order; find the last macro location preceding the
  // semicolon.
  std::vector<SourceLocation>::iterator
    I = std::upper_bound(MacroLocs.begin(), MacroLocs.end(), SemiLoc,
                         BeforeThanCompare<SourceLocation>(SM));
  // NOTE(review): assumes at least one macro location precedes SemiLoc;
  // upper_bound() == begin() here would underflow — confirm callers only
  // pass semicolons that follow a recorded macro.
  --I;
  // Location just past the macro name at that expansion.
  SourceLocation
      AfterMacroLoc = I->getLocWithOffset(getARCMTMacroName().size());
  assert(AfterMacroLoc.isFileID());
  // Fast path: the semicolon directly abuts the macro name.
  if (AfterMacroLoc == SemiLoc)
    return true;
  int RelOffs = 0;
  if (!SM.isInSameSLocAddrSpace(AfterMacroLoc, SemiLoc, &RelOffs))
    return false;
  // The semicolon must come after the macro, not before it.
  if (RelOffs < 0)
    return false;
  // We make the reasonable assumption that a semicolon after 100 characters
  // means that it is not the next token after our macro. If this assumption
  // fails it is not critical, we will just fail to clear out, e.g., an empty
  // 'if'.
  if (RelOffs - getARCMTMacroName().size() > 100)
    return false;
  // Otherwise verify the semicolon is really the next token after the macro.
  SourceLocation AfterMacroSemiLoc = findSemiAfterLocation(AfterMacroLoc, Ctx);
  return AfterMacroSemiLoc == SemiLoc;
}
namespace {
/// \brief Returns true if the statement became empty due to previous
/// transformations.
///
/// "Empty" is recursive: a compound/if/while/do/for-in statement is empty if
/// every sub-statement is empty and its condition/collection expression has
/// no side effects.
class EmptyChecker : public StmtVisitor<EmptyChecker, bool> {
  ASTContext &Ctx;
  std::vector<SourceLocation> &MacroLocs; // ARCMT marker macro expansions.

public:
  EmptyChecker(ASTContext &ctx, std::vector<SourceLocation> &macroLocs)
    : Ctx(ctx), MacroLocs(macroLocs) { }

  // A lone ';' is "empty" only when it is the leftover of an expanded ARCMT
  // marker macro (i.e. a statement a previous pass deleted).
  bool VisitNullStmt(NullStmt *S) {
    return isEmptyARCMTMacroStatement(S, MacroLocs, Ctx);
  }
  bool VisitCompoundStmt(CompoundStmt *S) {
    if (S->body_empty())
      return false; // was already empty, not because of transformations.
    for (CompoundStmt::body_iterator
           I = S->body_begin(), E = S->body_end(); I != E; ++I)
      if (!Visit(*I))
        return false;
    return true;
  }
  // An 'if' is empty when its condition is side-effect free and both the
  // then-branch and (if present) the else-branch are themselves empty.
  bool VisitIfStmt(IfStmt *S) {
    if (S->getConditionVariable())
      return false;
    Expr *condE = S->getCond();
    if (!condE)
      return false;
    if (hasSideEffects(condE, Ctx))
      return false;
    if (!S->getThen() || !Visit(S->getThen()))
      return false;
    if (S->getElse() && !Visit(S->getElse()))
      return false;
    return true;
  }
  bool VisitWhileStmt(WhileStmt *S) {
    if (S->getConditionVariable())
      return false;
    Expr *condE = S->getCond();
    if (!condE)
      return false;
    if (hasSideEffects(condE, Ctx))
      return false;
    if (!S->getBody())
      return false;
    return Visit(S->getBody());
  }
  bool VisitDoStmt(DoStmt *S) {
    Expr *condE = S->getCond();
    if (!condE)
      return false;
    if (hasSideEffects(condE, Ctx))
      return false;
    if (!S->getBody())
      return false;
    return Visit(S->getBody());
  }
  bool VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
    Expr *Exp = S->getCollection();
    if (!Exp)
      return false;
    if (hasSideEffects(Exp, Ctx))
      return false;
    if (!S->getBody())
      return false;
    return Visit(S->getBody());
  }
  bool VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
    if (!S->getSubStmt())
      return false;
    return Visit(S->getSubStmt());
  }
};
/// Walks the translation unit and removes statements that became empty
/// (directly or recursively, per EmptyChecker) due to previous migration
/// rewrites.
class EmptyStatementsRemover :
                            public RecursiveASTVisitor<EmptyStatementsRemover> {
  MigrationPass &Pass;

public:
  EmptyStatementsRemover(MigrationPass &pass) : Pass(pass) { }

  bool TraverseStmtExpr(StmtExpr *E) {
    CompoundStmt *S = E->getSubStmt();
    // The iterator end used to be named 'E', shadowing the StmtExpr
    // parameter; renamed to 'BodyEnd' for clarity.
    for (CompoundStmt::body_iterator
           I = S->body_begin(), BodyEnd = S->body_end(); I != BodyEnd; ++I) {
      // The last statement of a statement-expression is its value, so it
      // must be kept even if it looks empty.
      if (I != BodyEnd - 1)
        check(*I);
      TraverseStmt(*I);
    }
    return true;
  }

  bool VisitCompoundStmt(CompoundStmt *S) {
    // Try to remove every child statement that is now empty.
    for (CompoundStmt::body_iterator
           I = S->body_begin(), BodyEnd = S->body_end(); I != BodyEnd; ++I)
      check(*I);
    return true;
  }

  ASTContext &getContext() { return Pass.Ctx; }

private:
  // Removes S (inside a transaction) if it became empty due to the
  // migration rewrites.
  void check(Stmt *S) {
    if (!S) return;
    if (EmptyChecker(Pass.Ctx, Pass.ARCMTMacroLocs).Visit(S)) {
      Transaction Trans(Pass.TA);
      Pass.TA.removeStmt(S);
    }
  }
};
} // anonymous namespace
/// Returns true if every statement in \p body became empty due to previous
/// migration rewrites. An already-empty body also returns true.
static bool isBodyEmpty(CompoundStmt *body, ASTContext &Ctx,
                        std::vector<SourceLocation> &MacroLocs) {
  // Hoisted out of the loop: the checker carries no per-visit state, so one
  // instance can test every child statement.
  EmptyChecker Checker(Ctx, MacroLocs);
  for (CompoundStmt::body_iterator
         I = body->body_begin(), E = body->body_end(); I != E; ++I)
    if (!Checker.Visit(*I))
      return false;
  return true;
}
/// Removes -dealloc/-finalize implementations whose bodies became empty due
/// to the migration; when no -dealloc exists, a surviving non-empty
/// -finalize is renamed to -dealloc.
static void cleanupDeallocOrFinalize(MigrationPass &pass) {
  ASTContext &Ctx = pass.Ctx;
  TransformActions &TA = pass.TA;
  DeclContext *DC = Ctx.getTranslationUnitDecl();
  Selector FinalizeSel =
      Ctx.Selectors.getNullarySelector(&pass.Ctx.Idents.get("finalize"));

  typedef DeclContext::specific_decl_iterator<ObjCImplementationDecl>
    impl_iterator;
  for (impl_iterator I = impl_iterator(DC->decls_begin()),
                     E = impl_iterator(DC->decls_end()); I != E; ++I) {
    ObjCMethodDecl *DeallocM = 0;
    ObjCMethodDecl *FinalizeM = 0;
    // Locate the -dealloc and -finalize implementations, if any.
    for (ObjCImplementationDecl::instmeth_iterator
           MI = I->instmeth_begin(),
           ME = I->instmeth_end(); MI != ME; ++MI) {
      ObjCMethodDecl *MD = *MI;
      if (!MD->hasBody())
        continue;

      if (MD->getMethodFamily() == OMF_dealloc) {
        DeallocM = MD;
      } else if (MD->isInstanceMethod() && MD->getSelector() == FinalizeSel) {
        FinalizeM = MD;
      }
    }
    if (DeallocM) {
      // Remove -dealloc only if its body is now empty.
      if (isBodyEmpty(DeallocM->getCompoundBody(), Ctx, pass.ARCMTMacroLocs)) {
        Transaction Trans(TA);
        TA.remove(DeallocM->getSourceRange());
      }
      // When a -dealloc exists, -finalize is removed regardless of whether
      // its body is empty.
      if (FinalizeM) {
        Transaction Trans(TA);
        TA.remove(FinalizeM->getSourceRange());
      }
    } else if (FinalizeM) {
      if (isBodyEmpty(FinalizeM->getCompoundBody(), Ctx, pass.ARCMTMacroLocs)) {
        Transaction Trans(TA);
        TA.remove(FinalizeM->getSourceRange());
      } else {
        // Keep the body but rename the selector: -finalize -> -dealloc.
        Transaction Trans(TA);
        TA.replaceText(FinalizeM->getSelectorStartLoc(), "finalize", "dealloc");
      }
    }
  }
}
/// Entry point for this pass: strips statements emptied by earlier rewrites,
/// cleans up empty -dealloc/-finalize implementations, and finally erases
/// the ARCMT marker macros themselves.
void trans::removeEmptyStatementsAndDeallocFinalize(MigrationPass &pass) {
  EmptyStatementsRemover(pass).TraverseDecl(pass.Ctx.getTranslationUnitDecl());

  cleanupDeallocOrFinalize(pass);

  // Each removal happens in its own transaction, as elsewhere in this file.
  for (unsigned Idx = 0, Count = pass.ARCMTMacroLocs.size();
       Idx != Count; ++Idx) {
    Transaction Trans(pass.TA);
    pass.TA.remove(pass.ARCMTMacroLocs[Idx]);
  }
}
| santoshn/softboundcets-34 | softboundcets-llvm-clang34/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp | C++ | bsd-3-clause | 7,557 |
/***** Vxa_VTaskCursor Implementation *****/
/************************
************************
***** Interfaces *****
************************
************************/
/********************
***** System *****
********************/
#include "Vk.h"
/******************
***** Self *****
******************/
#include "Vxa_VTaskCursor.h"
/************************
***** Supporting *****
************************/
#include "Vxa_VFiniteSet.h"
/******************************
******************************
***** *****
***** Vxa::VTaskCursor *****
***** *****
******************************
******************************/
/**************************
**************************
***** Construction *****
**************************
**************************/
// Binds the cursor to the finite-set domain it will iterate over; a freshly
// constructed cursor starts out alive.
Vxa::VTaskCursor::VTaskCursor (VFiniteSet *pDomain) : BaseClass (pDomain), m_bAlive (true) {
}
/*************************
*************************
***** Destruction *****
*************************
*************************/
// Nothing to release explicitly; member destructors handle cleanup.
Vxa::VTaskCursor::~VTaskCursor () {
}
| MichaelJCaruso/vision | software/src/master/src/kernel/Vxa_VTaskCursor.cpp | C++ | bsd-3-clause | 1,107 |
package com.simplyti.cloud.server.benchmark;
import java.util.concurrent.ExecutionException;
import com.google.inject.AbstractModule;
import com.simplyti.cloud.server.benchmark.tests.JsonSerialization;
import com.simplyti.cloud.server.benchmark.tests.Plaintext;
import com.simplyti.service.builder.di.guice.GuiceService;
/**
 * Launcher for the simple-server benchmark service.
 *
 * NOTE(review): the class extends {@link AbstractModule} but configures no
 * bindings — confirm whether the Guice-module aspect is actually used.
 */
public class Main extends AbstractModule {

	public static void main(String...strings) throws InterruptedException, ExecutionException {
		// Build and start the service with the two benchmark endpoints,
		// then block the main thread until the service is stopped.
		GuiceService.builder()
			.withLog4J2Logger()
			.withName("simple-server")
			.withApi(JsonSerialization.class)
			.withApi(Plaintext.class)
			.build().start().get()
			.stopFuture().await();
	}

}
| sumeetchhetri/FrameworkBenchmarks | frameworks/Java/simple-server/src/main/java/com/simplyti/cloud/server/benchmark/Main.java | Java | bsd-3-clause | 666 |
<!DOCTYPE html>
<!--
Distributed under both the W3C Test Suite License [1] and the W3C
3-clause BSD License [2]. To contribute to a W3C Test Suite, see the
policies and contribution forms [3].
[1] http://www.w3.org/Consortium/Legal/2008/04-testsuite-license
[2] http://www.w3.org/Consortium/Legal/2008/03-bsd-license
[3] http://www.w3.org/2004/10/27-testcases
-->
<html>
<head>
<title>Shadow DOM Test: A_10_01_02_09</title>
<link rel="author" title="Sergey G. Grekhov" href="mailto:sgrekhov@unipro.ru">
<link rel="help" href="http://www.w3.org/TR/2013/WD-shadow-dom-20130514/#shadow-root-methods">
<meta name="assert" content="ShadowRoot Object: Invoking the cloneNode() method on a ShadowRoot instance must always throw a DATA_CLONE_ERR exception.">
<script src="../../../../../../../resources/testharness.js"></script>
<script src="../../../../../../../resources/testharnessreport.js"></script>
<script src="../../../testcommon.js"></script>
<link rel="stylesheet" href="../../../../../../../resources/testharness.css">
</head>
<body>
<div id="log"></div>
<script>
// Verifies that invoking cloneNode() on a ShadowRoot always throws a
// DataCloneError (DOMException code 25).
test(unit(function (ctx) {
    var d = newRenderedHTMLDocument(ctx);
    var host = d.createElement('div');
    d.body.appendChild(host);

    var s = host.createShadowRoot();

    try {
        s.cloneNode();
        assert_true(false, 'Invoking the cloneNode() method on a ShadowRoot instance must always ' +
            'throw a DATA_CLONE_ERR exception.');
    } catch (e) {
        // 25 is DOMException.DATA_CLONE_ERR. Fixed typo: "exceprion".
        assert_equals(e.code, 25, 'Wrong exception type');
    }
}), 'A_10_01_02_09_T01');
</script>
</body>
</html>
| js0701/chromium-crosswalk | third_party/WebKit/LayoutTests/imported/web-platform-tests/shadow-dom/untriaged/elements-and-dom-objects/shadowroot-object/shadowroot-methods/test-010.html | HTML | bsd-3-clause | 1,520 |
<!DOCTYPE html>
<meta charset="utf-8"/>
<script>
// On load: unregister any service worker whose scope matches the
// 'scopepath' query parameter, then notify the embedding context.
async function onLoad() {
  const query = new URLSearchParams(self.location.search);
  const scope = self.origin + query.get('scopepath');

  const registration = await navigator.serviceWorker.getRegistration(scope);
  if (registration) {
    await registration.unregister();
  }

  // Report back to whichever context embeds this page: the opener window
  // when we were opened as a popup, otherwise the top-level frame.
  const target = window.opener || window.top;
  target.postMessage({ type: 'SW-UNREGISTERED' }, '*');
}
self.addEventListener('load', onLoad);
</script>
| chromium/chromium | third_party/blink/web_tests/external/wpt/service-workers/service-worker/resources/unregister-rewrite-worker.html | HTML | bsd-3-clause | 519 |
package report
import (
"bytes"
"io"
"os"
"testing"
"github.com/remyoudompheng/go-misc/pprof/parser"
)
// TestCpuProfileGraph parses the sample CPU profile, feeds every trace into
// a Reporter, and renders the resulting call graph through the Graphviz
// template.
func TestCpuProfileGraph(t *testing.T) {
	symbols := readSymbols("testdata/cpu.prof.symbols")
	resolve := func(addr uint64) string {
		name, _ := lookup(addr, symbols)
		return name
	}

	f, err := os.Open("testdata/cpu.prof")
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	prof, err := parser.NewCpuProfParser(f)
	if err != nil {
		t.Fatal(err)
	}

	rep := &Reporter{Resolver: resolve}
	var total int64
	for {
		trace, count, err := prof.ReadTrace()
		if trace == nil && err == io.EOF {
			break
		}
		rep.Add(trace, int64(count))
		total += int64(count)
	}

	g := rep.GraphByFunc(ColCPU)
	t.Logf("%#v", g)

	report := GraphReport{
		Prog:  "pprof.test",
		Total: total,
		Unit:  "samples",
		Graph: g,
	}
	var buf bytes.Buffer
	if err := graphvizTpl.Execute(&buf, report); err != nil {
		t.Fatal(err)
	}
	t.Log(buf.String())
}
| tylerb/go-misc | pprof/report/graph_test.go | GO | bsd-3-clause | 935 |
// Doxygen-generated navigation data for the _SYSTEM_THREAD_INFORMATION
// struct page: [member name, documentation anchor, synopsis] triples.
// Do not edit by hand — regenerate the docs with Doxygen instead.
var struct___s_y_s_t_e_m___t_h_r_e_a_d___i_n_f_o_r_m_a_t_i_o_n =
[
    [ "BasePriority", "struct___s_y_s_t_e_m___t_h_r_e_a_d___i_n_f_o_r_m_a_t_i_o_n.html#a83350567ec26fc4723ac14b5864ae4f9", null ],
    [ "ClientId", "struct___s_y_s_t_e_m___t_h_r_e_a_d___i_n_f_o_r_m_a_t_i_o_n.html#a5f4ab183c0202edb1e4b9a2be1ace0db", null ],
    [ "ContextSwitches", "struct___s_y_s_t_e_m___t_h_r_e_a_d___i_n_f_o_r_m_a_t_i_o_n.html#ac52132a613e80356b9fbdeab8f010d53", null ],
    [ "CreateTime", "struct___s_y_s_t_e_m___t_h_r_e_a_d___i_n_f_o_r_m_a_t_i_o_n.html#a440670e511c4480f7017340c5ebe7a2f", null ],
    [ "KernelTime", "struct___s_y_s_t_e_m___t_h_r_e_a_d___i_n_f_o_r_m_a_t_i_o_n.html#a3e9ff2a68079e122720519502209e5e1", null ],
    [ "Priority", "struct___s_y_s_t_e_m___t_h_r_e_a_d___i_n_f_o_r_m_a_t_i_o_n.html#aa993a3a9535780f563e42fcb4e4f32c6", null ],
    [ "StartAddress", "struct___s_y_s_t_e_m___t_h_r_e_a_d___i_n_f_o_r_m_a_t_i_o_n.html#a9295c62e359e2ba76519e05baf2a4087", null ],
    [ "ThreadState", "struct___s_y_s_t_e_m___t_h_r_e_a_d___i_n_f_o_r_m_a_t_i_o_n.html#ac5dc1ba3985f4cac702124910e7712c7", null ],
    [ "UserTime", "struct___s_y_s_t_e_m___t_h_r_e_a_d___i_n_f_o_r_m_a_t_i_o_n.html#a860336a48384d8088ffd8c0abe38d170", null ],
    [ "WaitReason", "struct___s_y_s_t_e_m___t_h_r_e_a_d___i_n_f_o_r_m_a_t_i_o_n.html#a250406522090d84f67d275aff60e6079", null ],
    [ "WaitTime", "struct___s_y_s_t_e_m___t_h_r_e_a_d___i_n_f_o_r_m_a_t_i_o_n.html#ad7e29844c70b124dfbb6d57a52724d21", null ]
]; | shibbyr/Blackbone | doc/driver/html/struct___s_y_s_t_e_m___t_h_r_e_a_d___i_n_f_o_r_m_a_t_i_o_n.js | JavaScript | mit | 1,489 |
<?php
namespace Concrete\Controller\SinglePage\Dashboard\System\Express\Entities;
use Concrete\Core\Attribute\CategoryObjectInterface;
use Concrete\Core\Attribute\Type;
use Concrete\Core\Page\Controller\DashboardAttributesPageController;
class Attributes extends DashboardAttributesPageController
{
    // The currently loaded express entity; doubles as the attribute
    // "category object" consumed by the parent controller.
    protected $category;

    /**
     * Loads the express entity with the given ID and caches it on the
     * controller.
     *
     * @param int|string|null $id entity ID
     * @return \Concrete\Core\Entity\Express\Entity|null
     */
    protected function getEntity($id)
    {
        $r = $this->entityManager->getRepository('\Concrete\Core\Entity\Express\Entity');
        $this->category = $r->findOneById($id);
        return $this->category;
    }

    /**
     * Returns the object whose attributes are being managed.
     *
     * NOTE(review): returns whatever getEntity() cached last — confirm every
     * action calls getEntity() before the parent controller reads this.
     */
    protected function getCategoryObject()
    {
        return $this->category;
    }

    /**
     * Lists the entity's attribute keys alongside the available attribute
     * types.
     */
    public function view($id = null)
    {
        $entity = $this->getEntity($id);
        $this->set('entity', $entity);
        $this->renderList($entity->getAttributes(), Type::getAttributeTypeList());
    }

    /**
     * This page renders no custom header menu.
     */
    protected function getHeaderMenu(CategoryObjectInterface $category)
    {
        return false;
    }

    /**
     * Shows the edit form for a single attribute key.
     */
    public function edit($id = null, $akID = null)
    {
        $this->set('entity', $this->getEntity($id));
        $r = $this->entityManager->getRepository('\Concrete\Core\Entity\Attribute\Key\Key');
        $key = $r->findOneBy(array('akID' => $akID));
        $this->renderEdit($key,
            \URL::to('/dashboard/system/express/entities/attributes', 'view', $id)
        );
    }

    /**
     * Handles the edit form submission for an attribute key.
     */
    public function update($id = null, $akID = null)
    {
        // Re-run edit() first so the view state matches the form page.
        $this->edit($id, $akID);
        $entity = $this->getEntity($id);
        $this->set('entity', $entity);
        $r = $this->entityManager->getRepository('\Concrete\Core\Entity\Attribute\Key\Key');
        $key = $r->findOneBy(array('akID' => $akID));
        $this->executeUpdate($key,
            \URL::to('/dashboard/system/express/entities/attributes', 'view', $id)
        );
    }

    /**
     * Shows the "add attribute" form for the chosen attribute type.
     */
    public function select_type($id = null, $type = null)
    {
        $this->set('entity', $this->getEntity($id));
        $type = Type::getByID($type);
        $this->renderAdd($type,
            \URL::to('/dashboard/system/express/entities/attributes', 'view', $id)
        );
    }

    /**
     * Handles the "add attribute" form submission.
     */
    public function add($id = null, $type = null)
    {
        // Re-run select_type() first so the view state matches the form page.
        $this->select_type($id, $type);
        $type = Type::getByID($type);
        $entity = $this->getEntity($id);
        $this->set('entity', $entity);
        $this->executeAdd($type, \URL::to('/dashboard/system/express/entities/attributes', 'view', $id));
    }

    /**
     * Deletes an attribute key from the entity.
     */
    public function delete($id = null, $akID = null)
    {
        $entity = $this->getEntity($id);
        $factory = $this->app->make('Concrete\Core\Attribute\Category\ExpressCategory');
        $key = $factory->getAttributeKeyByID($akID);
        $this->set('entity', $entity);
        $this->executeDelete($key,
            \URL::to('/dashboard/system/express/entities/attributes', 'view', $id)
        );
    }
}
| noLunch/nolunch | concrete5/concrete/controllers/single_page/dashboard/system/express/entities/attributes.php | PHP | mit | 2,834 |
/*
* Copyright (C) 2010 Google Inc. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
 * A labeled checkbox control backed by an <input type="checkbox"> nested
 * inside a <label> element.
 *
 * @constructor
 * @param {string} label
 * @param {string} className
 * @param {string=} tooltip
 */
WebInspector.Checkbox = function(label, className, tooltip)
{
    this.element = document.createElementWithClass("label", className);
    this._inputElement = this.element.createChild("input");
    this._inputElement.type = "checkbox";
    this.element.createTextChild(label);
    if (tooltip)
        this.element.title = tooltip;
}

WebInspector.Checkbox.prototype = {
    /** @param {boolean} checked */
    set checked(checked)
    {
        this._inputElement.checked = checked;
    },

    /** @return {boolean} */
    get checked()
    {
        return this._inputElement.checked;
    },

    /**
     * Registers a click handler; the wrapper consumes the event after the
     * listener runs.
     * NOTE(review): the wrapper is attached to both the inner input and the
     * label — presumably consume() prevents the same click from reaching it
     * twice; confirm before relying on single delivery.
     * @param {function(Event)} listener
     */
    addEventListener: function(listener)
    {
        function listenerWrapper(event)
        {
            if (listener)
                listener(event);
            event.consume();
            return true;
        }

        this._inputElement.addEventListener("click", listenerWrapper, false);
        this.element.addEventListener("click", listenerWrapper, false);
    }
}
| omphalos/panopticonsole | vendor/blink/ui/Checkbox.js | JavaScript | mit | 2,349 |
<?php
namespace Illuminate\View\Compilers;
use Illuminate\Filesystem\Filesystem;
use InvalidArgumentException;
abstract class Compiler
{
    /**
     * Filesystem abstraction used to check and stat compiled templates.
     *
     * @var \Illuminate\Filesystem\Filesystem
     */
    protected $files;

    /**
     * Directory where compiled views are written.
     *
     * @var string
     */
    protected $cachePath;

    /**
     * Create a new compiler instance.
     *
     * @param  \Illuminate\Filesystem\Filesystem  $files
     * @param  string  $cachePath
     * @return void
     *
     * @throws \InvalidArgumentException
     */
    public function __construct(Filesystem $files, $cachePath)
    {
        if (! $cachePath) {
            throw new InvalidArgumentException('Please provide a valid cache path.');
        }

        $this->cachePath = $cachePath;
        $this->files = $files;
    }

    /**
     * Get the path to the compiled version of a view.
     *
     * @param  string  $path
     * @return string
     */
    public function getCompiledPath($path)
    {
        return $this->cachePath.'/'.sha1($path).'.php';
    }

    /**
     * Determine if the view at the given path is expired.
     *
     * @param  string  $path
     * @return bool
     */
    public function isExpired($path)
    {
        $compiledPath = $this->getCompiledPath($path);

        // A missing compiled file is always stale so it gets built on the
        // next render.
        if (! $this->files->exists($compiledPath)) {
            return true;
        }

        // Stale whenever the source template was modified at or after the
        // time the compiled output was last written.
        return $this->files->lastModified($path) >=
               $this->files->lastModified($compiledPath);
    }
}
| drakakisgeo/mailtester | vendor/laravel/framework/src/Illuminate/View/Compilers/Compiler.php | PHP | mit | 1,797 |
<?php
/*
* This file is part of sebastian/comparator.
*
* (c) Sebastian Bergmann <sebastian@phpunit.de>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace SebastianBergmann\Comparator;
/**
 * Minimal value-holder fixture used by the comparator test suite.
 */
class Struct
{
    /** @var mixed arbitrary payload under comparison */
    public $var;

    /**
     * @param mixed $var value to wrap
     */
    public function __construct($var)
    {
        $this->var = $var;
    }
}
| smoers/bird | vendor/sebastian/comparator/tests/_fixture/Struct.php | PHP | mit | 416 |
# [Bootstrap](http://getbootstrap.com) [](http://badge.fury.io/bo/bootstrap) [](http://travis-ci.org/twbs/bootstrap) [](https://david-dm.org/twbs/bootstrap#info=devDependencies)
[](https://saucelabs.com/u/bootstrap)
Bootstrap is a sleek, intuitive, and powerful front-end framework for faster and easier web development, created by [Mark Otto](http://twitter.com/mdo) and [Jacob Thornton](http://twitter.com/fat), and maintained by the [core team](https://github.com/twbs?tab=members) with the massive support and involvement of the community.
To get started, check out <http://getbootstrap.com>!
## Table of contents
- [Quick start](#quick-start)
- [Bugs and feature requests](#bugs-and-feature-requests)
- [Documentation](#documentation)
- [Compiling CSS and JavaScript](#compiling-css-and-javascript)
- [Contributing](#contributing)
- [Community](#community)
- [Versioning](#versioning)
- [Authors](#authors)
- [Copyright and license](#copyright-and-license)
## Quick start
Three quick start options are available:
- [Download the latest release](https://github.com/twbs/bootstrap/archive/v3.1.1.zip).
- Clone the repo: `git clone https://github.com/twbs/bootstrap.git`.
- Install with [Bower](http://bower.io): `bower install bootstrap`.
Read the [Getting Started page](http://getbootstrap.com/getting-started/) for information on the framework contents, templates and examples, and more.
### What's included
Within the download you'll find the following directories and files, logically grouping common assets and providing both compiled and minified variations. You'll see something like this:
```
bootstrap/
├── css/
│ ├── bootstrap.css
│ ├── bootstrap.min.css
│ ├── bootstrap-theme.css
│ └── bootstrap-theme.min.css
├── js/
│ ├── bootstrap.js
│ └── bootstrap.min.js
└── fonts/
├── glyphicons-halflings-regular.eot
├── glyphicons-halflings-regular.svg
├── glyphicons-halflings-regular.ttf
└── glyphicons-halflings-regular.woff
```
We provide compiled CSS and JS (`bootstrap.*`), as well as compiled and minified CSS and JS (`bootstrap.min.*`). Fonts from Glyphicons are included, as is the optional Bootstrap theme.
## Bugs and feature requests
Have a bug or a feature request? Please first read the [issue guidelines](https://github.com/twbs/bootstrap/blob/master/CONTRIBUTING.md#using-the-issue-tracker) and search for existing and closed issues. If your problem or idea is not addressed yet, [please open a new issue](https://github.com/twbs/bootstrap/issues/new).
## Documentation
Bootstrap's documentation, included in this repo in the root directory, is built with [Jekyll](http://jekyllrb.com) and publicly hosted on GitHub Pages at <http://getbootstrap.com>. The docs may also be run locally.
### Running documentation locally
1. If necessary, [install Jekyll](http://jekyllrb.com/docs/installation) (requires v1.x).
- **Windows users:** Read [this unofficial guide](https://github.com/juthilo/run-jekyll-on-windows/) to get Jekyll up and running without problems. We use Pygments for syntax highlighting, so make sure to read the sections on installing Python and Pygments.
2. From the root `/bootstrap` directory, run `jekyll serve` in the command line.
- **Windows users:** While we use Jekyll's `encoding` setting, you might still need to change the command prompt's character encoding ([code page](http://en.wikipedia.org/wiki/Windows_code_page)) to UTF-8 so Jekyll runs without errors. For Ruby 2.0.0, run `chcp 65001` first. For Ruby 1.9.3, you can alternatively do `SET LANG=en_EN.UTF-8`.
3. Open <http://localhost:9001> in your browser, and voilà.
Learn more about using Jekyll by reading its [documentation](http://jekyllrb.com/docs/home/).
### Documentation for previous releases
Documentation for v2.3.2 has been made available for the time being at <http://getbootstrap.com/2.3.2/> while folks transition to Bootstrap 3.
[Previous releases](https://github.com/twbs/bootstrap/releases) and their documentation are also available for download.
## Compiling CSS and JavaScript
Bootstrap uses [Grunt](http://gruntjs.com/) with convenient methods for working with the framework. It's how we compile our code, run tests, and more. To use it, install the required dependencies as directed and then run some Grunt commands.
### Install Grunt
From the command line:
1. Install `grunt-cli` globally with `npm install -g grunt-cli`.
2. Navigate to the root `/bootstrap` directory, then run `npm install`. npm will look at [package.json](https://github.com/twbs/bootstrap/blob/master/package.json) and automatically install the necessary local dependencies listed there.
When completed, you'll be able to run the various Grunt commands provided from the command line.
**Unfamiliar with `npm`? Don't have node installed?** That's a-okay. npm stands for [node packaged modules](http://npmjs.org/) and is a way to manage development dependencies through node.js. [Download and install node.js](http://nodejs.org/download/) before proceeding.
### Available Grunt commands
#### Build - `grunt`
Run `grunt` to run tests locally and compile the CSS and JavaScript into `/dist`. **Uses [Less](http://lesscss.org/) and [UglifyJS](http://lisperator.net/uglifyjs/).**
#### Only compile CSS and JavaScript - `grunt dist`
`grunt dist` creates the `/dist` directory with compiled files. **Uses [Less](http://lesscss.org/) and [UglifyJS](http://lisperator.net/uglifyjs/).**
#### Tests - `grunt test`
Runs [JSHint](http://jshint.com) and [QUnit](http://qunitjs.com/) tests headlessly in [PhantomJS](http://phantomjs.org/) (used for CI).
#### Watch - `grunt watch`
This is a convenience method for watching just Less files and automatically building them whenever you save.
### Troubleshooting dependencies
Should you encounter problems with installing dependencies or running Grunt commands, uninstall all previous dependency versions (global and local). Then, rerun `npm install`.
## Contributing
Please read through our [contributing guidelines](https://github.com/twbs/bootstrap/blob/master/CONTRIBUTING.md). Included are directions for opening issues, coding standards, and notes on development.
Moreover, if your pull request contains JavaScript patches or features, you must include relevant unit tests. All HTML and CSS should conform to the [Code Guide](http://github.com/mdo/code-guide), maintained by [Mark Otto](http://github.com/mdo).
Editor preferences are available in the [editor config](https://github.com/twbs/bootstrap/blob/master/.editorconfig) for easy use in common text editors. Read more and download plugins at <http://editorconfig.org>.
## Community
Keep track of development and community news.
- Follow [@twbootstrap on Twitter](http://twitter.com/twbootstrap).
- Read and subscribe to [The Official Bootstrap Blog](http://blog.getbootstrap.com).
- Chat with fellow Bootstrappers in IRC. On the `irc.freenode.net` server, in the `##twitter-bootstrap` channel.
- Implementation help may be found at Stack Overflow (tagged [`twitter-bootstrap-3`](http://stackoverflow.com/questions/tagged/twitter-bootstrap-3)).
## Versioning
For transparency into our release cycle and in striving to maintain backward compatibility, Bootstrap is maintained under the Semantic Versioning guidelines. Sometimes we screw up, but we'll adhere to these rules whenever possible.
Releases will be numbered with the following format:
`<major>.<minor>.<patch>`
And constructed with the following guidelines:
- Breaking backward compatibility **bumps the major** while resetting minor and patch
- New additions without breaking backward compatibility **bumps the minor** while resetting the patch
- Bug fixes and misc changes **bumps only the patch**
For more information on SemVer, please visit <http://semver.org/>.
## Authors
**Mark Otto**
- <http://twitter.com/mdo>
- <http://github.com/mdo>
**Jacob Thornton**
- <http://twitter.com/fat>
- <http://github.com/fat>
## Copyright and license
Code and documentation copyright 2011-2014 Twitter, Inc. Code released under [the MIT license](LICENSE). Docs released under [Creative Commons](docs/LICENSE).
| ybarghane/AnnuaireResteaux | web/bundles/sonataadmin/vendor/bootstrap/README.md | Markdown | mit | 8,717 |
# A visitor for converting a static Sass tree into a static CSS tree.
class Sass::Tree::Visitors::Cssize < Sass::Tree::Visitors::Base
# @param root [Tree::Node] The root node of the tree to visit.
# @return [(Tree::Node, Sass::Util::SubsetMap)] The resulting tree of static nodes
# *and* the extensions defined for this tree
def self.visit(root); super; end
protected
# Returns the immediate parent of the current node.
# @return [Tree::Node]
attr_reader :parent
def initialize
@parent_directives = []
@extends = Sass::Util::SubsetMap.new
end
# If an exception is raised, this adds proper metadata to the backtrace.
def visit(node)
super(node)
rescue Sass::SyntaxError => e
e.modify_backtrace(:filename => node.filename, :line => node.line)
raise e
end
# Keeps track of the current parent node.
def visit_children(parent)
with_parent parent do
parent.children = super.flatten
parent
end
end
MERGEABLE_DIRECTIVES = [Sass::Tree::MediaNode]
# Runs a block of code with the current parent node
# replaced with the given node.
#
# @param parent [Tree::Node] The new parent for the duration of the block.
# @yield A block in which the parent is set to `parent`.
# @return [Object] The return value of the block.
def with_parent(parent)
if parent.is_a?(Sass::Tree::DirectiveNode)
if MERGEABLE_DIRECTIVES.any? {|klass| parent.is_a?(klass)}
old_parent_directive = @parent_directives.pop
end
@parent_directives.push parent
end
old_parent, @parent = @parent, parent
yield
ensure
@parent_directives.pop if parent.is_a?(Sass::Tree::DirectiveNode)
@parent_directives.push old_parent_directive if old_parent_directive
@parent = old_parent
end
# In Ruby 1.8, ensures that there's only one `@charset` directive
# and that it's at the top of the document.
#
# @return [(Tree::Node, Sass::Util::SubsetMap)] The resulting tree of static nodes
# *and* the extensions defined for this tree
def visit_root(node)
yield
if parent.nil?
# In Ruby 1.9 we can make all @charset nodes invisible
# and infer the final @charset from the encoding of the final string.
if Sass::Util.ruby1_8?
charset = node.children.find {|c| c.is_a?(Sass::Tree::CharsetNode)}
node.children.reject! {|c| c.is_a?(Sass::Tree::CharsetNode)}
node.children.unshift charset if charset
end
imports = Sass::Util.extract!(node.children) do |c|
c.is_a?(Sass::Tree::DirectiveNode) && !c.is_a?(Sass::Tree::MediaNode) &&
c.resolved_value =~ /^@import /i
end
charset_and_index = Sass::Util.ruby1_8? &&
node.children.each_with_index.find {|c, _| c.is_a?(Sass::Tree::CharsetNode)}
if charset_and_index
index = charset_and_index.last
node.children = node.children[0..index] + imports + node.children[index+1..-1]
else
node.children = imports + node.children
end
end
return node, @extends
rescue Sass::SyntaxError => e
e.sass_template ||= node.template
raise e
end
# A simple struct wrapping up information about a single `@extend` instance. A
# single [ExtendNode] can have multiple Extends if either the parent node or
# the extended selector is a comma sequence.
#
# @attr extender [Sass::Selector::Sequence]
# The selector of the CSS rule containing the `@extend`.
# @attr target [Array<Sass::Selector::Simple>] The selector being `@extend`ed.
# @attr node [Sass::Tree::ExtendNode] The node that produced this extend.
# @attr directives [Array<Sass::Tree::DirectiveNode>]
# The directives containing the `@extend`.
# @attr result [Symbol]
# The result of this extend. One of `:not_found` (the target doesn't exist
# in the document), `:failed_to_unify` (the target exists but cannot be
# unified with the extender), or `:succeeded`.
Extend = Struct.new(:extender, :target, :node, :directives, :result)
# Registers an extension in the `@extends` subset map.
def visit_extend(node)
node.resolved_selector.members.each do |seq|
if seq.members.size > 1
raise Sass::SyntaxError.new("Can't extend #{seq.to_a.join}: can't extend nested selectors")
end
sseq = seq.members.first
if !sseq.is_a?(Sass::Selector::SimpleSequence)
raise Sass::SyntaxError.new("Can't extend #{seq.to_a.join}: invalid selector")
elsif sseq.members.any? {|ss| ss.is_a?(Sass::Selector::Parent)}
raise Sass::SyntaxError.new("Can't extend #{seq.to_a.join}: can't extend parent selectors")
end
sel = sseq.members
parent.resolved_rules.members.each do |seq|
if !seq.members.last.is_a?(Sass::Selector::SimpleSequence)
raise Sass::SyntaxError.new("#{seq} can't extend: invalid selector")
end
@extends[sel] = Extend.new(seq, sel, node, @parent_directives.dup, :not_found)
end
end
[]
end
# Modifies exception backtraces to include the imported file.
def visit_import(node)
# Don't use #visit_children to avoid adding the import node to the list of parents.
node.children.map {|c| visit(c)}.flatten
rescue Sass::SyntaxError => e
e.modify_backtrace(:filename => node.children.first.filename)
e.add_backtrace(:filename => node.filename, :line => node.line)
raise e
end
# Bubbles the `@media` directive up through RuleNodes
# and merges it with other `@media` directives.
def visit_media(node)
yield unless bubble(node)
media = node.children.select {|c| c.is_a?(Sass::Tree::MediaNode)}
node.children.reject! {|c| c.is_a?(Sass::Tree::MediaNode)}
media = media.select {|n| n.resolved_query = n.resolved_query.merge(node.resolved_query)}
(node.children.empty? ? [] : [node]) + media
end
# Bubbles the `@supports` directive up through RuleNodes.
def visit_supports(node)
yield unless bubble(node)
node
end
# Asserts that all the traced children are valid in their new location.
def visit_trace(node)
# Don't use #visit_children to avoid adding the trace node to the list of parents.
node.children.map {|c| visit(c)}.flatten
rescue Sass::SyntaxError => e
e.modify_backtrace(:mixin => node.name, :filename => node.filename, :line => node.line)
e.add_backtrace(:filename => node.filename, :line => node.line)
raise e
end
# Converts nested properties into flat properties
# and updates the indentation of the prop node based on the nesting level.
def visit_prop(node)
if parent.is_a?(Sass::Tree::PropNode)
node.resolved_name = "#{parent.resolved_name}-#{node.resolved_name}"
node.tabs = parent.tabs + (parent.resolved_value.empty? ? 0 : 1) if node.style == :nested
end
yield
result = node.children.dup
if !node.resolved_value.empty? || node.children.empty?
node.send(:check!)
result.unshift(node)
end
result
end
# Resolves parent references and nested selectors,
# and updates the indentation of the rule node based on the nesting level.
def visit_rule(node)
parent_resolved_rules = parent.is_a?(Sass::Tree::RuleNode) ? parent.resolved_rules : nil
# It's possible for resolved_rules to be set if we've duplicated this node during @media bubbling
node.resolved_rules ||= node.parsed_rules.resolve_parent_refs(parent_resolved_rules)
yield
rules = node.children.select {|c| c.is_a?(Sass::Tree::RuleNode) || c.bubbles?}
props = node.children.reject {|c| c.is_a?(Sass::Tree::RuleNode) || c.bubbles? || c.invisible?}
unless props.empty?
node.children = props
rules.each {|r| r.tabs += 1} if node.style == :nested
rules.unshift(node)
end
rules.last.group_end = true unless parent.is_a?(Sass::Tree::RuleNode) || rules.empty?
rules
end
private
def bubble(node)
return unless parent.is_a?(Sass::Tree::RuleNode)
new_rule = parent.dup
new_rule.children = node.children
node.children = with_parent(node) {Array(visit(new_rule))}
# If the last child is actually the end of the group,
# the parent's cssize will set it properly
node.children.last.group_end = false unless node.children.empty?
true
end
end
| kmcminn/rails_survey | vendor/gems/sass-3.2.10/lib/sass/tree/visitors/cssize.rb | Ruby | mit | 8,263 |
/**
* Template JS for Internet Explorer 8 and lower - mainly workaround for missing selectors
*/
(function($)
{
	// Standard template setup for IE: emulate selectors that old IE lacks
	// (:first-child/:last-child, nth-child striping, :checked/:disabled
	// sibling styling) by toggling marker classes instead.
	$.fn.addTemplateSetup(function()
	{
		// Clean existing classes
		this.find('.first-child').removeClass('first-child');
		this.find('.last-child').removeClass('last-child');
		this.find('.last-of-type').removeClass('last-of-type');
		this.find('.even').removeClass('even');
		this.find('.odd').removeClass('odd');

		// Missing selectors
		this.find(':first-child').addClass('first-child');
		this.find(':last-child').addClass('last-child');

		// Specific classes
		this.find('.head').each(function () { $(this).children('div:last').addClass('last-of-type'); });
		this.find('tbody tr:even, .task-dialog > li:even, .planning > li.planning-header > ul > li:even').addClass('even');
		this.find('tbody tr:odd, .planning > li:odd').addClass('odd');
		this.find('.form fieldset:has(legend)').addClass('fieldset-with-legend').filter(':first-child').addClass('fieldset-with-legend-first-child');

		// Disabled buttons
		this.find('button:disabled').addClass('disabled');

		// IE 7
		// NOTE(review): $.browser was removed in jQuery 1.9 — this file
		// assumes an older jQuery is loaded.
		if ($.browser.version < 8)
		{
			// Clean existing classes
			this.find('.after-h1').removeClass('after-h1');

			this.find('.block-content h1:first-child, .block-content .h1:first-child').next().addClass('after-h1');
			this.find('.calendar .add-event').prepend('<span class="before"></span>');
		}

		// Input switches
		this.find('input[type=radio].switch:checked + .switch-replace, input[type=checkbox].switch:checked + .switch-replace').addClass('switch-replace-checked');
		this.find('input[type=radio].switch:disabled + .switch-replace, input[type=checkbox].switch:disabled + .switch-replace').addClass('switch-replace-disabled');
		this.find('input[type=radio].mini-switch:checked + .mini-switch-replace, input[type=checkbox].mini-switch:checked + .mini-switch-replace').addClass('mini-switch-replace-checked');
		this.find('input[type=radio].mini-switch:disabled + .mini-switch-replace, input[type=checkbox].mini-switch:disabled + .mini-switch-replace').addClass('mini-switch-replace-disabled');
	});

	// Document initial setup
	$(document).ready(function()
	{
		// Input switches
		// NOTE(review): the conditions below look inverted (the *-checked
		// class is added when this.checked is false). Old IE click handlers
		// may observe the pre-toggle state, which would make this correct
		// there — confirm against the target browsers before changing.
		$('input[type=radio].switch, input[type=checkbox].switch').click(function() {
			if (!this.checked)
			{
				$(this).next('.switch-replace').addClass('switch-replace-checked');
			}
			else
			{
				$(this).next('.switch-replace').removeClass('switch-replace-checked');
			}
		});
		$('input[type=radio].mini-switch, input[type=checkbox].mini-switch').click(function() {
			if (!this.checked)
			{
				$(this).next('.mini-switch-replace').addClass('mini-switch-replace-checked');
			}
			else
			{
				$(this).next('.mini-switch-replace').removeClass('mini-switch-replace-checked');
			}
		});
	});

})(jQuery);
})(jQuery); | smailovski/E-institut | web/bundles/eieinstitut/js/standard.ie.js | JavaScript | mit | 2,936 |
/*
 * user_esp_platform_timer.h: interface for the ESP platform device timer.
 *
 * Fix: the original include guard (__USER_DEVICEFIND_H__) was copy-pasted
 * from the devicefind header; if both headers are included in one
 * translation unit, the second one is silently skipped. The guard now
 * matches this header's name.
 */
#ifndef __USER_ESP_PLATFORM_TIMER_H__
#define __USER_ESP_PLATFORM_TIMER_H__

/* Forward declaration so the prototype is valid without pulling in espconn.h. */
struct espconn;

/* Parse the timer configuration in pbuffer and start the platform timer
 * for the given connection. */
void user_platform_timer_start(char* pbuffer, struct espconn *pespconn);

#endif
| node-wot/node-wot | sdk/esp_iot_sdk_v1.4.0/examples/IoT_Demo/include/user_esp_platform_timer.h | C | mit | 148 |
import Ember from 'ember';
import MaterializeNavBar from './md-navbar';
export default MaterializeNavBar.extend({
  // Deprecated alias for {{md-navbar}}: behaves identically, but emits a
  // deprecation warning when the component is initialized.
  init() {
    this._super(...arguments);
    const message = "{{materialize-navbar}} has been deprecated. Please use {{md-navbar}} instead";
    Ember.deprecate(message, false, {url: "https://github.com/sgasser/ember-cli-materialize/issues/67"});
  }
});
| ladyleet/ember-cli-materialize | app/components/materialize-navbar.js | JavaScript | mit | 342 |
var _ = require('underscore');
module.exports = {
'passport-number': {
labelClassName: 'visuallyhidden',
validate: [
'required'
]
},
'can-sign': {
legend: {
className: 'visuallyhidden'
},
formatter: 'boolean',
validate: ['required'],
options: [
{
value: true,
label: 'I understand and will sign my passport',
},
{
value: false,
label: 'I can’t sign my name',
toggle: 'no-sign'
}
]
},
'no-sign-reason': {
className: 'textarea',
validate: [
'required',
{ type: 'maxlength', arguments: 250 }
],
dependent: {
field: 'can-sign',
value: false
}
},
'age-year': {
labelClassName: 'form-label',
formatter: 'removehyphens',
validate: [
'numeric',
'required'
]
},
'age-month': {
labelClassName: 'form-label',
formatter: 'removehyphens',
validate: [
'numeric',
'required'
]
},
'age-day': {
labelClassName: 'form-label',
formatter: 'removehyphens',
validate: [
'numeric',
'required'
]
},
'title':{
legend: {
value: 'Your title',
className: 'visuallyhidden'
},
options: [
{value: 'Mr', label: 'Mr'},
{value: 'Mrs', label: 'Mrs'},
{value: 'Miss', label: 'Miss'},
{value: 'Ms', label: 'Ms'},
{value: 'Other', label: 'Other', toggle: 'other-titles'}
],
validate: [
'required'
]
},
'name': {
},
'lastname': {
},
'previous-name': {
formatter: 'boolean',
validate: 'required',
legend: {
className: 'form-label-bold'
},
className: 'inline',
options: [
{ value: true, label: 'Yes', toggle: 'previous-names', child: 'input-text' },
{ value: false, label: 'No' }
]
},
'previous-names': {
validate: [
'required',
{ type: 'maxlength', arguments: 100 }
],
dependent: {
field: 'previous-name',
value: true
}
},
'gender': {
validate: [
'required'
],
legend: {
value: 'Your gender',
className: 'visuallyhidden'
},
options: [
{ value: 'F', label: 'Female' },
{ value: 'M', label: 'Male' }
]
},
'town-of-birth': {
validate: [
'required'
]
},
'born-in-uk': {
formatter: 'boolean',
validate: 'required',
legend: {
className: 'form-label-bold'
},
options: [
{ value: true, label: 'Yes' },
{ value: false, label: 'No', toggle: 'birth-country' }
],
className: 'inline'
},
'country-of-birth': {
validate: 'required',
dependent: {
field: 'born-in-uk',
value: false
},
},
'expiry-year': {
labelClassName: 'form-label',
formatter: 'removehyphens',
validate: [
'numeric',
'required'
]
},
'expiry-month': {
labelClassName: 'form-label',
formatter: 'removehyphens',
validate: [
'numeric',
'required'
]
},
'address1': {
validate: [
'required'
]
},
'address2': {
labelClassName: 'visuallyhidden',
formatter: 'removehyphens'
},
'address3': {
labelClassName: 'visuallyhidden',
formatter: 'removehyphens'
},
'address4': {
labelClassName: 'visuallyhidden',
formatter: 'removehyphens'
},
'address5': {
labelClassName: 'visuallyhidden',
formatter: 'removehyphens'
},
'town': {
validate: [
'required'
]
},
'postcode': {
validate: [
'required'
]
},
'email': {
validate: [
'required'
]
},
'country-code': {
labelClassName: 'visuallyhidden',
formatter: 'removehyphens',
validate: [
'required'
]
},
'mobile': {
validate: [
'numeric',
'required'
]
},
'passport-options-dps':{
legend: {
value: 'Passport size'
},
options: [
{value: '32', label: 'Standard adult 32-page passport (£128)'},
{value: '48', label: 'Jumbo adult 48-page passport (£137)'}
],
validate: [
'required'
]
},
'passport-size': {
formatter: 'boolean',
validate: 'required',
legend: {
value: 'What size passport would you like?',
className: 'form-label'
},
options: [
{ value: false, label: '32-page passport (free)' },
{ value: true, label: '48-page passport ({{#currency}}{{largePassportCost}}{{/currency}})' }
],
dependent: {
field: 'passport-size-dependent',
value: 'true'
}
},
braille: {
formatter: 'boolean-strict',
legend: {
value: 'Add a Braille sticker'
},
},
'return-passport':{
legend: {
value: 'How would you like us to return your ols passport?',
className: 'visuallyhidden'
},
options: [
{value: 'Special-delivery', label: 'Special delivery (£3 extra)'},
{value: 'Standard', label: 'Standard post (free)'}
],
validate: [
'required'
]
},
'secure-return': {
formatter: 'boolean',
validate: 'required',
legend: {
value: 'How would you like us to send your old passport back to you?',
className: 'form-label-bold'
},
options: [
{ value: true, label: 'Special delivery (£3 extra)' },
{ value: false, label: 'Standard post (free)' }
]
},
};
| UKHomeOffice/passports-prototype | routes/priority_service_170731/renew/fields.js | JavaScript | mit | 5,694 |
# Shared examples for Time day accessors; the including spec sets @method
# (presumably :day or :mday — confirm against the including files).
describe :time_day, shared: true do
  it "returns the day of the month (1..n) for a local Time" do
    # with_timezone is a spec helper that runs the block with the given
    # zone name/offset in effect.
    with_timezone("CET", 1) do
      Time.local(1970, 1, 1).send(@method).should == 1
    end
  end

  it "returns the day of the month for a UTC Time" do
    Time.utc(1970, 1, 1).send(@method).should == 1
  end

  it "returns the day of the month for a Time with a fixed offset" do
    Time.new(2012, 1, 1, 0, 0, 0, -3600).send(@method).should == 1
  end
end
| pmq20/ruby-compiler | ruby/spec/ruby/core/time/shared/day.rb | Ruby | mit | 459 |
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// A barrier allows multiple tasks to cooperatively work on some algorithm in parallel.
// A group of tasks cooperate by moving through a series of phases, where each in the group signals it has arrived at
// the barrier in a given phase and implicitly waits for all others to arrive.
// The same barrier can be used for multiple phases.
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
using System.Diagnostics;
using System.Runtime.Serialization;
using System.Security;
namespace System.Threading
{
    /// <summary>
    /// The exception that is thrown when the post-phase action of a <see cref="Barrier"/> fails.
    /// </summary>
    [Serializable]
    [System.Runtime.CompilerServices.TypeForwardedFrom("System, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")]
    public class BarrierPostPhaseException : Exception
    {
        /// <summary>
        /// Initializes a new instance of the <see cref="BarrierPostPhaseException"/> class.
        /// </summary>
        public BarrierPostPhaseException()
            : this((string)null)
        {
        }

        /// <summary>
        /// Initializes a new instance of the <see cref="BarrierPostPhaseException"/> class with the specified inner exception.
        /// </summary>
        /// <param name="innerException">The exception that is the cause of the current exception.</param>
        public BarrierPostPhaseException(Exception innerException)
            : this(null, innerException)
        {
        }

        /// <summary>
        /// Initializes a new instance of the <see cref="BarrierPostPhaseException"/> class with a specified error message.
        /// </summary>
        /// <param name="message">A string that describes the exception.</param>
        public BarrierPostPhaseException(string message)
            : this(message, null)
        {
        }

        /// <summary>
        /// Initializes a new instance of the <see cref="BarrierPostPhaseException"/> class with a specified error message and inner exception.
        /// </summary>
        /// <param name="message">A string that describes the exception.</param>
        /// <param name="innerException">The exception that is the cause of the current exception.</param>
        public BarrierPostPhaseException(string message, Exception innerException)
            // All other constructors funnel here; a null message falls back
            // to the localized default resource string.
            : base(message == null ? SR.BarrierPostPhaseException : message, innerException)
        {
        }

        /// <summary>
        /// Initializes a new instance of the BarrierPostPhaseException class with serialized data.
        /// </summary>
        /// <param name="info">The object that holds the serialized object data.</param>
        /// <param name="context">The contextual information about the source or destination.</param>
        protected BarrierPostPhaseException(SerializationInfo info, StreamingContext context)
            : base(info, context)
        {
        }
    }
/// <summary>
/// Enables multiple tasks to cooperatively work on an algorithm in parallel through multiple phases.
/// </summary>
/// <remarks>
/// <para>
/// A group of tasks cooperate by moving through a series of phases, where each in the group signals it
/// has arrived at the <see cref="Barrier"/> in a given phase and implicitly waits for all others to
/// arrive. The same <see cref="Barrier"/> can be used for multiple phases.
/// </para>
/// <para>
/// All public and protected members of <see cref="Barrier"/> are thread-safe and may be used
/// concurrently from multiple threads, with the exception of Dispose, which
/// must only be used when all other operations on the <see cref="Barrier"/> have
/// completed.
/// </para>
/// </remarks>
[DebuggerDisplay("Participant Count={ParticipantCount},Participants Remaining={ParticipantsRemaining}")]
public class Barrier : IDisposable
{
//This variable holds the basic barrier variables:
// 1- The current participants count
// 2- The total participants count
// 3- The sense flag (true if the current phase is even, false otherwise)
// The first 15 bits are for the total count which means the maximum participants for the barrier is about 32K
// The 16th bit is dummy
// The next 15th bit for the current
// And the last highest bit is for the sense
private volatile int _currentTotalCount;
// Bitmask to extract the current count
private const int CURRENT_MASK = 0x7FFF0000;
// Bitmask to extract the total count
private const int TOTAL_MASK = 0x00007FFF;
// Bitmask to extract the sense flag
private const int SENSE_MASK = unchecked((int)0x80000000);
// The maximum participants the barrier can operate = 32767 ( 2 power 15 - 1 )
private const int MAX_PARTICIPANTS = TOTAL_MASK;
// The current barrier phase
// We don't need to worry about overflow, the max value is 2^63-1; If it starts from 0 at a
// rate of 4 billion increments per second, it will takes about 64 years to overflow.
private long _currentPhase;
// dispose flag
private bool _disposed;
// Odd phases event
private ManualResetEventSlim _oddEvent;
// Even phases event
private ManualResetEventSlim _evenEvent;
// The execution context of the creator thread
private ExecutionContext _ownerThreadContext;
// The EC callback that invokes the post phase action
private static ContextCallback s_invokePostPhaseAction;
// Post phase action after each phase
private Action<Barrier> _postPhaseAction;
// In case the post phase action throws an exception, wraps it in BarrierPostPhaseException
private Exception _exception;
// This is the ManagedThreadID of the postPhaseAction caller thread, this is used to determine if the SignalAndWait, Dispose or Add/RemoveParticipant caller thread is
// the same thread as the postPhaseAction thread which means this method was called from the postPhaseAction which is illegal.
// This value is captured before calling the action and reset back to zero after it.
private int _actionCallerID;
#region Properties
/// <summary>
/// Gets the number of participants in the barrier that haven't yet signaled
/// in the current phase.
/// </summary>
/// <remarks>
/// This could be 0 during a post-phase action delegate execution or if the
/// ParticipantCount is 0.
/// </remarks>
public int ParticipantsRemaining
{
get
{
int currentTotal = _currentTotalCount;
int total = (int)(currentTotal & TOTAL_MASK);
int current = (int)((currentTotal & CURRENT_MASK) >> 16);
return total - current;
}
}
/// <summary>
/// Gets the total number of participants in the barrier.
/// </summary>
public int ParticipantCount
{
get { return (int)(_currentTotalCount & TOTAL_MASK); }
}
        /// <summary>
        /// Gets the number of the barrier's current phase.
        /// </summary>
        public long CurrentPhaseNumber
        {
            // use the new Volatile.Read/Write method because it is cheaper than Interlocked.Read on AMD64 architecture
            get { return Volatile.Read(ref _currentPhase); }
            // Internal setter; Volatile.Write makes the new phase number
            // immediately visible to threads reading the getter.
            internal set { Volatile.Write(ref _currentPhase, value); }
        }
#endregion
        /// <summary>
        /// Initializes a new instance of the <see cref="Barrier"/> class.
        /// </summary>
        /// <param name="participantCount">The number of participating threads.</param>
        /// <exception cref="ArgumentOutOfRangeException"> <paramref name="participantCount"/> is less than 0
        /// or greater than <see cref="T:System.Int16.MaxValue"/>.</exception>
        public Barrier(int participantCount)
            // Delegate to the main constructor with no post-phase action.
            : this(participantCount, null)
        {
        }
/// <summary>
/// Initializes a new instance of the <see cref="Barrier"/> class.
/// </summary>
/// <param name="participantCount">The number of participating threads.</param>
/// <param name="postPhaseAction">The <see cref="T:System.Action`1"/> to be executed after each
/// phase.</param>
/// <exception cref="T:System.ArgumentOutOfRangeException"> <paramref name="participantCount"/> is less than 0
/// or greater than <see cref="T:System.Int32.MaxValue"/>.</exception>
/// <remarks>
/// The <paramref name="postPhaseAction"/> delegate will be executed after
/// all participants have arrived at the barrier in one phase. The participants
/// will not be released to the next phase until the postPhaseAction delegate
/// has completed execution.
/// </remarks>
public Barrier(int participantCount, Action<Barrier> postPhaseAction)
{
// the count must be non negative value
if (participantCount < 0 || participantCount > MAX_PARTICIPANTS)
{
throw new ArgumentOutOfRangeException(nameof(participantCount), participantCount, SR.Barrier_ctor_ArgumentOutOfRange);
}
_currentTotalCount = (int)participantCount;
_postPhaseAction = postPhaseAction;
//Lazily initialize the events
_oddEvent = new ManualResetEventSlim(true);
_evenEvent = new ManualResetEventSlim(false);
// Capture the context if the post phase action is not null
if (postPhaseAction != null)
{
_ownerThreadContext = ExecutionContext.Capture();
}
_actionCallerID = 0;
}
/// <summary>
/// Extract the three variables current, total and sense from a given big variable
/// </summary>
/// <param name="currentTotal">The integer variable that contains the other three variables</param>
/// <param name="current">The current participant count</param>
/// <param name="total">The total participants count</param>
/// <param name="sense">The sense flag</param>
private void GetCurrentTotal(int currentTotal, out int current, out int total, out bool sense)
{
total = (int)(currentTotal & TOTAL_MASK);
current = (int)((currentTotal & CURRENT_MASK) >> 16);
sense = (currentTotal & SENSE_MASK) == 0 ? true : false;
}
        /// <summary>
        /// Write the three variables current. total and the sense to the m_currentTotal
        /// </summary>
        /// <param name="currentTotal">The old current total to compare</param>
        /// <param name="current">The current participant count</param>
        /// <param name="total">The total participants count</param>
        /// <param name="sense">The sense flag</param>
        /// <returns>True if the CAS succeeded, false otherwise</returns>
        private bool SetCurrentTotal(int currentTotal, int current, int total, bool sense)
        {
            // Pack current (bits 16-30), total (bits 0-14) and sense (bit 31)
            // back into a single word, then CAS it against the expected value.
            int newCurrentTotal = (current << 16) | total;

            if (!sense)
            {
                newCurrentTotal |= SENSE_MASK;
            }

            // CS0420 is suppressed deliberately: passing the volatile field by
            // ref to Interlocked is safe.
#pragma warning disable 0420
            return Interlocked.CompareExchange(ref _currentTotalCount, newCurrentTotal, currentTotal) == currentTotal;
#pragma warning restore 0420
        }
        /// <summary>
        /// Notifies the <see cref="Barrier"/> that there will be an additional participant.
        /// </summary>
        /// <returns>The phase number of the barrier in which the new participants will first
        /// participate.</returns>
        /// <exception cref="T:System.InvalidOperationException">
        /// Adding a participant would cause the barrier's participant count to
        /// exceed <see cref="T:System.Int16.MaxValue"/>.
        /// </exception>
        /// <exception cref="T:System.InvalidOperationException">
        /// The method was invoked from within a post-phase action.
        /// </exception>
        /// <exception cref="T:System.ObjectDisposedException">The current instance has already been
        /// disposed.</exception>
        public long AddParticipant()
        {
            try
            {
                return AddParticipants(1);
            }
            catch (ArgumentOutOfRangeException)
            {
                // Adding one participant can only fail on overflow of the
                // 15-bit total; surface that as InvalidOperationException
                // per this method's contract.
                throw new InvalidOperationException(SR.Barrier_AddParticipants_Overflow_ArgumentOutOfRange);
            }
        }
        /// <summary>
        /// Notifies the <see cref="Barrier"/> that there will be additional participants.
        /// </summary>
        /// <param name="participantCount">The number of additional participants to add to the
        /// barrier.</param>
        /// <returns>The phase number of the barrier in which the new participants will first
        /// participate.</returns>
        /// <exception cref="T:System.ArgumentOutOfRangeException"><paramref name="participantCount"/> is less than
        /// 0.</exception>
        /// <exception cref="T:System.ArgumentOutOfRangeException">Adding <paramref name="participantCount"/> participants would cause the
        /// barrier's participant count to exceed <see cref="T:System.Int16.MaxValue"/>.</exception>
        /// <exception cref="T:System.InvalidOperationException">
        /// The method was invoked from within a post-phase action.
        /// </exception>
        /// <exception cref="T:System.ObjectDisposedException">The current instance has already been
        /// disposed.</exception>
        public long AddParticipants(int participantCount)
        {
            // check dispose
            ThrowIfDisposed();

            if (participantCount < 1)
            {
                throw new ArgumentOutOfRangeException(nameof(participantCount), participantCount,
                    SR.Barrier_AddParticipants_NonPositive_ArgumentOutOfRange);
            }
            else if (participantCount > MAX_PARTICIPANTS) //overflow
            {
                throw new ArgumentOutOfRangeException(nameof(participantCount),
                    SR.Barrier_AddParticipants_Overflow_ArgumentOutOfRange);
            }

            // Reject calls made from within the post-phase action itself.
            if (_actionCallerID != 0 && Environment.CurrentManagedThreadId == _actionCallerID)
            {
                throw new InvalidOperationException(SR.Barrier_InvalidOperation_CalledFromPHA);
            }

            // CAS loop: re-read the packed state and retry until the new
            // total is successfully published.
            SpinWait spinner = new SpinWait();
            long newPhase = 0;
            while (true)
            {
                int currentTotal = _currentTotalCount;
                int total;
                int current;
                bool sense;
                GetCurrentTotal(currentTotal, out current, out total, out sense);
                if (participantCount + total > MAX_PARTICIPANTS) //overflow
                {
                    throw new ArgumentOutOfRangeException(nameof(participantCount),
                        SR.Barrier_AddParticipants_Overflow_ArgumentOutOfRange);
                }

                if (SetCurrentTotal(currentTotal, current, total + participantCount, sense))
                {
                    // Calculating the first phase for that participant: if the current phase has already
                    // finished, return the next phase, else return the current phase.
                    // The current phase is finished when the sense doesn't match the
                    // phase's odd/even parity (the phase count hasn't been bumped yet),
                    // so currentPhase + 1 is returned; otherwise currentPhase is returned.
                    long currPhase = CurrentPhaseNumber;
                    newPhase = (sense != (currPhase % 2 == 0)) ? currPhase + 1 : currPhase;

                    // If this participant joins the next phase, the postPhaseAction is still running,
                    // so this participant must wait until it is done and its event is reset.
                    // Otherwise, if the postPhaseAction takes a long time, the event this participant
                    // is about to wait on would still be set (FinishPhase hasn't reset it yet).
                    if (newPhase != currPhase)
                    {
                        // Wait on the opposite event
                        if (sense)
                        {
                            _oddEvent.Wait();
                        }
                        else
                        {
                            _evenEvent.Wait();
                        }
                    }

                    // This else fixes the race where the current phase has finished and m_currentPhase
                    // has been updated but the events have not been set/reset yet; otherwise, when this
                    // participant calls SignalAndWait it would wait on an already-set event even though
                    // the other participants have not arrived yet.
                    else
                    {
                        if (sense && _evenEvent.IsSet)
                            _evenEvent.Reset();
                        else if (!sense && _oddEvent.IsSet)
                            _oddEvent.Reset();
                    }
                    break;
                }
                spinner.SpinOnce();
            }
            return newPhase;
        }
        /// <summary>
        /// Notifies the <see cref="Barrier"/> that there will be one less participant.
        /// </summary>
        /// <exception cref="T:System.InvalidOperationException">The barrier already has 0
        /// participants.</exception>
        /// <exception cref="T:System.InvalidOperationException">
        /// The method was invoked from within a post-phase action.
        /// </exception>
        /// <exception cref="T:System.ObjectDisposedException">The current instance has already been
        /// disposed.</exception>
        public void RemoveParticipant()
        {
            // Delegate to the batched overload with a count of one.
            RemoveParticipants(1);
        }
/// <summary>
/// Notifies the <see cref="Barrier"/> that there will be fewer participants.
/// </summary>
/// <param name="participantCount">The number of additional participants to remove from the barrier.</param>
/// <exception cref="T:System.ArgumentOutOfRangeException"><paramref name="participantCount"/> is less than
/// 0.</exception>
/// <exception cref="T:System.InvalidOperationException">The barrier already has 0 participants.</exception>
/// <exception cref="T:System.InvalidOperationException">
/// The method was invoked from within a post-phase action.
/// </exception>
/// <exception cref="T:System.ObjectDisposedException">The current instance has already been
/// disposed.</exception>
public void RemoveParticipants(int participantCount)
{
// check dispose
ThrowIfDisposed();
// Validate input
if (participantCount < 1)
{
throw new ArgumentOutOfRangeException(nameof(participantCount), participantCount,
SR.Barrier_RemoveParticipants_NonPositive_ArgumentOutOfRange);
}
// in case of this is called from the PHA
if (_actionCallerID != 0 && Environment.CurrentManagedThreadId == _actionCallerID)
{
throw new InvalidOperationException(SR.Barrier_InvalidOperation_CalledFromPHA);
}
SpinWait spinner = new SpinWait();
while (true)
{
int currentTotal = _currentTotalCount;
int total;
int current;
bool sense;
GetCurrentTotal(currentTotal, out current, out total, out sense);
if (total < participantCount)
{
throw new ArgumentOutOfRangeException(nameof(participantCount),
SR.Barrier_RemoveParticipants_ArgumentOutOfRange);
}
if (total - participantCount < current)
{
throw new InvalidOperationException(SR.Barrier_RemoveParticipants_InvalidOperation);
}
// If the remaining participants = current participants, then finish the current phase
int remaingParticipants = total - participantCount;
if (remaingParticipants > 0 && current == remaingParticipants)
{
if (SetCurrentTotal(currentTotal, 0, total - participantCount, !sense))
{
FinishPhase(sense);
break;
}
}
else
{
if (SetCurrentTotal(currentTotal, current, total - participantCount, sense))
{
break;
}
}
spinner.SpinOnce();
}
}
/// <summary>
/// Signals that a participant has reached the <see cref="Barrier"/> and waits for all other
/// participants to reach the barrier as well.
/// </summary>
/// <exception cref="T:System.InvalidOperationException">
/// The method was invoked from within a post-phase action, the barrier currently has 0 participants,
/// or the barrier is being used by more threads than are registered as participants.
/// </exception>
/// <exception cref="T:System.ObjectDisposedException">The current instance has already been
/// disposed.</exception>
public void SignalAndWait()
{
SignalAndWait(new CancellationToken());
}
        /// <summary>
        /// Signals that a participant has reached the <see cref="Barrier"/> and waits for all other
        /// participants to reach the barrier, while observing a <see
        /// cref="T:System.Threading.CancellationToken"/>.
        /// </summary>
        /// <param name="cancellationToken">The <see cref="T:System.Threading.CancellationToken"/> to
        /// observe.</param>
        /// <exception cref="T:System.InvalidOperationException">
        /// The method was invoked from within a post-phase action, the barrier currently has 0 participants,
        /// or the barrier is being used by more threads than are registered as participants.
        /// </exception>
        /// <exception cref="T:System.OperationCanceledException"><paramref name="cancellationToken"/> has been
        /// canceled.</exception>
        /// <exception cref="T:System.ObjectDisposedException">The current instance has already been
        /// disposed.</exception>
        public void SignalAndWait(CancellationToken cancellationToken)
        {
#if DEBUG
            bool result =
#endif
            SignalAndWait(Timeout.Infinite, cancellationToken);
#if DEBUG
            // With an infinite timeout the call can only return true (or
            // throw), so this assert documents the invariant in debug builds.
            Debug.Assert(result);
#endif
        }
/// <summary>
/// Signals that a participant has reached the <see cref="Barrier"/> and waits for all other
/// participants to reach the barrier as well, using a
/// <see cref="T:System.TimeSpan"/> to measure the time interval.
/// </summary>
/// <param name="timeout">A <see cref="T:System.TimeSpan"/> that represents the number of
/// milliseconds to wait, or a <see cref="T:System.TimeSpan"/> that represents -1 milliseconds to
/// wait indefinitely.</param>
/// <returns>true if all other participants reached the barrier; otherwise, false.</returns>
/// <exception cref="T:System.ArgumentOutOfRangeException"><paramref name="timeout"/>is a negative number
/// other than -1 milliseconds, which represents an infinite time-out, or it is greater than
/// <see cref="T:System.Int32.MaxValue"/>.</exception>
/// <exception cref="T:System.InvalidOperationException">
/// The method was invoked from within a post-phase action, the barrier currently has 0 participants,
/// or the barrier is being used by more threads than are registered as participants.
/// </exception>
/// <exception cref="T:System.ObjectDisposedException">The current instance has already been
/// disposed.</exception>
public bool SignalAndWait(TimeSpan timeout)
{
return SignalAndWait(timeout, new CancellationToken());
}
/// <summary>
/// Signals that a participant has reached the <see cref="Barrier"/> and blocks until every
/// other participant has arrived as well, or until the given <see cref="T:System.TimeSpan"/>
/// elapses, while observing a <see cref="T:System.Threading.CancellationToken"/>.
/// </summary>
/// <param name="timeout">A <see cref="T:System.TimeSpan"/> that represents the number of
/// milliseconds to wait, or a <see cref="T:System.TimeSpan"/> that represents -1 milliseconds to
/// wait indefinitely.</param>
/// <param name="cancellationToken">The <see cref="T:System.Threading.CancellationToken"/> to
/// observe.</param>
/// <returns>true if all other participants reached the barrier; otherwise, false.</returns>
/// <exception cref="T:System.ArgumentOutOfRangeException"><paramref name="timeout"/>is a negative number
/// other than -1 milliseconds, which represents an infinite time-out.</exception>
/// <exception cref="T:System.InvalidOperationException">
/// The method was invoked from within a post-phase action, the barrier currently has 0 participants,
/// or the barrier is being used by more threads than are registered as participants.
/// </exception>
/// <exception cref="T:System.OperationCanceledException"><paramref name="cancellationToken"/> has been
/// canceled.</exception>
/// <exception cref="T:System.ObjectDisposedException">The current instance has already been
/// disposed.</exception>
public bool SignalAndWait(TimeSpan timeout, CancellationToken cancellationToken)
{
    // Validate the span fits the millisecond overload: -1 (infinite) through int.MaxValue.
    long timeoutMilliseconds = (long)timeout.TotalMilliseconds;
    if (timeoutMilliseconds < -1 || timeoutMilliseconds > int.MaxValue)
    {
        throw new System.ArgumentOutOfRangeException(nameof(timeout), timeout,
            SR.Barrier_SignalAndWait_ArgumentOutOfRange);
    }
    return SignalAndWait((int)timeoutMilliseconds, cancellationToken);
}
/// <summary>
/// Signals that a participant has reached the <see cref="Barrier"/> and blocks until every
/// other participant has arrived as well, or until the given number of milliseconds elapses.
/// </summary>
/// <param name="millisecondsTimeout">The number of milliseconds to wait, or <see
/// cref="Timeout.Infinite"/>(-1) to wait indefinitely.</param>
/// <returns>true if all other participants reached the barrier; otherwise, false.</returns>
/// <exception cref="T:System.ArgumentOutOfRangeException"><paramref name="millisecondsTimeout"/> is a
/// negative number other than -1, which represents an infinite time-out.</exception>
/// <exception cref="T:System.InvalidOperationException">
/// The method was invoked from within a post-phase action, the barrier currently has 0 participants,
/// or the barrier is being used by more threads than are registered as participants.
/// </exception>
/// <exception cref="T:System.ObjectDisposedException">The current instance has already been
/// disposed.</exception>
public bool SignalAndWait(int millisecondsTimeout)
{
    // Delegate to the cancellable overload with a token that can never be signaled.
    return SignalAndWait(millisecondsTimeout, CancellationToken.None);
}
/// <summary>
/// Signals that a participant has reached the barrier and waits for all other participants to reach
/// the barrier as well, using a
/// 32-bit signed integer to measure the time interval, while observing a <see
/// cref="T:System.Threading.CancellationToken"/>.
/// </summary>
/// <param name="millisecondsTimeout">The number of milliseconds to wait, or <see
/// cref="Timeout.Infinite"/>(-1) to wait indefinitely.</param>
/// <param name="cancellationToken">The <see cref="T:System.Threading.CancellationToken"/> to
/// observe.</param>
/// <returns>true if all other participants reached the barrier; otherwise, false.</returns>
/// <exception cref="T:System.ArgumentOutOfRangeException"><paramref name="millisecondsTimeout"/> is a
/// negative number other than -1, which represents an infinite time-out.</exception>
/// <exception cref="T:System.InvalidOperationException">
/// The method was invoked from within a post-phase action, the barrier currently has 0 participants,
/// or the barrier is being used by more threads than are registered as participants.
/// </exception>
/// <exception cref="T:System.OperationCanceledException"><paramref name="cancellationToken"/> has been
/// canceled.</exception>
/// <exception cref="T:System.ObjectDisposedException">The current instance has already been
/// disposed.</exception>
public bool SignalAndWait(int millisecondsTimeout, CancellationToken cancellationToken)
{
    ThrowIfDisposed();
    cancellationToken.ThrowIfCancellationRequested();
    if (millisecondsTimeout < -1)
    {
        throw new System.ArgumentOutOfRangeException(nameof(millisecondsTimeout), millisecondsTimeout,
            SR.Barrier_SignalAndWait_ArgumentOutOfRange);
    }
    // Reject re-entrant calls made from inside the post-phase action (PHA).
    if (_actionCallerID != 0 && Environment.CurrentManagedThreadId == _actionCallerID)
    {
        throw new InvalidOperationException(SR.Barrier_InvalidOperation_CalledFromPHA);
    }
    // Local variables to extract the basic barrier state and update it.
    // They are declared here instead of inside the loop body because they will be used after the loop.
    bool sense; // The sense of the barrier *before* the phase associated with this SignalAndWait call completes
    int total;
    int current;
    int currentTotal;
    long phase;
    SpinWait spinner = new SpinWait();
    // CAS loop: atomically register this participant's arrival for the current phase.
    while (true)
    {
        currentTotal = _currentTotalCount;
        GetCurrentTotal(currentTotal, out current, out total, out sense);
        phase = CurrentPhaseNumber;
        // throw if zero participants
        if (total == 0)
        {
            throw new InvalidOperationException(SR.Barrier_SignalAndWait_InvalidOperation_ZeroTotal);
        }
        // Try to detect if the number of threads for this phase exceeded the total number of participants or not
        // This can be detected if the current is zero which means all participants for that phase has arrived and the phase number is not changed yet
        if (current == 0 && sense != (CurrentPhaseNumber % 2 == 0))
        {
            throw new InvalidOperationException(SR.Barrier_SignalAndWait_InvalidOperation_ThreadsExceeded);
        }
        //This is the last thread, finish the phase
        if (current + 1 == total)
        {
            if (SetCurrentTotal(currentTotal, 0, total, !sense))
            {
                if (CdsSyncEtwBCLProvider.Log.IsEnabled())
                {
                    CdsSyncEtwBCLProvider.Log.Barrier_PhaseFinished(sense, CurrentPhaseNumber);
                }
                FinishPhase(sense);
                return true;
            }
        }
        else if (SetCurrentTotal(currentTotal, current + 1, total, sense))
        {
            break;
        }
        // CAS lost to another thread; back off briefly and retry with fresh state.
        spinner.SpinOnce();
    }
    // ** Perform the real wait **
    // select the correct event to wait on, based on the current sense.
    ManualResetEventSlim eventToWaitOn = (sense) ? _evenEvent : _oddEvent;
    bool waitWasCanceled = false;
    bool waitResult = false;
    try
    {
        waitResult = DiscontinuousWait(eventToWaitOn, millisecondsTimeout, cancellationToken, phase);
    }
    catch (OperationCanceledException)
    {
        // Defer the throw: we must first back out our arrival count below.
        waitWasCanceled = true;
    }
    catch (ObjectDisposedException)// in case a race happen where one of the thread returned from SignalAndWait and the current thread calls Wait on a disposed event
    {
        // make sure the current phase for this thread is already finished, otherwise propagate the exception
        if (phase < CurrentPhaseNumber)
            waitResult = true;
        else
            throw;
    }
    if (!waitResult)
    {
        //reset the spinLock to prepare it for the next loop
        spinner.Reset();
        //If the wait timeout expired and all other threads didn't reach the barrier yet, update the current count back
        while (true)
        {
            bool newSense;
            currentTotal = _currentTotalCount;
            GetCurrentTotal(currentTotal, out current, out total, out newSense);
            // If the timeout expired and the phase has just finished, return true and this is considered as succeeded SignalAndWait
            //otherwise the timeout expired and the current phase has not been finished yet, return false
            //The phase is finished if the phase member variable is changed (incremented) or the sense has been changed
            // we have to use the statements in the comparison below for two cases:
            // 1- The sense is changed but the last thread didn't update the phase yet
            // 2- The phase is already incremented but the sense flipped twice due to the termination of the next phase
            if (phase < CurrentPhaseNumber || sense != newSense)
            {
                // The current phase has been finished, but we shouldn't return before the events are set/reset otherwise this thread could start
                // next phase and the appropriate event has not reset yet which could make it return immediately from the next phase SignalAndWait
                // before waiting other threads
                WaitCurrentPhase(eventToWaitOn, phase);
                Debug.Assert(phase < CurrentPhaseNumber);
                break;
            }
            //The phase has not been finished yet, try to update the current count.
            if (SetCurrentTotal(currentTotal, current - 1, total, sense))
            {
                //if here, then the attempt to back out was successful.
                //throw (a fresh) OCE if cancellation woke the wait
                //or return false if it was the timeout that woke the wait.
                //
                if (waitWasCanceled)
                    throw new OperationCanceledException(SR.Common_OperationCanceled, cancellationToken);
                else
                    return false;
            }
            spinner.SpinOnce();
        }
    }
    // Surface any failure the post-phase action recorded for this phase.
    if (_exception != null)
        throw new BarrierPostPhaseException(_exception);
    return true;
}
/// <summary>
/// Finishes the current phase: runs the post-phase action (if any) and then flips the sense
/// events so waiting participants are released. Must be called only by the last participant
/// to arrive at the barrier for this phase.
/// </summary>
/// <param name="observedSense">The sense of the phase being finished.</param>
private void FinishPhase(bool observedSense)
{
    // Execute the PHA in a try/finally block so state is restored and waiters are
    // released even if the action throws.
    if (_postPhaseAction != null)
    {
        try
        {
            // Capture the caller thread ID to check if Add/RemoveParticipant(s) is called from the PHA.
            _actionCallerID = Environment.CurrentManagedThreadId;
            if (_ownerThreadContext != null)
            {
                // Lazily cache the ContextCallback delegate so it is allocated at most once.
                ContextCallback handler = s_invokePostPhaseAction;
                if (handler == null)
                {
                    s_invokePostPhaseAction = handler = InvokePostPhaseAction;
                }
                ExecutionContext.Run(_ownerThreadContext, handler, this);
            }
            else
            {
                _postPhaseAction(this);
            }
            _exception = null; // reset the exception if it was set previously
        }
        catch (Exception ex)
        {
            // Remember the failure; it is rethrown below as a BarrierPostPhaseException.
            _exception = ex;
        }
        finally
        {
            _actionCallerID = 0;
            // Release the waiting participants even if the post-phase action failed.
            SetResetEvents(observedSense);
            if (_exception != null)
                throw new BarrierPostPhaseException(_exception);
        }
    }
    else
    {
        SetResetEvents(observedSense);
    }
}
/// <summary>
/// ContextCallback-compatible shim that invokes the barrier's post-phase action.
/// </summary>
/// <param name="obj">The <see cref="Barrier"/> whose post-phase action should run.</param>
private static void InvokePostPhaseAction(object obj)
{
    Barrier barrier = (Barrier)obj;
    barrier._postPhaseAction(barrier);
}
/// <summary>
/// Advances the phase number, then resets the event for the next phase and sets the event
/// for the phase that just completed, releasing its waiters.
/// </summary>
/// <param name="observedSense">The sense of the phase that just completed.</param>
private void SetResetEvents(bool observedSense)
{
    // The phase counter is written through the CurrentPhaseNumber property because the
    // underlying field is a 64-bit value that could suffer torn writes on 32-bit machines.
    CurrentPhaseNumber = CurrentPhaseNumber + 1;
    // Reset the next phase's event *before* setting the finished phase's event, so a
    // released waiter that immediately re-enters the barrier blocks on a cleared event.
    ManualResetEventSlim nextPhaseEvent = observedSense ? _oddEvent : _evenEvent;
    ManualResetEventSlim finishedPhaseEvent = observedSense ? _evenEvent : _oddEvent;
    nextPhaseEvent.Reset();
    finishedPhaseEvent.Set();
}
/// <summary>
/// Spins until the observed phase has finished completely: either its event has been set,
/// or the phase counter has advanced past the *next* phase as well (in which case the event
/// may have been reset again, so the counter is the reliable signal).
/// </summary>
/// <param name="currentPhaseEvent">The event associated with the observed phase.</param>
/// <param name="observedPhase">The phase number this thread arrived in.</param>
private void WaitCurrentPhase(ManualResetEventSlim currentPhaseEvent, long observedPhase)
{
    SpinWait spinWaiter = new SpinWait();
    for (; ; )
    {
        if (currentPhaseEvent.IsSet)
            break;
        if (CurrentPhaseNumber - observedPhase > 1)
            break;
        spinWaiter.SpinOnce();
    }
}
/// <summary>
/// Waits on the phase event in bounded slices rather than one long wait. This avoids the
/// race where the sense flips twice because the next phase also finishes (e.g. a participant
/// was removed, or another thread joined the next phase), which would reset the event and
/// strand a thread blocked on it forever. The slice sizes and doubling factor are arbitrary.
/// </summary>
/// <param name="currentPhaseEvent">The event associated with the observed phase.</param>
/// <param name="totalTimeout">Overall wait budget in milliseconds.</param>
/// <param name="token">Cancellation token passed down from SignalAndWait.</param>
/// <param name="observedPhase">The phase number this thread arrived in.</param>
/// <returns>True if the event was set or the phase number advanced; false on timeout.</returns>
private bool DiscontinuousWait(ManualResetEventSlim currentPhaseEvent, int totalTimeout, CancellationToken token, long observedPhase)
{
    const int sliceCeilingMs = 10000; // cap each slice at 10 seconds
    int sliceMs = 100;                // first slice is 100 ms, doubled each round
    while (observedPhase == CurrentPhaseNumber)
    {
        // Next slice: the smaller of the current slice size and the remaining budget.
        int waitTime = (totalTimeout == Timeout.Infinite) ? sliceMs : Math.Min(sliceMs, totalTimeout);
        if (currentPhaseEvent.Wait(waitTime, token))
        {
            return true;
        }
        // Charge this slice against the finite budget.
        if (totalTimeout != Timeout.Infinite)
        {
            totalTimeout -= waitTime;
            if (totalTimeout <= 0)
            {
                return false;
            }
        }
        sliceMs = (sliceMs >= sliceCeilingMs) ? sliceCeilingMs : Math.Min(sliceMs << 1, sliceCeilingMs);
    }
    // The phase number moved on while we were slicing; spin until the event is actually
    // set (or the following phase also completes) before reporting success.
    WaitCurrentPhase(currentPhaseEvent, observedPhase);
    return true;
}
/// <summary>
/// Releases all resources used by the current instance of <see cref="Barrier"/>.
/// </summary>
/// <exception cref="T:System.InvalidOperationException">
/// The method was invoked from within a post-phase action.
/// </exception>
/// <remarks>
/// Unlike most of the members of <see cref="Barrier"/>, Dispose is not thread-safe and may not be
/// used concurrently with other members of this instance.
/// </remarks>
public void Dispose()
{
    // Disposing from inside the post-phase action is not allowed.
    int postPhaseCaller = _actionCallerID;
    if (postPhaseCaller != 0 && postPhaseCaller == Environment.CurrentManagedThreadId)
    {
        throw new InvalidOperationException(SR.Barrier_InvalidOperation_CalledFromPHA);
    }
    Dispose(true);
    GC.SuppressFinalize(this);
}
/// <summary>
/// When overridden in a derived class, releases the unmanaged resources used by the
/// <see cref="Barrier"/>, and optionally releases the managed resources.
/// </summary>
/// <param name="disposing">true to release both managed and unmanaged resources; false to release
/// only unmanaged resources.</param>
/// <remarks>
/// Unlike most of the members of <see cref="Barrier"/>, Dispose is not thread-safe and may not be
/// used concurrently with other members of this instance.
/// </remarks>
protected virtual void Dispose(bool disposing)
{
    if (_disposed)
        return;
    if (disposing)
    {
        // Both sense events are managed resources owned by this instance.
        _oddEvent.Dispose();
        _evenEvent.Dispose();
    }
    _disposed = true;
}
/// <summary>
/// Throws an <see cref="T:System.ObjectDisposedException"/> if this barrier has been disposed.
/// </summary>
private void ThrowIfDisposed()
{
    if (!_disposed)
        return;
    throw new ObjectDisposedException("Barrier", SR.Barrier_Dispose);
}
}
}
| Jiayili1/corefx | src/System.Threading/src/System/Threading/Barrier.cs | C# | mit | 45,242 |
/*
* TI OMAP I2C master mode driver
*
* Copyright (C) 2003 MontaVista Software, Inc.
* Copyright (C) 2005 Nokia Corporation
* Copyright (C) 2004 - 2007 Texas Instruments.
*
* Originally written by MontaVista Software, Inc.
* Additional contributions by:
* Tony Lindgren <tony@atomide.com>
* Imre Deak <imre.deak@nokia.com>
* Juha Yrjölä <juha.yrjola@solidboot.com>
* Syed Khasim <x0khasim@ti.com>
* Nishant Menon <nm@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hwspinlock.h>
#include <linux/i2c-omap.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos_params.h>
#ifdef CONFIG_OMAP4_DPLL_CASCADING
#include <linux/notifier.h>
#include <plat/clock.h>
#define OMAP_I2C_MASTER_CLOCK 96000000
#define OMAP_I2C_DPLL_CLOCK 49152000
#endif
/* I2C controller revisions */
#define OMAP_I2C_REV_2			0x20

/* I2C controller revisions present on specific hardware */
#define OMAP_I2C_REV_ON_2430		0x36
#define OMAP_I2C_REV_ON_3430		0x3C
#define OMAP_I2C_REV_ON_4430		0x40

/* timeout waiting for the controller to respond */
#define OMAP_I2C_TIMEOUT		(msecs_to_jiffies(1000))

/*
 * Logical register indices; the per-SoC offset tables (reg_map /
 * omap4_reg_map below) translate these to actual register offsets.
 * For OMAP3 I2C_IV has changed to I2C_WE (wakeup enable).
 */
enum {
	OMAP_I2C_REV_REG = 0,
	OMAP_I2C_IE_REG,
	OMAP_I2C_STAT_REG,
	OMAP_I2C_IV_REG,
	OMAP_I2C_WE_REG,
	OMAP_I2C_SYSS_REG,
	OMAP_I2C_BUF_REG,
	OMAP_I2C_CNT_REG,
	OMAP_I2C_DATA_REG,
	OMAP_I2C_CON_REG,
	OMAP_I2C_OA_REG,
	OMAP_I2C_SA_REG,
	OMAP_I2C_PSC_REG,
	OMAP_I2C_SCLL_REG,
	OMAP_I2C_SCLH_REG,
	OMAP_I2C_SYSTEST_REG,
	OMAP_I2C_BUFSTAT_REG,
	OMAP_I2C_REVNB_LO,
	OMAP_I2C_REVNB_HI,
	OMAP_I2C_IRQSTATUS_RAW,
	OMAP_I2C_IRQSTATUS,
	OMAP_I2C_IRQENABLE_SET,
	OMAP_I2C_IRQENABLE_CLR,
};

/* I2C Interrupt Enable Register (OMAP_I2C_IE): */
#define OMAP_I2C_IE_XDR		(1 << 14)	/* TX Buffer drain int enable */
#define OMAP_I2C_IE_RDR		(1 << 13)	/* RX Buffer drain int enable */
#define OMAP_I2C_IE_XRDY	(1 << 4)	/* TX data ready int enable */
#define OMAP_I2C_IE_RRDY	(1 << 3)	/* RX data ready int enable */
#define OMAP_I2C_IE_ARDY	(1 << 2)	/* Access ready int enable */
#define OMAP_I2C_IE_NACK	(1 << 1)	/* No ack interrupt enable */
#define OMAP_I2C_IE_AL		(1 << 0)	/* Arbitration lost int ena */

/* I2C Status Register (OMAP_I2C_STAT): */
#define OMAP_I2C_STAT_XDR	(1 << 14)	/* TX Buffer draining */
#define OMAP_I2C_STAT_RDR	(1 << 13)	/* RX Buffer draining */
#define OMAP_I2C_STAT_BB	(1 << 12)	/* Bus busy */
#define OMAP_I2C_STAT_ROVR	(1 << 11)	/* Receive overrun */
#define OMAP_I2C_STAT_XUDF	(1 << 10)	/* Transmit underflow */
#define OMAP_I2C_STAT_AAS	(1 << 9)	/* Address as slave */
#define OMAP_I2C_STAT_AD0	(1 << 8)	/* Address zero */
#define OMAP_I2C_STAT_XRDY	(1 << 4)	/* Transmit data ready */
#define OMAP_I2C_STAT_RRDY	(1 << 3)	/* Receive data ready */
#define OMAP_I2C_STAT_ARDY	(1 << 2)	/* Register access ready */
#define OMAP_I2C_STAT_NACK	(1 << 1)	/* No ack detected (status) */
#define OMAP_I2C_STAT_AL	(1 << 0)	/* Arbitration lost (status) */

/* I2C WE wakeup enable register */
#define OMAP_I2C_WE_XDR_WE	(1 << 14)	/* TX drain wakup */
#define OMAP_I2C_WE_RDR_WE	(1 << 13)	/* RX drain wakeup */
#define OMAP_I2C_WE_AAS_WE	(1 << 9)	/* Address as slave wakeup*/
#define OMAP_I2C_WE_BF_WE	(1 << 8)	/* Bus free wakeup */
#define OMAP_I2C_WE_STC_WE	(1 << 6)	/* Start condition wakeup */
#define OMAP_I2C_WE_GC_WE	(1 << 5)	/* General call wakeup */
#define OMAP_I2C_WE_DRDY_WE	(1 << 3)	/* TX/RX data ready wakeup */
#define OMAP_I2C_WE_ARDY_WE	(1 << 2)	/* Reg access ready wakeup */
#define OMAP_I2C_WE_NACK_WE	(1 << 1)	/* No acknowledgment wakeup */
#define OMAP_I2C_WE_AL_WE	(1 << 0)	/* Arbitration lost wakeup */

/* Union of every wakeup source; used when enabling all wakeups at init. */
#define OMAP_I2C_WE_ALL		(OMAP_I2C_WE_XDR_WE | OMAP_I2C_WE_RDR_WE | \
				OMAP_I2C_WE_AAS_WE | OMAP_I2C_WE_BF_WE | \
				OMAP_I2C_WE_STC_WE | OMAP_I2C_WE_GC_WE | \
				OMAP_I2C_WE_DRDY_WE | OMAP_I2C_WE_ARDY_WE | \
				OMAP_I2C_WE_NACK_WE | OMAP_I2C_WE_AL_WE)

/* I2C Buffer Configuration Register (OMAP_I2C_BUF): */
#define OMAP_I2C_BUF_RDMA_EN	(1 << 15)	/* RX DMA channel enable */
#define OMAP_I2C_BUF_RXFIF_CLR	(1 << 14)	/* RX FIFO Clear */
#define OMAP_I2C_BUF_XDMA_EN	(1 << 7)	/* TX DMA channel enable */
#define OMAP_I2C_BUF_TXFIF_CLR	(1 << 6)	/* TX FIFO Clear */

/* I2C Configuration Register (OMAP_I2C_CON): */
#define OMAP_I2C_CON_EN		(1 << 15)	/* I2C module enable */
#define OMAP_I2C_CON_BE		(1 << 14)	/* Big endian mode */
#define OMAP_I2C_CON_OPMODE_HS	(1 << 12)	/* High Speed support */
#define OMAP_I2C_CON_STB	(1 << 11)	/* Start byte mode (master) */
#define OMAP_I2C_CON_MST	(1 << 10)	/* Master/slave mode */
#define OMAP_I2C_CON_TRX	(1 << 9)	/* TX/RX mode (master only) */
#define OMAP_I2C_CON_XA		(1 << 8)	/* Expand address */
#define OMAP_I2C_CON_RM		(1 << 2)	/* Repeat mode (master only) */
#define OMAP_I2C_CON_STP	(1 << 1)	/* Stop cond (master only) */
#define OMAP_I2C_CON_STT	(1 << 0)	/* Start condition (master) */

/* I2C SCL time value when Master */
#define OMAP_I2C_SCLL_HSSCLL	8
#define OMAP_I2C_SCLH_HSSCLH	8

/* I2C System Test Register (OMAP_I2C_SYSTEST): */
#define OMAP_I2C_SYSTEST_ST_EN		(1 << 15)	/* System test enable */
#define OMAP_I2C_SYSTEST_FREE		(1 << 14)	/* Free running mode */
#define OMAP_I2C_SYSTEST_TMODE_MASK	(3 << 12)	/* Test mode select */
#define OMAP_I2C_SYSTEST_TMODE_TEST	(2 << 12)	/* Test mode select */
#define OMAP_I2C_SYSTEST_TMODE_LOOP	(3 << 12)	/* Test mode select */
#define OMAP_I2C_SYSTEST_SCL_I		(1 << 3)	/* SCL line sense in */
#define OMAP_I2C_SYSTEST_SCL_O		(1 << 2)	/* SCL line drive out */
#define OMAP_I2C_SYSTEST_SDA_I		(1 << 1)	/* SDA line sense in */
#define OMAP_I2C_SYSTEST_SDA_O		(1 << 0)	/* SDA line drive out */

/* I2C System Status Register */
#define OMAP_I2C_SYSS_RDONE		BIT(0)		/* Reset done */

/* Errata definitions */
#define I2C_OMAP_ERRATA_I207		(1 << 0)
#define I2C_OMAP3_1P153			(1 << 1)
/*
 * Per-controller driver state; one instance per I2C bus.
 */
struct omap_i2c_dev {
	struct device		*dev;
	void __iomem		*base;		/* virtual */
	int			irq;
	int			reg_shift;      /* bit shift for I2C register addresses */
	struct completion	cmd_complete;	/* signalled by the IRQ handler when a transfer ends */
	struct resource		*ioarea;
	u32			latency;	/* maximum mpu wkup latency */
	int			(*device_reset)(struct device *dev);
	struct pm_qos_request_list *pm_qos;
	u32			speed;		/* Speed of bus in Khz */
	u16			cmd_err;	/* error status bits captured during a transfer */
	u8			*buf;		/* current transfer buffer */
	u8			*regs;		/* register offset table (reg_map or omap4_reg_map) */
	size_t			buf_len;	/* bytes remaining in the current transfer */
	struct i2c_adapter	adapter;
	u8			fifo_size;	/* use as flag and value
						 * fifo_size==0 implies no fifo
						 * if set, should be trsh+1
						 */
	u8			rev;		/* controller revision, from the REV register */
	bool			shutdown;
	unsigned		b_hw:1;		/* bad h/w fixes */
	unsigned		idle:1;
	u16			iestate;	/* Saved interrupt register */
	u16			pscstate;	/* saved prescaler, restored on unidle */
	u16			scllstate;	/* saved SCL low time, restored on unidle */
	u16			sclhstate;	/* saved SCL high time, restored on unidle */
	u16			bufstate;	/* saved buffer config, restored on unidle */
	u16			westate;	/* saved wakeup-enable mask, restored on unidle */
	u16			errata;		/* I2C_OMAP_ERRATA_* flags for this SoC */
#ifdef CONFIG_OMAP4_DPLL_CASCADING
	struct notifier_block	nb;
	int			dpll_entry;
	int			dpll_exit;
	unsigned long		i2c_fclk_rate;
	spinlock_t		dpll_lock;
#endif
};
/*
 * Register offsets for the legacy (pre-OMAP4) controllers, indexed by the
 * OMAP_I2C_*_REG enum.  Offsets are shifted by dev->reg_shift before use.
 *
 * Note: "static const" (storage-class specifier first) replaces the original
 * "const static", which is deprecated ordering and triggers checkpatch
 * warnings.
 */
static const u8 reg_map[] = {
	[OMAP_I2C_REV_REG] = 0x00,
	[OMAP_I2C_IE_REG] = 0x01,
	[OMAP_I2C_STAT_REG] = 0x02,
	[OMAP_I2C_IV_REG] = 0x03,
	[OMAP_I2C_WE_REG] = 0x03,
	[OMAP_I2C_SYSS_REG] = 0x04,
	[OMAP_I2C_BUF_REG] = 0x05,
	[OMAP_I2C_CNT_REG] = 0x06,
	[OMAP_I2C_DATA_REG] = 0x07,
	[OMAP_I2C_CON_REG] = 0x09,
	[OMAP_I2C_OA_REG] = 0x0a,
	[OMAP_I2C_SA_REG] = 0x0b,
	[OMAP_I2C_PSC_REG] = 0x0c,
	[OMAP_I2C_SCLL_REG] = 0x0d,
	[OMAP_I2C_SCLH_REG] = 0x0e,
	[OMAP_I2C_SYSTEST_REG] = 0x0f,
	[OMAP_I2C_BUFSTAT_REG] = 0x10,
};
/*
 * Register offsets for the OMAP4 controller, indexed by the OMAP_I2C_*_REG
 * enum.  Offsets are shifted by dev->reg_shift before use.
 *
 * Note: "static const" replaces the deprecated "const static" ordering, and
 * the SYSTEST offset uses lowercase hex (0xbc) for consistency with the rest
 * of the table.
 */
static const u8 omap4_reg_map[] = {
	[OMAP_I2C_REV_REG] = 0x04,
	[OMAP_I2C_IE_REG] = 0x2c,
	[OMAP_I2C_STAT_REG] = 0x28,
	[OMAP_I2C_IV_REG] = 0x34,
	[OMAP_I2C_WE_REG] = 0x34,
	[OMAP_I2C_SYSS_REG] = 0x90,
	[OMAP_I2C_BUF_REG] = 0x94,
	[OMAP_I2C_CNT_REG] = 0x98,
	[OMAP_I2C_DATA_REG] = 0x9c,
	[OMAP_I2C_CON_REG] = 0xa4,
	[OMAP_I2C_OA_REG] = 0xa8,
	[OMAP_I2C_SA_REG] = 0xac,
	[OMAP_I2C_PSC_REG] = 0xb0,
	[OMAP_I2C_SCLL_REG] = 0xb4,
	[OMAP_I2C_SCLH_REG] = 0xb8,
	[OMAP_I2C_SYSTEST_REG] = 0xbc,
	[OMAP_I2C_BUFSTAT_REG] = 0xc0,
	[OMAP_I2C_REVNB_LO] = 0x00,
	[OMAP_I2C_REVNB_HI] = 0x04,
	[OMAP_I2C_IRQSTATUS_RAW] = 0x24,
	[OMAP_I2C_IRQSTATUS] = 0x28,
	[OMAP_I2C_IRQENABLE_SET] = 0x2c,
	[OMAP_I2C_IRQENABLE_CLR] = 0x30,
};
/* Write a 16-bit value to a controller register given its logical index. */
static inline void omap_i2c_write_reg(struct omap_i2c_dev *i2c_dev,
				      int reg, u16 val)
{
	/* Translate the logical index via the per-SoC offset table. */
	int offset = i2c_dev->regs[reg] << i2c_dev->reg_shift;

	__raw_writew(val, i2c_dev->base + offset);
}
/* Read a 16-bit value from a controller register given its logical index. */
static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
{
	/* Translate the logical index via the per-SoC offset table. */
	int offset = i2c_dev->regs[reg] << i2c_dev->reg_shift;

	return __raw_readw(i2c_dev->base + offset);
}
/*
 * Take the platform-provided hardware spinlock with a 100 ms timeout.
 * Returns 0 on success, the hook's error code on timeout, or -EINVAL when
 * the platform supplies no lock hook.
 */
static int omap_i2c_hwspinlock_lock(struct omap_i2c_dev *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	struct omap_i2c_bus_platform_data *pdata = pdev->dev.platform_data;
	int ret;

	if (!pdata->hwspin_lock_timeout)
		return -EINVAL;

	ret = pdata->hwspin_lock_timeout(pdata->handle, 100);
	if (ret != 0)
		dev_err(&pdev->dev, "%s: TIMEDOUT: Failed to acquire "
				"hwspinlock\n", __func__);

	return ret;
}
/* Release the platform-provided hardware spinlock, if one exists. */
static void omap_i2c_hwspinlock_unlock(struct omap_i2c_dev *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	struct omap_i2c_bus_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata->hwspin_unlock)
		return;

	pdata->hwspin_unlock(pdata->handle);
}
/*
 * Bring the controller out of idle: re-enable it via runtime PM, restore the
 * register context that was lost while idle (OMAP3/4 only), and unmask the
 * saved interrupt sources.
 *
 * Fixes: removed the unused local 'pdata' (assigned but never read, causing a
 * set-but-not-used compiler warning) and the missing space after the comma in
 * the IRQENABLE_CLR write.
 */
static void omap_i2c_unidle(struct omap_i2c_dev *dev)
{
	struct platform_device *pdev;

	WARN_ON(!dev->idle);

	pdev = to_platform_device(dev->dev);

	pm_runtime_get_sync(&pdev->dev);

	if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
		unsigned long delay;

		/* Restore the context saved by omap_i2c_init()/idle. */
		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
		omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
		omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
		omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, dev->sclhstate);
		omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, dev->bufstate);
		omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);

		/* Wait (bounded) for the module's reset-done handshake. */
		delay = jiffies + OMAP_I2C_TIMEOUT;
		while (!(omap_i2c_read_reg(dev, OMAP_I2C_SYSS_REG)
			 & OMAP_I2C_SYSS_RDONE)) {
			if (time_after(jiffies, delay)) {
				dev_err(dev->dev, "omap i2c unidle timeout\n");
				return;
			}
			cpu_relax();
		}
	}

	dev->idle = 0;

	/* Rev >= 0x40 (OMAP4) uses the set/clear style interrupt registers. */
	if (cpu_is_omap44xx() && dev->rev >= OMAP_I2C_REV_ON_4430) {
		omap_i2c_write_reg(dev, OMAP_I2C_IRQENABLE_CLR, 0x6FFF);
		omap_i2c_write_reg(dev, OMAP_I2C_IRQENABLE_SET, dev->iestate);
	} else {
		omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
	}
}
/*
 * Put the controller into idle: mask all interrupt sources, acknowledge any
 * pending event on old (rev < 0x20) controllers, and drop the runtime PM
 * reference.
 *
 * Fixes: removed the unused locals 'pdata' and 'iv' (both assigned but never
 * read, causing set-but-not-used compiler warnings); the IV read is kept for
 * its side effect.
 */
static void omap_i2c_idle(struct omap_i2c_dev *dev)
{
	struct platform_device *pdev;

	WARN_ON(dev->idle);

	pdev = to_platform_device(dev->dev);

	/* Mask every interrupt source before cutting clocks. */
	if (cpu_is_omap44xx() && dev->rev >= OMAP_I2C_REV_ON_4430)
		omap_i2c_write_reg(dev, OMAP_I2C_IRQENABLE_CLR, 0x6FFF);
	else
		omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);

	if (dev->rev < OMAP_I2C_REV_2) {
		/* Reading IV acks pending events on old controllers. */
		omap_i2c_read_reg(dev, OMAP_I2C_IV_REG); /* Read clears */
	}

	dev->idle = 1;
	pm_runtime_put_sync(&pdev->dev);
}
/* Invoke the platform reset hook, if any, logging (not propagating) failure. */
static inline void omap_i2c_reset(struct omap_i2c_dev *dev)
{
	if (dev->device_reset && dev->device_reset(dev->dev) < 0)
		dev_err(dev->dev, "reset failed\n");
}
/*
 * Program the controller from scratch: wakeup sources, prescaler, SCL
 * low/high times (derived from the functional clock and the requested bus
 * speed), FIFO thresholds, and finally the module-enable bit.  Also saves the
 * computed values so omap_i2c_unidle() can restore them.  Returns 0.
 */
static int omap_i2c_init(struct omap_i2c_dev *dev)
{
	u16 psc = 0, scll = 0, sclh = 0, buf = 0;
	u16 fsscll = 0, fssclh = 0, hsscll = 0, hssclh = 0;
	unsigned long fclk_rate = 12000000;
	unsigned long internal_clk = 0;
	struct clk *fclk;

	if (dev->rev >= OMAP_I2C_REV_ON_3430) {
		/*
		 * Enabling all wakup sources to stop I2C freezing on
		 * WFI instruction.
		 * REVISIT: Some wkup sources might not be needed.
		 */
		dev->westate = OMAP_I2C_WE_ALL;
		omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
	}

	/* Disable the module while reprogramming its timing. */
	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);

	if (cpu_class_is_omap1()) {
		/*
		 * The I2C functional clock is the armxor_ck, so there's
		 * no need to get "armxor_ck" separately. Now, if OMAP2420
		 * always returns 12MHz for the functional clock, we can
		 * do this bit unconditionally.
		 */
		fclk = clk_get(dev->dev, "fck");
		fclk_rate = clk_get_rate(fclk);
		clk_put(fclk);

		/* TRM for 5912 says the I2C clock must be prescaled to be
		 * between 7 - 12 MHz. The XOR input clock is typically
		 * 12, 13 or 19.2 MHz. So we should have code that produces:
		 *
		 * XOR MHz	Divider		Prescaler
		 * 12		1		0
		 * 13		2		1
		 * 19.2		2		1
		 */
		if (fclk_rate > 12000000)
			psc = fclk_rate / 12000000;
	}

	if (!(cpu_class_is_omap1() || cpu_is_omap2420())) {
		/*
		 * HSI2C controller internal clk rate should be 19.2 Mhz for
		 * HS and for all modes on 2430. On 34xx we can use lower rate
		 * to get longer filter period for better noise suppression.
		 * The filter is iclk (fclk for HS) period.
		 */
		if (dev->speed > 400 || cpu_is_omap2430())
			internal_clk = 19200;
		else if (dev->speed > 100)
			internal_clk = 9600;
		else
			internal_clk = 4000;
		fclk = clk_get(dev->dev, "fck");
		fclk_rate = clk_get_rate(fclk) / 1000;
#ifdef CONFIG_OMAP4_DPLL_CASCADING
		/* Remember the rate for DPLL-cascade reconfiguration. */
		dev->i2c_fclk_rate = fclk_rate;
#endif
		clk_put(fclk);

		/* Compute prescaler divisor */
		psc = fclk_rate / internal_clk;
		psc = psc - 1;

		/* If configured for High Speed */
		if (dev->speed > 400) {
			unsigned long scl;

			/* For first phase of HS mode */
			scl = internal_clk / 400;
			fsscll = scl - (scl / 3) - 7;
			fssclh = (scl / 3) - 5;

			/* For second phase of HS mode */
			scl = fclk_rate / dev->speed;
			hsscll = scl - (scl / 3) - 7;
			hssclh = (scl / 3) - 5;
		} else if (dev->speed > 100) {
			unsigned long scl;

			/* Fast mode */
			scl = internal_clk / dev->speed;
			fsscll = scl - (scl / 3) - 7;
			fssclh = (scl / 3) - 5;
		} else {
			/* Standard mode */
			fsscll = internal_clk / (dev->speed * 2) - 7;
			fssclh = internal_clk / (dev->speed * 2) - 5;
		}
		/* HS fields live in the upper byte, F/S fields in the lower. */
		scll = (hsscll << OMAP_I2C_SCLL_HSSCLL) | fsscll;
		sclh = (hssclh << OMAP_I2C_SCLH_HSSCLH) | fssclh;
	} else {
		/* Program desired operating rate */
		/*
		 * NOTE(review): fclk_rate is scaled with the unclamped psc
		 * before psc is limited to 2 below — this matches the
		 * OMAP1-era upstream code, but verify the ordering is
		 * intentional.
		 */
		fclk_rate /= (psc + 1) * 1000;
		if (psc > 2)
			psc = 2;
		scll = fclk_rate / (dev->speed * 2) - 7 + psc;
		sclh = fclk_rate / (dev->speed * 2) - 7 + psc;
	}

	/* Setup clock prescaler to obtain approx 12MHz I2C module clock: */
	omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, psc);

	/* SCL low and high time values */
	omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, scll);
	omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, sclh);

	if (dev->fifo_size) {
		/* Note: setup required fifo size - 1. RTRSH and XTRSH */
		buf = (dev->fifo_size - 1) << 8 | OMAP_I2C_BUF_RXFIF_CLR |
			(dev->fifo_size - 1) | OMAP_I2C_BUF_TXFIF_CLR;
		omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, buf);
	}

	/* Take the I2C module out of reset: */
	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);

	dev->errata = 0;

	if (cpu_is_omap2430() || cpu_is_omap34xx())
		dev->errata |= I2C_OMAP_ERRATA_I207;

	/* Save context so omap_i2c_unidle() can restore it after idle. */
	if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
		dev->pscstate = psc;
		dev->scllstate = scll;
		dev->sclhstate = sclh;
		dev->bufstate = buf;
	}
	return 0;
}
/*
* Waiting on Bus Busy
*/
static int omap_i2c_wait_for_bb(struct omap_i2c_dev *dev)
{
unsigned long timeout;
timeout = jiffies + OMAP_I2C_TIMEOUT;
while (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG) & OMAP_I2C_STAT_BB) {
if (time_after(jiffies, timeout)) {
dev_warn(dev->dev, "timeout waiting for bus ready\n");
return -ETIMEDOUT;
}
msleep(1);
}
return 0;
}
/*
 * Bus Clear
 *
 * Attempt to recover a stuck bus, then re-init the controller and wait for
 * the bus to report idle.  Returns the result of omap_i2c_wait_for_bb().
 */
static int omap_i2c_bus_clear(struct omap_i2c_dev *dev)
{
	u16 w;

	/* Per the I2C specification, if we are stuck in a bus busy state
	 * we can attempt a bus clear to try and recover the bus by sending
	 * at least 9 clock pulses on SCL. Put the I2C in a test mode so it
	 * will output a continuous clock on SCL.
	 */
	disable_irq(dev->irq);
	/* Save SYSTEST so the normal operating mode can be restored below. */
	w = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
	omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG,
			   (OMAP_I2C_SYSTEST_ST_EN | OMAP_I2C_SYSTEST_TMODE_TEST));
	/* 1 ms of free-running SCL is more than the required 9 pulses. */
	msleep(1);
	omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, w);
	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
	/* Reset and reprogram the controller before rechecking the bus. */
	omap_i2c_reset(dev);
	omap_i2c_init(dev);
	enable_irq(dev->irq);
	return omap_i2c_wait_for_bb(dev);
}
/*
 * Low level master read/write transaction.
 *
 * Programs the slave address, byte count and transfer direction, issues a
 * start (and optionally stop) condition, then sleeps until the IRQ handler
 * completes dev->cmd_complete or the transfer times out.  Returns 0 on
 * success, -EINVAL for zero-length messages, -ETIMEDOUT, -EREMOTEIO on NACK,
 * or -EIO on arbitration loss / other errors.
 */
static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
			     struct i2c_msg *msg, int stop)
{
	struct omap_i2c_dev *dev = i2c_get_adapdata(adap);
	int r;
	u16 w;

	dev_dbg(dev->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
		msg->addr, msg->len, msg->flags, stop);

	/* The controller cannot transfer zero bytes. */
	if (msg->len == 0)
		return -EINVAL;

	omap_i2c_write_reg(dev, OMAP_I2C_SA_REG, msg->addr);

	/* REVISIT: Could the STB bit of I2C_CON be used with probing? */
	/* Publish the buffer for the IRQ handler to drain/fill. */
	dev->buf = msg->buf;
	dev->buf_len = msg->len;

	omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len);

	/* Clear the FIFO Buffers */
	w = omap_i2c_read_reg(dev, OMAP_I2C_BUF_REG);
	w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR;
	omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, w);

	init_completion(&dev->cmd_complete);
	dev->cmd_err = 0;

	/* Build the CON word: enabled, master, start condition. */
	w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT;

	/* High speed configuration */
	if (dev->speed > 400)
		w |= OMAP_I2C_CON_OPMODE_HS;

	if (msg->flags & I2C_M_TEN)
		w |= OMAP_I2C_CON_XA;
	if (!(msg->flags & I2C_M_RD))
		w |= OMAP_I2C_CON_TRX;

	if (!dev->b_hw && stop)
		w |= OMAP_I2C_CON_STP;

	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);

	/*
	 * Don't write stt and stp together on some hardware.
	 */
	if (dev->b_hw && stop) {
		unsigned long delay = jiffies + OMAP_I2C_TIMEOUT;
		u16 con = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);
		/* Wait for the hardware to consume the start condition. */
		while (con & OMAP_I2C_CON_STT) {
			con = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);

			/* Let the user know if i2c is in a bad state */
			if (time_after(jiffies, delay)) {
				dev_err(dev->dev, "controller timed out "
				"waiting for start condition to finish\n");
				return -ETIMEDOUT;
			}
			cpu_relax();
		}

		w |= OMAP_I2C_CON_STP;
		w &= ~OMAP_I2C_CON_STT;
		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);
	}

	/*
	 * REVISIT: We should abort the transfer on signals, but the bus goes
	 * into arbitration and we're currently unable to recover from it.
	 */
	r = wait_for_completion_timeout(&dev->cmd_complete,
					OMAP_I2C_TIMEOUT);
	dev->buf_len = 0;
	if (r == 0) {
		dev_err(dev->dev, "controller timed out\n");
		omap_i2c_reset(dev);
		omap_i2c_init(dev);
		return -ETIMEDOUT;
	}

	if (likely(!dev->cmd_err))
		return 0;

	/* We have an error */
	if (dev->cmd_err & OMAP_I2C_STAT_AL) {
		/* Arbitration lost: re-init and report a generic I/O error. */
		omap_i2c_reset(dev);
		omap_i2c_init(dev);
		return -EIO;
	}

	if (dev->cmd_err & OMAP_I2C_STAT_NACK) {
		if (msg->flags & I2C_M_IGNORE_NAK)
			return 0;
		if (stop) {
			/* Still terminate the transaction with a stop condition. */
			w = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);
			w |= OMAP_I2C_CON_STP;
			omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);
		}
		return -EREMOTEIO;
	}
	return -EIO;
}
#ifdef CONFIG_OMAP4_DPLL_CASCADING
/*
 * Recompute the prescaler and SCL low/high times for a new functional
 * clock rate (used when the fclk changes on DPLL-cascading entry/exit).
 * Handles standard (<= 100 kHz), fast (<= 400 kHz) and high-speed
 * (> 400 kHz) configurations.
 *
 * NOTE(review): @pdata is currently unused here; @fclk_rate is
 * presumably in kHz (the caller passes OMAP_I2C_DPLL_CLOCK / 1000) —
 * confirm dev->i2c_fclk_rate uses the same unit.
 */
static void omap_i2c_dpll_configure(struct omap_i2c_dev *dev,
		struct omap_i2c_bus_platform_data *pdata,
		unsigned long fclk_rate)
{
	unsigned long internal_clk;
	u16 psc = 0, scll = 0, sclh = 0;
	u16 fsscll = 0, fssclh = 0, hsscll = 0, hssclh = 0;

	/*
	 * FIXME: check I2C_HAS_FASTMODE_PLUS feature
	 * when dev->speed > 1000
	 */
	/* Pick the internal sampling clock for the target bus speed. */
	if (dev->speed > 400)
		internal_clk = 19200;
	else if (dev->speed > 100)
		internal_clk = 9600;
	else
		internal_clk = 4000;
	/* Prescaler divides fclk down to internal_clk (register holds N-1). */
	psc = fclk_rate / internal_clk;
	psc = psc - 1;
	if (dev->speed > 400) {
		unsigned long scl;

		/* For first phase of HS mode */
		scl = internal_clk / 400;
		fsscll = scl - (scl / 3) - 7;
		fssclh = (scl / 3) - 5;
		/* For second phase of HS mode */
		scl = fclk_rate / dev->speed;
		hsscll = scl - (scl / 3) - 7;
		hssclh = (scl / 3) - 5;
	} else if (dev->speed > 100) {
		unsigned long scl;

		/* Fast mode */
		scl = internal_clk / dev->speed;
		fsscll = scl - (scl / 3) - 7;
		fssclh = (scl / 3) - 5;
	} else {
		/* Standard mode */
		fsscll = internal_clk / (dev->speed * 2) - 7;
		fssclh = internal_clk / (dev->speed * 2) - 5;
	}
	/* Combine the HS-phase values (shifted) with the F/S-phase values. */
	scll = (hsscll << OMAP_I2C_SCLL_HSSCLL) | fsscll;
	sclh = (hssclh << OMAP_I2C_SCLH_HSSCLH) | fssclh;
	/* Setup clock prescaler to obtain approx 12MHz I2C module clock: */
	omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, psc);
	/* SCL low and high time values */
	omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, scll);
	omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, sclh);
}
/*
 * Clock-rate-change notifier for DPLL cascading.  Flags entry/exit so
 * the next transfer (omap_i2c_xfer()) reprograms the bus dividers; on
 * exit, if a transfer is active, it busy-waits (bounded) for the bus
 * to go free first.  Always returns 0.
 */
static int omap_i2c_dpll_notifier(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct omap_i2c_dev *dev = container_of(nb, struct omap_i2c_dev, nb);
	struct clk_notifier_data *cnd = (struct clk_notifier_data *)data;
	unsigned int count = 0;

	spin_lock(&dev->dpll_lock);
	if (val == CLK_POST_RATE_CHANGE &&
	    cnd->old_rate == OMAP_I2C_MASTER_CLOCK)
		dev->dpll_entry = 1;
	else if (val == CLK_PRE_RATE_CHANGE &&
		 cnd->old_rate == OMAP_I2C_DPLL_CLOCK) {
		/*
		 * If the device is not idle in the DPLL exit
		 * wait for the bus to become free.
		 */
		if (0 == dev->idle) {
			/* Bounded poll so we never spin here forever. */
			while (omap_i2c_read_reg(dev,
					OMAP_I2C_STAT_REG) & OMAP_I2C_STAT_BB) {
				if (count++ == 100) {
					dev_warn(dev->dev,
						"I2C is busy during DPLL cascading exit\n");
					break;
				}
			}
		}
		dev->dpll_exit = 1;
	}
	spin_unlock(&dev->dpll_lock);
	return 0;
}
#endif
/*
 * Prepare controller for a transaction and call omap_i2c_xfer_msg
 * to do the work during IRQ processing.
 *
 * Returns the number of messages processed on success, or a negative
 * errno from locking, bus recovery, or the first failing message.
 */
static int
omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct omap_i2c_dev *dev = i2c_get_adapdata(adap);
	int i;
	int r;
#ifdef CONFIG_OMAP4_DPLL_CASCADING
	struct platform_device *pdev;
	struct omap_i2c_bus_platform_data *pdata;
#endif

	if (dev == NULL)
		return -EINVAL;
#ifdef CONFIG_OMAP4_DPLL_CASCADING
	pdev = container_of(dev->dev, struct platform_device, dev);
	pdata = pdev->dev.platform_data;
#endif
	/* Refuse new transfers once the controller has been shut down. */
	if (dev->shutdown)
		return -EPERM;
	/* Arbitrate with the other processor(s) sharing this I2C module. */
	r = omap_i2c_hwspinlock_lock(dev);
	/* To-Do: if we are unable to acquire the lock, we must
	   try to recover somehow */
	if (r != 0)
		return r;
	/* We have the bus, enable IRQ */
	enable_irq(dev->irq);
	omap_i2c_unidle(dev);
	/* If the bus is stuck busy, attempt a bus clear before giving up. */
	r = omap_i2c_wait_for_bb(dev);
	if (r < 0)
		r = omap_i2c_bus_clear(dev);
	if (r < 0)
		goto out;
	/*
	 * When waiting for completion of a i2c transfer, we need to
	 * set a wake up latency constraint for the MPU. This is to
	 * ensure quick enough wakeup from idle, when transfer
	 * completes.
	 */
	if (dev->pm_qos)
		pm_qos_update_request(dev->pm_qos, dev->latency);
#ifdef CONFIG_OMAP4_DPLL_CASCADING
	/* Reprogram the bus dividers if a DPLL cascading transition occurred. */
	spin_lock(&dev->dpll_lock);
	if (dev->dpll_entry == 1) {
		dev->dpll_entry = 0;
		/*
		 * FIXME: Speed > 1000 can not be supported
		 * in DPLL cascading mode.
		 */
		if (dev->speed > 1000) {
			spin_unlock(&dev->dpll_lock);
			/*
			 * Bug fix: this path used to "return -1" directly,
			 * leaking the hwspinlock, leaving the IRQ enabled,
			 * the device un-idled and the PM QoS constraint
			 * raised.  Unwind through the common exit path.
			 */
			r = -EINVAL;
			goto out_qos;
		}
		omap_i2c_dpll_configure(dev, pdata, OMAP_I2C_DPLL_CLOCK / 1000);
	} else if (dev->dpll_exit == 1) {
		dev->dpll_exit = 0;
		omap_i2c_dpll_configure(dev, pdata, dev->i2c_fclk_rate);
	}
	spin_unlock(&dev->dpll_lock);
#endif
	/* Issue each message; only the last one ends with a STOP. */
	for (i = 0; i < num; i++) {
		r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)));
		if (r != 0)
			break;
	}
#ifdef CONFIG_OMAP4_DPLL_CASCADING
out_qos:
#endif
	/* Drop the MPU wakeup-latency constraint again. */
	if (dev->pm_qos)
		pm_qos_update_request(dev->pm_qos, PM_QOS_DEFAULT_VALUE);
	if (r == 0)
		r = num;
	omap_i2c_wait_for_bb(dev);
out:
	omap_i2c_idle(dev);
	omap_i2c_hwspinlock_unlock(dev);
	disable_irq(dev->irq);
	return r;
}
/*
 * Report the transfer types this adapter supports to the I2C core:
 * plain I2C plus the SMBus emulation set with SMBus Quick masked out.
 */
static u32
omap_i2c_func(struct i2c_adapter *adap)
{
	u32 caps = I2C_FUNC_I2C;

	caps |= I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK;
	return caps;
}
/*
 * Accumulate an error status (may be 0) and wake the thread sleeping
 * on cmd_complete in omap_i2c_xfer_msg().
 */
static inline void
omap_i2c_complete_cmd(struct omap_i2c_dev *dev, u16 err)
{
	dev->cmd_err |= err;
	complete(&dev->cmd_complete);
}
/* Acknowledge interrupt status bits by writing them back to STAT. */
static inline void
omap_i2c_ack_stat(struct omap_i2c_dev *dev, u16 stat)
{
	omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat);
}
/*
 * Errata i207 workaround: a spurious RDR can be raised again; clear it,
 * and if it reappears while the bus is no longer busy, clear it once
 * more so the main ISR does not act on it.
 */
static inline void i2c_omap_errata_i207(struct omap_i2c_dev *dev, u16 stat)
{
	/*
	 * I2C Errata(Errata Nos. OMAP2: 1.67, OMAP3: 1.8)
	 * Not applicable for OMAP4.
	 * Under certain rare conditions, RDR could be set again
	 * when the bus is busy, then ignore the interrupt and
	 * clear the interrupt.
	 */
	if (stat & OMAP_I2C_STAT_RDR) {
		/* Step 1: If RDR is set, clear it */
		omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
		/* Step 2: */
		if (!(omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG)
						& OMAP_I2C_STAT_BB)) {
			/* Step 3: */
			if (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG)
						& OMAP_I2C_STAT_RDR) {
				omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
				dev_dbg(dev->dev, "RDR when bus is busy.\n");
			}
		}
	}
}
/* rev1 devices are apparently only on some 15xx */
#ifdef CONFIG_ARCH_OMAP15XX
/*
 * Interrupt handler for rev1 controllers: one event at a time is
 * reported through the IV (interrupt vector) register rather than as
 * status bits, and the DATA register carries two bytes per access.
 */
static irqreturn_t
omap_i2c_rev1_isr(int this_irq, void *dev_id)
{
	struct omap_i2c_dev *dev = dev_id;
	u16 iv, w;

	/* Not ours while the controller is idled. */
	if (dev->idle)
		return IRQ_NONE;
	iv = omap_i2c_read_reg(dev, OMAP_I2C_IV_REG);
	switch (iv) {
	case 0x00:	/* None */
		break;
	case 0x01:	/* Arbitration lost */
		dev_err(dev->dev, "Arbitration lost\n");
		omap_i2c_complete_cmd(dev, OMAP_I2C_STAT_AL);
		break;
	case 0x02:	/* No acknowledgement */
		omap_i2c_complete_cmd(dev, OMAP_I2C_STAT_NACK);
		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_STP);
		break;
	case 0x03:	/* Register access ready */
		omap_i2c_complete_cmd(dev, 0);
		break;
	case 0x04:	/* Receive data ready */
		if (dev->buf_len) {
			w = omap_i2c_read_reg(dev, OMAP_I2C_DATA_REG);
			*dev->buf++ = w;
			dev->buf_len--;
			/* Second byte of the 16-bit data word, if wanted. */
			if (dev->buf_len) {
				*dev->buf++ = w >> 8;
				dev->buf_len--;
			}
		} else
			dev_err(dev->dev, "RRDY IRQ while no data requested\n");
		break;
	case 0x05:	/* Transmit data ready */
		if (dev->buf_len) {
			w = *dev->buf++;
			dev->buf_len--;
			/* Pack a second byte into the high half, if any. */
			if (dev->buf_len) {
				w |= *dev->buf++ << 8;
				dev->buf_len--;
			}
			omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w);
		} else
			dev_err(dev->dev, "XRDY IRQ while no data to send\n");
		break;
	default:
		return IRQ_NONE;	/* unexpected vector */
	}
	return IRQ_HANDLED;
}
#else
#define omap_i2c_rev1_isr		NULL
#endif
/*
 * OMAP3430 Errata 1.153: When an XRDY/XDR is hit, wait for XUDF before writing
 * data to DATA_REG. Otherwise some data bytes can be lost while transferring
 * them from the memory to the I2C interface.
 *
 * Bounded poll for XUDF.  Returns -ETIMEDOUT only if a NACK or
 * arbitration loss is seen while polling (the caller then aborts via
 * its "complete" path); a plain poll timeout is logged but is
 * deliberately non-fatal (returns 0, matching mainline behaviour).
 */
static int errata_omap3_1p153(struct omap_i2c_dev *dev, u16 *stat, int *err)
{
	unsigned long timeout = 10000;

	while (--timeout && !(*stat & OMAP_I2C_STAT_XUDF)) {
		if (*stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) {
			/* Ack the transmit-ready bits and flag the abort. */
			omap_i2c_ack_stat(dev, *stat & (OMAP_I2C_STAT_XRDY |
							OMAP_I2C_STAT_XDR));
			*err |= OMAP_I2C_STAT_XUDF;
			return -ETIMEDOUT;
		}
		cpu_relax();
		*stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
	}
	if (!timeout) {
		/* Intentionally returns 0: report the timeout, keep going. */
		dev_err(dev->dev, "timeout waiting on XUDF bit\n");
		return 0;
	}
	return 0;
}
/*
 * Interrupt handler for rev2+ controllers.  Loops while any enabled
 * status bit (dev->iestate) is set: services RX/TX FIFO events,
 * completes the transfer on ARDY/NACK/AL, and bails out after 100
 * iterations to avoid starving the system.  The "complete:" label is
 * entered via goto from the errata-1.153 path to finish the transfer
 * with the accumulated error.
 */
static irqreturn_t
omap_i2c_isr(int this_irq, void *dev_id)
{
	struct omap_i2c_dev *dev = dev_id;
	u16 stat, w;
	int err, count = 0;

	/* Not ours while idled or after shutdown. */
	if (dev->idle || dev->shutdown)
		return IRQ_NONE;
	while ((stat = (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG))) & dev->iestate) {
		dev_dbg(dev->dev, "IRQ (ISR = 0x%04x)\n", stat);
		if (count++ == 100) {
			dev_warn(dev->dev, "Too much work in one IRQ\n");
			break;
		}
		err = 0;
complete:
		/*
		 * Ack the stat in one go, but [R/X]DR and [R/X]RDY should be
		 * acked after the data operation is complete.
		 * Ref: TRM SWPU114Q Figure 18-31
		 */
		omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat &
				~(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
				OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
		if (stat & OMAP_I2C_STAT_NACK)
			err |= OMAP_I2C_STAT_NACK;
		if (stat & OMAP_I2C_STAT_AL) {
			dev_err(dev->dev, "Arbitration lost\n");
			err |= OMAP_I2C_STAT_AL;
		}
		/*
		 * ProDB0017052: Clear ARDY bit twice
		 */
		if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
			    OMAP_I2C_STAT_AL)) {
			/* Transfer finished (ok or with error): ack and wake. */
			omap_i2c_ack_stat(dev, stat &
				(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
				OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR |
				OMAP_I2C_STAT_ARDY));
			omap_i2c_complete_cmd(dev, err);
			return IRQ_HANDLED;
		}
		if (stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR)) {
			u8 num_bytes = 1;

			if (dev->errata & I2C_OMAP_ERRATA_I207)
				i2c_omap_errata_i207(dev, stat);
			/* RRDY: a full FIFO threshold; RDR: read the residue count. */
			if (dev->fifo_size) {
				if (stat & OMAP_I2C_STAT_RRDY)
					num_bytes = dev->fifo_size;
				else	/* read RXSTAT on RDR interrupt */
					num_bytes = (omap_i2c_read_reg(dev,
							OMAP_I2C_BUFSTAT_REG)
							>> 8) & 0x3F;
			}
			while (num_bytes) {
				num_bytes--;
				w = omap_i2c_read_reg(dev, OMAP_I2C_DATA_REG);
				if (dev->buf_len) {
					*dev->buf++ = w;
					dev->buf_len--;
					/*
					 * Data reg in 2430, omap3 and
					 * omap4 is 8 bit wide
					 */
					if (cpu_class_is_omap1() ||
							cpu_is_omap2420()) {
						if (dev->buf_len) {
							*dev->buf++ = w >> 8;
							dev->buf_len--;
						}
					}
				} else {
					if (stat & OMAP_I2C_STAT_RRDY)
						dev_err(dev->dev,
							"RRDY IRQ while no data"
								" requested\n");
					if (stat & OMAP_I2C_STAT_RDR)
						dev_err(dev->dev,
							"RDR IRQ while no data"
								" requested\n");
					break;
				}
			}
			/* Data drained: now it is safe to ack RRDY/RDR. */
			omap_i2c_ack_stat(dev,
				stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR));
			continue;
		}
		if (stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)) {
			u8 num_bytes = 1;

			/* XRDY: a full FIFO threshold; XDR: read the residue count. */
			if (dev->fifo_size) {
				if (stat & OMAP_I2C_STAT_XRDY)
					num_bytes = dev->fifo_size;
				else	/* read TXSTAT on XDR interrupt */
					num_bytes = omap_i2c_read_reg(dev,
							OMAP_I2C_BUFSTAT_REG)
							& 0x3F;
			}
			while (num_bytes) {
				num_bytes--;
				w = 0;
				if (dev->buf_len) {
					w = *dev->buf++;
					dev->buf_len--;
					/*
					 * Data reg in 2430, omap3 and
					 * omap4 is 8 bit wide
					 */
					if (cpu_class_is_omap1() ||
							cpu_is_omap2420()) {
						if (dev->buf_len) {
							w |= *dev->buf++ << 8;
							dev->buf_len--;
						}
					}
				} else {
					if (stat & OMAP_I2C_STAT_XRDY)
						dev_err(dev->dev,
							"XRDY IRQ while no "
							"data to send\n");
					if (stat & OMAP_I2C_STAT_XDR)
						dev_err(dev->dev,
							"XDR IRQ while no "
							"data to send\n");
					break;
				}
				/* Errata 1.153: wait for XUDF before the write. */
				if ((dev->errata & I2C_OMAP3_1P153) &&
				    errata_omap3_1p153(dev, &stat, &err))
					goto complete;
				omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w);
			}
			/* Data written: now it is safe to ack XRDY/XDR. */
			omap_i2c_ack_stat(dev,
				stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
			continue;
		}
		if (stat & OMAP_I2C_STAT_ROVR) {
			dev_dbg(dev->dev, "Receive overrun\n");
		}
		if (stat & OMAP_I2C_STAT_XUDF) {
			dev_dbg(dev->dev, "Transmit underflow\n");
		}
	}
	/* IRQ_NONE only if we did no work at all (shared-IRQ friendliness). */
	return count ? IRQ_HANDLED : IRQ_NONE;
}
/* Transfer/functionality hooks registered with the I2C core. */
static const struct i2c_algorithm omap_i2c_algo = {
	.master_xfer	= omap_i2c_xfer,
	.functionality	= omap_i2c_func,
};
/*
 * Probe: claim resources, map registers, discover the controller
 * revision and FIFO size, install the matching interrupt handler and
 * register the I2C adapter with the core.
 */
static int __devinit
omap_i2c_probe(struct platform_device *pdev)
{
	struct omap_i2c_dev *dev;
	struct i2c_adapter *adap;
	struct resource *mem, *irq, *ioarea;
	struct omap_i2c_bus_platform_data *pdata = pdev->dev.platform_data;
	irq_handler_t isr;
	int r;
	u32 speed = 0;
#ifdef CONFIG_OMAP4_DPLL_CASCADING
	struct clk *fclks;
#endif

	/* NOTE: driver uses the static register mapping */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "no mem resource?\n");
		return -ENODEV;
	}
	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return -ENODEV;
	}
	ioarea = request_mem_region(mem->start, resource_size(mem),
			pdev->name);
	if (!ioarea) {
		dev_err(&pdev->dev, "I2C region already claimed\n");
		return -EBUSY;
	}
	dev = kzalloc(sizeof(struct omap_i2c_dev), GFP_KERNEL);
	if (!dev) {
		r = -ENOMEM;
		goto err_release_region;
	}
	/* Bus speed (kHz) from platform data; 100 kHz when absent. */
	if (pdata) {
		speed = pdata->clkrate;
		dev->device_reset = pdata->device_reset;
	} else {
		speed = 100;	/* Default speed */
	}
	dev->speed = speed;
	dev->idle = 1;
	dev->dev = &pdev->dev;
	dev->irq = irq->start;
	dev->base = ioremap(mem->start, resource_size(mem));
	if (!dev->base) {
		r = -ENOMEM;
		goto err_unmap;
	}
	/* Optional MPU wakeup-latency constraint applied during transfers. */
	if (pdata && pdata->needs_wakeup_latency) {
		dev->pm_qos = kzalloc(sizeof(struct pm_qos_request_list),
				GFP_KERNEL);
		if (!dev->pm_qos) {
			r = -ENOMEM;
			goto err_unmap;
		}
		pm_qos_add_request(dev->pm_qos, PM_QOS_CPU_DMA_LATENCY,
				PM_QOS_DEFAULT_VALUE);
	}
	platform_set_drvdata(pdev, dev);
	/* Register stride and layout differ per SoC family. */
	if (cpu_is_omap7xx())
		dev->reg_shift = 1;
	else if (cpu_is_omap44xx())
		dev->reg_shift = 0;
	else
		dev->reg_shift = 2;
	if (cpu_is_omap44xx())
		dev->regs = (u8 *) omap4_reg_map;
	else
		dev->regs = (u8 *) reg_map;
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);
	dev->idle = 0;
	dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG) & 0xff;
	if (dev->rev <= OMAP_I2C_REV_ON_3430)
		dev->errata |= I2C_OMAP3_1P153;
	if (!(cpu_class_is_omap1() || cpu_is_omap2420())) {
		u16 s;

		/* Set up the fifo size - Get total size */
		s = (omap_i2c_read_reg(dev, OMAP_I2C_BUFSTAT_REG) >> 14) & 0x3;
		dev->fifo_size = 0x8 << s;
		/*
		 * Set up notification threshold as half the total available
		 * size. This is to ensure that we can handle the status on int
		 * call back latencies.
		 */
		dev->fifo_size = (dev->fifo_size / 2);
		if (dev->rev >= OMAP_I2C_REV_ON_4430)
			dev->b_hw = 0;	/* Disable hardware fixes */
		else
			dev->b_hw = 1;	/* Enable hardware fixes */
		/* calculate wakeup latency constraint for MPU */
		if (dev->pm_qos)
			dev->latency = (1000000 * dev->fifo_size) /
				(1000 * speed / 8);
	}
#ifdef CONFIG_OMAP4_DPLL_CASCADING
	/* Register notifiers to support DPLL cascading */
	spin_lock_init(&dev->dpll_lock);
	fclks = clk_get(dev->dev, "fck");
	dev->nb.notifier_call = omap_i2c_dpll_notifier;
	dev->nb.next = NULL;
	clk_notifier_register(fclks, &dev->nb);
	clk_put(fclks);
#endif
	/* reset ASAP, clearing any IRQs */
	omap_i2c_init(dev);
	/* Decide what interrupts are needed */
	dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY |
			OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK |
			OMAP_I2C_IE_AL) | ((dev->fifo_size) ?
				(OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
	/* rev1 controllers use the IV-register style handler. */
	isr = (dev->rev < OMAP_I2C_REV_2) ? omap_i2c_rev1_isr : omap_i2c_isr;
	r = request_irq(dev->irq, isr, 0, pdev->name, dev);
	if (r) {
		dev_err(dev->dev, "failure requesting irq %i\n", dev->irq);
		goto err_unuse_clocks;
	}
	/* We enable IRQ only when request for I2C from master */
	disable_irq(dev->irq);
	dev_info(dev->dev, "bus %d rev%d.%d at %d kHz\n",
		 pdev->id, dev->rev >> 4, dev->rev & 0xf, dev->speed);
	omap_i2c_idle(dev);
	adap = &dev->adapter;
	i2c_set_adapdata(adap, dev);
	adap->owner = THIS_MODULE;
	adap->class = I2C_CLASS_HWMON;
	strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
	adap->algo = &omap_i2c_algo;
	adap->dev.parent = &pdev->dev;
	/* i2c device drivers may be active on return from add_adapter() */
	adap->nr = pdev->id;
	r = i2c_add_numbered_adapter(adap);
	if (r) {
		dev_err(dev->dev, "failure adding adapter\n");
		goto err_free_irq;
	}
	return 0;
err_free_irq:
	free_irq(dev->irq, dev);
err_unuse_clocks:
	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
	omap_i2c_idle(dev);
	if (dev->pm_qos) {
		pm_qos_remove_request(dev->pm_qos);
		kfree(dev->pm_qos);
	}
err_unmap:
	iounmap(dev->base);
err_free_mem:
	platform_set_drvdata(pdev, NULL);
	kfree(dev);
err_release_region:
	release_mem_region(mem->start, resource_size(mem));
	return r;
}
/*
 * Remove: undo probe — free the IRQ, unregister the adapter, disable
 * the controller, release mappings and memory.  Note the mem resource
 * is looked up again because @dev is already freed at that point.
 */
static int
omap_i2c_remove(struct platform_device *pdev)
{
	struct omap_i2c_dev *dev = platform_get_drvdata(pdev);
	struct resource *mem;

	platform_set_drvdata(pdev, NULL);
	free_irq(dev->irq, dev);
	i2c_del_adapter(&dev->adapter);
	/* Disable the controller before unmapping its registers. */
	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
	iounmap(dev->base);
	if (dev->pm_qos) {
		pm_qos_remove_request(dev->pm_qos);
		kfree(dev->pm_qos);
	}
	kfree(dev);
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(mem->start, resource_size(mem));
	return 0;
}
/* Device name of the PMIC bus, which must stay alive for pm_power_off. */
#define PMIC_I2C_NAME "omap_i2c.1"

/*
 * Shutdown: quiesce every controller except the PMIC bus — disable the
 * module, free/mask its interrupts, and flag it so omap_i2c_xfer()
 * rejects further transfers with -EPERM.
 */
static void
omap_i2c_shutdown(struct platform_device *pdev)
{
	struct omap_i2c_dev *dev = platform_get_drvdata(pdev);

	/* Keep pmic i2c alive - for pm_power_off case */
	if (!strcmp(dev_name(dev->dev), PMIC_I2C_NAME))
		return;
	/* Shutdown all other i2c controllers */
	pm_runtime_get_sync(&pdev->dev);
	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
	/* Keep interrupts disabled */
	free_irq(dev->irq, dev);
	/* 4430+ controllers use a dedicated IRQENABLE_CLR register. */
	if (cpu_is_omap44xx() && dev->rev >= OMAP_I2C_REV_ON_4430)
		omap_i2c_write_reg(dev, OMAP_I2C_IRQENABLE_CLR, 0x6FFF);
	else
		omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
	pm_runtime_put_sync(&pdev->dev);
	dev->shutdown = true;
}
#ifdef CONFIG_SUSPEND
/*
 * System suspend/resume: balance the runtime-PM usage count for
 * devices whose runtime PM has been disabled (power.runtime_auto is
 * false, e.g. userspace set power/control to "on"), so the device can
 * still idle across system suspend.
 */
static int omap_i2c_suspend(struct device *dev)
{
	if (dev->power.runtime_auto == false)
		pm_runtime_put_sync(dev);
	return 0;
}

static int omap_i2c_resume(struct device *dev)
{
	if (dev->power.runtime_auto == false)
		pm_runtime_get_sync(dev);
	return 0;
}

static struct dev_pm_ops omap_i2c_pm_ops = {
	.suspend = omap_i2c_suspend,
	.resume = omap_i2c_resume,
};
#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
#else
#define OMAP_I2C_PM_OPS NULL
#endif
/* Platform-driver glue; PM ops compile out when CONFIG_SUSPEND is off. */
static struct platform_driver omap_i2c_driver = {
	.probe		= omap_i2c_probe,
	.remove		= omap_i2c_remove,
	.driver		= {
		.name	= "omap_i2c",
		.owner	= THIS_MODULE,
		.pm	= OMAP_I2C_PM_OPS,
	},
	.shutdown	= omap_i2c_shutdown,
};
/* I2C may be needed to bring up other drivers */
static int __init
omap_i2c_init_driver(void)
{
	return platform_driver_register(&omap_i2c_driver);
}
/* subsys_initcall: register early so dependent drivers find the bus. */
subsys_initcall(omap_i2c_init_driver);

static void __exit omap_i2c_exit_driver(void)
{
	platform_driver_unregister(&omap_i2c_driver);
}
module_exit(omap_i2c_exit_driver);

MODULE_AUTHOR("MontaVista Software, Inc. (and others)");
MODULE_DESCRIPTION("TI OMAP I2C bus adapter");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:omap_i2c");
| TaichiN/kernel_omap_otter-common | drivers/i2c/busses/i2c-omap.c | C | gpl-2.0 | 38,308 |
# Run the mocha suite under istanbul to collect coverage (20s test timeout).
test:
	./node_modules/.bin/istanbul cover ./node_modules/.bin/_mocha -- -R spec -t 20000

# Publish the coverage report produced by `make test` to Coveralls.
coveralls: test
	cat ./coverage/lcov.info | ./node_modules/.bin/coveralls

# Run the suite under the node debugger (extra flags via $(NODE_DEBUG)).
debug:
	node $(NODE_DEBUG) ./node_modules/.bin/_mocha -R spec -t 20000

.PHONY: test
| sammyboy45467/Portfolio | wp-content/themes/themer/node_modules/rename/Makefile | Makefile | gpl-2.0 | 250 |
<?php
/**
* Copyright 2012-2014 Rackspace US, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
namespace OpenCloud\Orchestration\Resource;
use OpenCloud\Common\Resource\PersistentResource;
/**
 * Class that represents a stack.
 * @see http://developer.openstack.org/api-ref-orchestration-v1.html#stacks
 *
 * @package OpenCloud\Orchestration\Resource
 */
class Stack extends PersistentResource
{
    protected static $url_resource = 'stacks';
    protected static $json_name = 'stack';

    // Stack identity and relationships
    protected $id;
    protected $parentStack; // Named so because the Base class has a $parent member.

    // Stack configuration sent on create/update
    protected $disableRollback;
    protected $description;
    protected $parameters;
    protected $environment;
    protected $files;
    protected $name;

    // Read-only state reported by the service
    protected $status;
    protected $statusReason;
    protected $outputs;
    protected $creationTime;
    protected $updatedTime;

    // Template / lifecycle options
    protected $timeoutMins;
    protected $templateUrl;
    protected $template;
    protected $adoptStackData;
    protected $links;

    // Maps API (snake_case) field names onto the camelCase members above.
    protected $aliases = array(
        'parent'              => 'parentStack',
        'disable_rollback'    => 'disableRollback',
        'stack_name'          => 'name',
        'stack_status'        => 'status',
        'stack_status_reason' => 'statusReason',
        'creation_time'       => 'creationTime',
        'updated_time'        => 'updatedTime',
        'timeout_mins'        => 'timeoutMins',
        'template_url'        => 'templateUrl',
        'adopt_stack_data'    => 'adoptStackData'
    );

    // Members serialized into the create request body.
    protected $createKeys = array(
        'name',
        'templateUrl',
        'template',
        'environment',
        'files',
        'parameters',
        'timeoutMins',
        'adoptStackData'
    );

    // Members serialized into the update request body.
    protected $updateKeys = array(
        'templateUrl',
        'template',
        'environment',
        'files',
        'parameters',
        'timeoutMins'
    );

    /**
     * The stacks API expects the payload unwrapped (no "stack" envelope),
     * so strip the wrapper the parent class adds.
     */
    protected function createJson()
    {
        $createJson = parent::createJson();
        return $createJson->{self::$json_name};
    }

    /**
     * Same unwrapping as createJson(), for update requests.
     */
    protected function updateJson($params = array())
    {
        $updateJson = parent::updateJson($params);
        return $updateJson->{self::$json_name};
    }

    /**
     * Creates a new stack by adopting resources from an abandoned stack
     *
     * @param array $params Adopt stack parameters; must include 'adoptStackData'
     * @return \Guzzle\Http\Message\Response
     * @throws \InvalidArgumentException when 'adoptStackData' is missing
     */
    public function adopt($params)
    {
        // Validate that required parameters are provided
        $requiredParameterName = 'adoptStackData';
        if (!array_key_exists($requiredParameterName, $params)) {
            throw new \InvalidArgumentException($requiredParameterName . ' is a required option');
        }
        return $this->create($params);
    }

    /**
     * Previews the stack without actually creating it
     *
     * @param array $params Preview stack parameters
     * @return \Guzzle\Http\Message\Response
     */
    public function preview($params = array())
    {
        // set parameters
        if (!empty($params)) {
            $this->populate($params, false);
        }
        // construct the JSON
        $json = json_encode($this->createJson());
        $this->checkJsonError();
        $previewUrl = $this->previewUrl();
        $response = $this->getClient()->post($previewUrl, self::getJsonHeader(), $json)->send();
        // Refresh this object from the service's response body.
        $decoded = $this->parseResponse($response);
        $this->populate($decoded);
        return $response;
    }

    /**
     * Abandons the stack and returns abandoned stack data.
     *
     * @return string Abandoned stack data (which could be passed to the adopt stack operation as adoptStackData).
     */
    public function abandon()
    {
        $abandonUrl = $this->abandonUrl();
        $response = $this->getClient()->delete($abandonUrl)->send();
        return $response->getBody(true);
    }

    /**
     * Returns a Resource object associated with this Stack
     *
     * @param string $name Stack resource name
     * @return Resource
     */
    public function getResource($name)
    {
        return $this->getService()->resource('Resource', $name, $this);
    }

    /**
     * Returns a list of Resources associated with this Stack
     *
     * @param array $params Query-string parameters
     * @return \OpenCloud\Common\Collection\PaginatedIterator
     */
    public function listResources(array $params = array())
    {
        $url = clone $this->getUrl();
        $url->addPath(Resource::resourceName())->setQuery($params);
        return $this->getService()->resourceList('Resource', $url, $this);
    }

    /**
     * Returns a list of Events associated with this Stack
     *
     * @param array $params Query-string parameters
     * @return \OpenCloud\Common\Collection\PaginatedIterator
     */
    public function listEvents(array $params = array())
    {
        $url = clone $this->getUrl();
        $url->addPath(Event::resourceName())->setQuery($params);
        return $this->getService()->resourceList('Event', $url, $this);
    }

    /**
     * Iterator use only
     */
    public function event($id)
    {
        return $this->getService()->resource('Event', $id, $this);
    }

    /**
     * Returns the template for this stack.
     *
     * @return string template body
     */
    public function getStackTemplate()
    {
        $url = clone $this->getUrl();
        $url->addPath('template');
        $response = $this->getClient()->get($url)->send();
        return $response->getBody(true);
    }

    /**
     * URL of the collection-level preview endpoint (POST /stacks/preview).
     */
    protected function previewUrl()
    {
        $url = clone $this->getParent()->getUrl();
        $url->addPath(self::resourceName());
        $url->addPath('preview');
        return $url;
    }

    /**
     * URL of this stack's abandon endpoint (DELETE .../abandon).
     */
    protected function abandonUrl()
    {
        $url = clone $this->getUrl();
        $url->addPath('abandon');
        return $url;
    }

    /**
     * Stacks are addressed by name rather than by id.
     */
    protected function primaryKeyField()
    {
        return 'name';
    }
}
| hhgr/hhgolf | wp-content/plugins/backup_pro/includes/vendor/rackspace/php-opencloud/lib/OpenCloud/Orchestration/Resource/Stack.php | PHP | gpl-2.0 | 6,450 |
//#include <errno.h>
#include "codes.h" //fixme
| nathanlnw/HB_LOCAL_TakePhoto | components/dfs/filesystems/jffs2/kernel/linux/errno.h | C | gpl-2.0 | 48 |
<?php
/**
 * Portuguese (pt) language strings for the authldap plugin.
 *
 * @license GPL 2 (http://www.gnu.org/licenses/gpl.html)
 *
 * @author Paulo Carmino <contato@paulocarmino.com>
 */
$lang['connectfail'] = 'Não foi possível conectar o LDAP: %s';
$lang['domainfail']  = 'O LDAP não encontrou seu usuário';
| smboy86/zzing-wiki | lib/plugins/authldap/lang/pt/lang.php | PHP | gpl-2.0 | 276 |
# Git Code of Conduct
This code of conduct outlines our expectations for participants within
the Git community, as well as steps for reporting unacceptable behavior.
We are committed to providing a welcoming and inspiring community for
all and expect our code of conduct to be honored. Anyone who violates
this code of conduct may be banned from the community.
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
git@sfconservancy.org, or individually:
- Ævar Arnfjörð Bjarmason <avarab@gmail.com>
- Christian Couder <christian.couder@gmail.com>
- Jeff King <peff@peff.net>
- Junio C Hamano <gitster@pobox.com>
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
[https://www.contributor-covenant.org/version/2/0/code_of_conduct.html][v2.0].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available
at [https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.0]: https://www.contributor-covenant.org/version/2/0/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations
| tacker66/git | CODE_OF_CONDUCT.md | Markdown | gpl-2.0 | 5,987 |
/*******************************************************************************************
Copyright 2010 Broadcom Corporation. All rights reserved.
Unless you and Broadcom execute a separate written software license agreement
governing use of this software, this software is licensed to you under the
terms of the GNU General Public License version 2, available at
http://www.gnu.org/copyleft/gpl.html (the "GPL").
Notwithstanding the above, under no circumstances may you combine this software
in any way with any other Broadcom software provided under a license other than
the GPL, without Broadcom's express prior written consent.
*******************************************************************************************/
#ifndef __BRCM_RDB_SYSCFG_H__
#define __BRCM_RDB_SYSCFG_H__
#define SYSCFG_IOCR0_OFFSET 0x00000000
#define SYSCFG_IOCR0_TYPE UInt32
#define SYSCFG_IOCR0_RESERVED_MASK 0x00000000
#define SYSCFG_IOCR0_CAMCK_GPIO_MUX_SHIFT 31
#define SYSCFG_IOCR0_CAMCK_GPIO_MUX_MASK 0x80000000
#define SYSCFG_IOCR0_FLASH_SD2_MUX_SHIFT 30
#define SYSCFG_IOCR0_FLASH_SD2_MUX_MASK 0x40000000
#define SYSCFG_IOCR0_LCD_CTRL_MUX_SHIFT 29
#define SYSCFG_IOCR0_LCD_CTRL_MUX_MASK 0x20000000
#define SYSCFG_IOCR0_LCDD1_LCDD15_MUX_SHIFT 28
#define SYSCFG_IOCR0_LCDD1_LCDD15_MUX_MASK 0x10000000
#define SYSCFG_IOCR0_GPEN9_B1_SHIFT 27
#define SYSCFG_IOCR0_GPEN9_B1_MASK 0x08000000
#define SYSCFG_IOCR0_DSP_TEST_PORT_ENABLE_SHIFT 26
#define SYSCFG_IOCR0_DSP_TEST_PORT_ENABLE_MASK 0x04000000
#define SYSCFG_IOCR0_MPHI_MUX_SHIFT 25
#define SYSCFG_IOCR0_MPHI_MUX_MASK 0x02000000
#define SYSCFG_IOCR0_DIGMIC_MUX_SHIFT 24
#define SYSCFG_IOCR0_DIGMIC_MUX_MASK 0x01000000
#define SYSCFG_IOCR0_PCM_MUX_SHIFT 22
#define SYSCFG_IOCR0_PCM_MUX_MASK 0x00C00000
#define SYSCFG_IOCR0_SPI_MUX_HI_SHIFT 21
#define SYSCFG_IOCR0_SPI_MUX_HI_MASK 0x00200000
#define SYSCFG_IOCR0_GPEN11_B1_SHIFT 20
#define SYSCFG_IOCR0_GPEN11_B1_MASK 0x00100000
#define SYSCFG_IOCR0_LCDD16_LCDD17_MUX_SHIFT 19
#define SYSCFG_IOCR0_LCDD16_LCDD17_MUX_MASK 0x00080000
#define SYSCFG_IOCR0_AFCPDM_MUX_SHIFT 18
#define SYSCFG_IOCR0_AFCPDM_MUX_MASK 0x00040000
#define SYSCFG_IOCR0_GPEN11_B0_SHIFT 17
#define SYSCFG_IOCR0_GPEN11_B0_MASK 0x00020000
#define SYSCFG_IOCR0_GPEN10_SHIFT 15
#define SYSCFG_IOCR0_GPEN10_MASK 0x00018000
#define SYSCFG_IOCR0_GPEN9_B0_SHIFT 14
#define SYSCFG_IOCR0_GPEN9_B0_MASK 0x00004000
#define SYSCFG_IOCR0_GPEN8_MUX_SHIFT 13
#define SYSCFG_IOCR0_GPEN8_MUX_MASK 0x00002000
#define SYSCFG_IOCR0_GPEN7_SHIFT 12
#define SYSCFG_IOCR0_GPEN7_MASK 0x00001000
#define SYSCFG_IOCR0_SPI_MUX_SHIFT 11
#define SYSCFG_IOCR0_SPI_MUX_MASK 0x00000800
#define SYSCFG_IOCR0_GPIO16_MUX_SHIFT 10
#define SYSCFG_IOCR0_GPIO16_MUX_MASK 0x00000400
#define SYSCFG_IOCR0_GPIO17_MUX_SHIFT 9
#define SYSCFG_IOCR0_GPIO17_MUX_MASK 0x00000200
#define SYSCFG_IOCR0_GPIO_MUXES_SHIFT 7
#define SYSCFG_IOCR0_GPIO_MUXES_MASK 0x00000180
#define SYSCFG_IOCR0_I2S_MUX_SHIFT 5
#define SYSCFG_IOCR0_I2S_MUX_MASK 0x00000060
#define SYSCFG_IOCR0_SD1_MUX_SHIFT 3
#define SYSCFG_IOCR0_SD1_MUX_MASK 0x00000018
#define SYSCFG_IOCR0_M68_SHIFT 2
#define SYSCFG_IOCR0_M68_MASK 0x00000004
#define SYSCFG_IOCR0_SD3_MUX_SHIFT 0
#define SYSCFG_IOCR0_SD3_MUX_MASK 0x00000003
#define SYSCFG_IOCR1_OFFSET 0x00000004
#define SYSCFG_IOCR1_TYPE UInt32
#define SYSCFG_IOCR1_RESERVED_MASK 0xFFFF0000
#define SYSCFG_IOCR1_KEY_COL_SHIFT 8
#define SYSCFG_IOCR1_KEY_COL_MASK 0x0000FF00
#define SYSCFG_IOCR1_KEY_ROW_SHIFT 0
#define SYSCFG_IOCR1_KEY_ROW_MASK 0x000000FF
#define SYSCFG_SUCR_OFFSET 0x00000008
#define SYSCFG_SUCR_TYPE UInt32
#define SYSCFG_SUCR_RESERVED_MASK 0x2E81FFEB
#define SYSCFG_SUCR_DOWNLOAD_SHIFT 31
#define SYSCFG_SUCR_DOWNLOAD_MASK 0x80000000
#define SYSCFG_SUCR_FLASH_BOOT_SHIFT 30
#define SYSCFG_SUCR_FLASH_BOOT_MASK 0x40000000
#define SYSCFG_SUCR_AP_SHIFT 28
#define SYSCFG_SUCR_AP_MASK 0x10000000
#define SYSCFG_SUCR_VCOBYPASS_SHIFT 24
#define SYSCFG_SUCR_VCOBYPASS_MASK 0x01000000
#define SYSCFG_SUCR_TEST_LOOP_SHIFT 22
#define SYSCFG_SUCR_TEST_LOOP_MASK 0x00400000
#define SYSCFG_SUCR_EJTAG_SEL_SHIFT 21
#define SYSCFG_SUCR_EJTAG_SEL_MASK 0x00200000
#define SYSCFG_SUCR_JTAG_SEL_SHIFT 18
#define SYSCFG_SUCR_JTAG_SEL_MASK 0x001C0000
#define SYSCFG_SUCR_SYS_REF_SEL_N_SHIFT 17
#define SYSCFG_SUCR_SYS_REF_SEL_N_MASK 0x00020000
#define SYSCFG_SUCR_SRST_STAT_SHIFT 4
#define SYSCFG_SUCR_SRST_STAT_MASK 0x00000010
#define SYSCFG_SUCR_BOOTSRC_SHIFT 2
#define SYSCFG_SUCR_BOOTSRC_MASK 0x00000004
#define SYSCFG_IOCR2_OFFSET 0x0000000C
#define SYSCFG_IOCR2_TYPE UInt32
#define SYSCFG_IOCR2_RESERVED_MASK 0x00003000
#define SYSCFG_IOCR2_SD2DAT_PULL_SHIFT 30
#define SYSCFG_IOCR2_SD2DAT_PULL_MASK 0xC0000000
#define SYSCFG_IOCR2_SD2CMD_PULL_SHIFT 28
#define SYSCFG_IOCR2_SD2CMD_PULL_MASK 0x30000000
#define SYSCFG_IOCR2_SD1DAT_PULL_SHIFT 26
#define SYSCFG_IOCR2_SD1DAT_PULL_MASK 0x0C000000
#define SYSCFG_IOCR2_SD1CMD_PULL_SHIFT 24
#define SYSCFG_IOCR2_SD1CMD_PULL_MASK 0x03000000
#define SYSCFG_IOCR2_SD3DAT_PULL_SHIFT 22
#define SYSCFG_IOCR2_SD3DAT_PULL_MASK 0x00C00000
#define SYSCFG_IOCR2_GPEN8_MUX_HI_SHIFT 21
#define SYSCFG_IOCR2_GPEN8_MUX_HI_MASK 0x00200000
#define SYSCFG_IOCR2_SD3CMD_PULL_SHIFT 19
#define SYSCFG_IOCR2_SD3CMD_PULL_MASK 0x00180000
#define SYSCFG_IOCR2_ANA_SYSCLKEN_MUX_SHIFT 18
#define SYSCFG_IOCR2_ANA_SYSCLKEN_MUX_MASK 0x00040000
#define SYSCFG_IOCR2_SOFTRSTO_MUX_SHIFT 16
#define SYSCFG_IOCR2_SOFTRSTO_MUX_MASK 0x00030000
#define SYSCFG_IOCR2_OTGCTRL1_MUX_SHIFT 14
#define SYSCFG_IOCR2_OTGCTRL1_MUX_MASK 0x0000C000
#define SYSCFG_IOCR2_LCDDATA_PULL_SHIFT 10
#define SYSCFG_IOCR2_LCDDATA_PULL_MASK 0x00000C00
#define SYSCFG_IOCR2_LCDTE_PULL_SHIFT 8
#define SYSCFG_IOCR2_LCDTE_PULL_MASK 0x00000300
#define SYSCFG_IOCR2_SIM2DAT_HYS_SHIFT 7
#define SYSCFG_IOCR2_SIM2DAT_HYS_MASK 0x00000080
#define SYSCFG_IOCR2_SIMDAT_HYS_SHIFT 6
#define SYSCFG_IOCR2_SIMDAT_HYS_MASK 0x00000040
#define SYSCFG_IOCR2_OSC2_SELECT_SHIFT 4
#define SYSCFG_IOCR2_OSC2_SELECT_MASK 0x00000030
#define SYSCFG_IOCR2_OSC2_ENABLE_SHIFT 3
#define SYSCFG_IOCR2_OSC2_ENABLE_MASK 0x00000008
#define SYSCFG_IOCR2_OSC1_SELECT_SHIFT 1
#define SYSCFG_IOCR2_OSC1_SELECT_MASK 0x00000006
#define SYSCFG_IOCR2_OSC1_ENABLE_SHIFT 0
#define SYSCFG_IOCR2_OSC1_ENABLE_MASK 0x00000001
#define SYSCFG_PIDR_OFFSET 0x00000010
#define SYSCFG_PIDR_TYPE UInt32
#define SYSCFG_PIDR_RESERVED_MASK 0xFFFFF000
#define SYSCFG_PIDR_PFID_SHIFT 8
#define SYSCFG_PIDR_PFID_MASK 0x00000F00
#define SYSCFG_PIDR_PID_SHIFT 4
#define SYSCFG_PIDR_PID_MASK 0x000000F0
#define SYSCFG_PIDR_RID_SHIFT 0
#define SYSCFG_PIDR_RID_MASK 0x0000000F
#define SYSCFG_DSPCTRL_OFFSET 0x00000014
#define SYSCFG_DSPCTRL_TYPE UInt32
#define SYSCFG_DSPCTRL_RESERVED_MASK 0xFFFFFE0F
#define SYSCFG_DSPCTRL_AUDIOSRST_SHIFT 8
#define SYSCFG_DSPCTRL_AUDIOSRST_MASK 0x00000100
#define SYSCFG_DSPCTRL_DSPSRST_SHIFT 7
#define SYSCFG_DSPCTRL_DSPSRST_MASK 0x00000080
#define SYSCFG_DSPCTRL_SYNCEXTPRAM_SHIFT 6
#define SYSCFG_DSPCTRL_SYNCEXTPRAM_MASK 0x00000040
#define SYSCFG_DSPCTRL_JTAGINTWAKE_SHIFT 5
#define SYSCFG_DSPCTRL_JTAGINTWAKE_MASK 0x00000020
#define SYSCFG_DSPCTRL_EN_TRST_SHIFT 4
#define SYSCFG_DSPCTRL_EN_TRST_MASK 0x00000010
#define SYSCFG_PUMR_OFFSET 0x00000018
#define SYSCFG_PUMR_TYPE UInt32
#define SYSCFG_PUMR_RESERVED_MASK 0x00000000
#define SYSCFG_PUMR_PUMODE_SHIFT 0
#define SYSCFG_PUMR_PUMODE_MASK 0xFFFFFFFF
#define SYSCFG_IOCR3_OFFSET 0x0000001C
#define SYSCFG_IOCR3_TYPE UInt32
#define SYSCFG_IOCR3_RESERVED_MASK 0x82054008
#define SYSCFG_IOCR3_PCMDI_PD_SHIFT 30
#define SYSCFG_IOCR3_PCMDI_PD_MASK 0x40000000
#define SYSCFG_IOCR3_DIGMICDATA_PD_SHIFT 29
#define SYSCFG_IOCR3_DIGMICDATA_PD_MASK 0x20000000
#define SYSCFG_IOCR3_SIM2DAT_PU_SHIFT 28
#define SYSCFG_IOCR3_SIM2DAT_PU_MASK 0x10000000
#define SYSCFG_IOCR3_SIMDAT_PU_SHIFT 27
#define SYSCFG_IOCR3_SIMDAT_PU_MASK 0x08000000
#define SYSCFG_IOCR3_X_TRIG_EN_SHIFT 26
#define SYSCFG_IOCR3_X_TRIG_EN_MASK 0x04000000
#define SYSCFG_IOCR3_WCDMA_UART_DIS_SHIFT 24
#define SYSCFG_IOCR3_WCDMA_UART_DIS_MASK 0x01000000
#define SYSCFG_IOCR3_SIM2_DIS_SHIFT 23
#define SYSCFG_IOCR3_SIM2_DIS_MASK 0x00800000
#define SYSCFG_IOCR3_UARTC_DIS_SHIFT 22
#define SYSCFG_IOCR3_UARTC_DIS_MASK 0x00400000
#define SYSCFG_IOCR3_TWIF_ENB_SHIFT 21
#define SYSCFG_IOCR3_TWIF_ENB_MASK 0x00200000
#define SYSCFG_IOCR3_PC_DIS_SHIFT 20
#define SYSCFG_IOCR3_PC_DIS_MASK 0x00100000
#define SYSCFG_IOCR3_SYN_DIS_SHIFT 19
#define SYSCFG_IOCR3_SYN_DIS_MASK 0x00080000
#define SYSCFG_IOCR3_GPEN_DIS_SHIFT 17
#define SYSCFG_IOCR3_GPEN_DIS_MASK 0x00020000
#define SYSCFG_IOCR3_JTAG_DIS_SHIFT 15
#define SYSCFG_IOCR3_JTAG_DIS_MASK 0x00008000
#define SYSCFG_IOCR3_SIM_DIS_SHIFT 13
#define SYSCFG_IOCR3_SIM_DIS_MASK 0x00002000
#define SYSCFG_IOCR3_UARTB_DIS_SHIFT 12
#define SYSCFG_IOCR3_UARTB_DIS_MASK 0x00001000
#define SYSCFG_IOCR3_UARTA_DIS_SHIFT 11
#define SYSCFG_IOCR3_UARTA_DIS_MASK 0x00000800
#define SYSCFG_IOCR3_CAMD_PD_SHIFT 10
#define SYSCFG_IOCR3_CAMD_PD_MASK 0x00000400
#define SYSCFG_IOCR3_CAMD_PU_SHIFT 9
#define SYSCFG_IOCR3_CAMD_PU_MASK 0x00000200
#define SYSCFG_IOCR3_CAMHVS_PD_SHIFT 8
#define SYSCFG_IOCR3_CAMHVS_PD_MASK 0x00000100
#define SYSCFG_IOCR3_CAMHVS_PU_SHIFT 7
#define SYSCFG_IOCR3_CAMHVS_PU_MASK 0x00000080
#define SYSCFG_IOCR3_CAMDCK_PD_SHIFT 6
#define SYSCFG_IOCR3_CAMDCK_PD_MASK 0x00000040
#define SYSCFG_IOCR3_CAMDCK_PU_SHIFT 5
#define SYSCFG_IOCR3_CAMDCK_PU_MASK 0x00000020
#define SYSCFG_IOCR3_CAMCK_DIS_SHIFT 4
#define SYSCFG_IOCR3_CAMCK_DIS_MASK 0x00000010
#define SYSCFG_IOCR3_NRDY_PU_SHIFT 2
#define SYSCFG_IOCR3_NRDY_PU_MASK 0x00000004
#define SYSCFG_IOCR3_FRDY_PU_SHIFT 1
#define SYSCFG_IOCR3_FRDY_PU_MASK 0x00000002
#define SYSCFG_IOCR3_FADQ_PU_SHIFT 0
#define SYSCFG_IOCR3_FADQ_PU_MASK 0x00000001
#define SYSCFG_IOCR4_OFFSET 0x00000020
#define SYSCFG_IOCR4_TYPE UInt32
#define SYSCFG_IOCR4_RESERVED_MASK 0x40000000
#define SYSCFG_IOCR4_FADQ_PD_SHIFT 31
#define SYSCFG_IOCR4_FADQ_PD_MASK 0x80000000
#define SYSCFG_IOCR4_SDIO_DRIVE_SHIFT 27
#define SYSCFG_IOCR4_SDIO_DRIVE_MASK 0x38000000
#define SYSCFG_IOCR4_ETM_DRIVE_SHIFT 24
#define SYSCFG_IOCR4_ETM_DRIVE_MASK 0x07000000
#define SYSCFG_IOCR4_LCD_DRIVE_SHIFT 21
#define SYSCFG_IOCR4_LCD_DRIVE_MASK 0x00E00000
#define SYSCFG_IOCR4_SIM_DRIVE_SHIFT 18
#define SYSCFG_IOCR4_SIM_DRIVE_MASK 0x001C0000
#define SYSCFG_IOCR4_SIM2_DRIVE_SHIFT 15
#define SYSCFG_IOCR4_SIM2_DRIVE_MASK 0x00038000
#define SYSCFG_IOCR4_CAMERA_DRIVE_SHIFT 12
#define SYSCFG_IOCR4_CAMERA_DRIVE_MASK 0x00007000
#define SYSCFG_IOCR4_SDIO3_CLK_DRIVE_SHIFT 9
#define SYSCFG_IOCR4_SDIO3_CLK_DRIVE_MASK 0x00000E00
#define SYSCFG_IOCR4_SDIO3_DRIVE_SHIFT 6
#define SYSCFG_IOCR4_SDIO3_DRIVE_MASK 0x000001C0
#define SYSCFG_IOCR4_FCLK_DRIVE_SHIFT 3
#define SYSCFG_IOCR4_FCLK_DRIVE_MASK 0x00000038
#define SYSCFG_IOCR4_MEM_DRIVE_SHIFT 0
#define SYSCFG_IOCR4_MEM_DRIVE_MASK 0x00000007
#define SYSCFG_IOCR5_OFFSET 0x00000024
#define SYSCFG_IOCR5_TYPE UInt32
#define SYSCFG_IOCR5_RESERVED_MASK 0x06003000
#define SYSCFG_IOCR5_JTAG_DRIVE_SHIFT 29
#define SYSCFG_IOCR5_JTAG_DRIVE_MASK 0xE0000000
#define SYSCFG_IOCR5_TRACEBUS_SELECT_SHIFT 27
#define SYSCFG_IOCR5_TRACEBUS_SELECT_MASK 0x18000000
#define SYSCFG_IOCR5_SIM2_GPIO_MUX_SHIFT 24
#define SYSCFG_IOCR5_SIM2_GPIO_MUX_MASK 0x01000000
#define SYSCFG_IOCR5_GPIO27_MUX_SHIFT 22
#define SYSCFG_IOCR5_GPIO27_MUX_MASK 0x00C00000
#define SYSCFG_IOCR5_GPIO26_MUX_SHIFT 20
#define SYSCFG_IOCR5_GPIO26_MUX_MASK 0x00300000
#define SYSCFG_IOCR5_GPIOH_DRIVE_SHIFT 17
#define SYSCFG_IOCR5_GPIOH_DRIVE_MASK 0x000E0000
#define SYSCFG_IOCR5_GPIOL_DRIVE_SHIFT 14
#define SYSCFG_IOCR5_GPIOL_DRIVE_MASK 0x0001C000
#define SYSCFG_IOCR5_GPIO35_MUX_SHIFT 10
#define SYSCFG_IOCR5_GPIO35_MUX_MASK 0x00000C00
#define SYSCFG_IOCR5_GPIO34_MUX_SHIFT 8
#define SYSCFG_IOCR5_GPIO34_MUX_MASK 0x00000300
#define SYSCFG_IOCR5_GPIO33_MUX_SHIFT 6
#define SYSCFG_IOCR5_GPIO33_MUX_MASK 0x000000C0
#define SYSCFG_IOCR5_GPIO32_MUX_SHIFT 4
#define SYSCFG_IOCR5_GPIO32_MUX_MASK 0x00000030
#define SYSCFG_IOCR5_GPIO31_MUX_SHIFT 2
#define SYSCFG_IOCR5_GPIO31_MUX_MASK 0x0000000C
#define SYSCFG_IOCR5_GPIO30_MUX_SHIFT 0
#define SYSCFG_IOCR5_GPIO30_MUX_MASK 0x00000003
#define SYSCFG_IOCR6_OFFSET 0x00000028
#define SYSCFG_IOCR6_TYPE UInt32
#define SYSCFG_IOCR6_RESERVED_MASK 0x20300380
#define SYSCFG_IOCR6_GPIO21_18_MUX_SHIFT 31
#define SYSCFG_IOCR6_GPIO21_18_MUX_MASK 0x80000000
#define SYSCFG_IOCR6_CAM2_CAM1_B_SHIFT 30
#define SYSCFG_IOCR6_CAM2_CAM1_B_MASK 0x40000000
#define SYSCFG_IOCR6_CAM_MODE_SHIFT 27
#define SYSCFG_IOCR6_CAM_MODE_MASK 0x18000000
#define SYSCFG_IOCR6_DDAC_FC_PWRDN_SHIFT 25
#define SYSCFG_IOCR6_DDAC_FC_PWRDN_MASK 0x06000000
#define SYSCFG_IOCR6_SPI_DRIVE_STRENGTH_CONTROL_SHIFT 22
#define SYSCFG_IOCR6_SPI_DRIVE_STRENGTH_CONTROL_MASK 0x01C00000
#define SYSCFG_IOCR6_GPIO25_24_MUX_SHIFT 19
#define SYSCFG_IOCR6_GPIO25_24_MUX_MASK 0x00080000
#define SYSCFG_IOCR6_UART_DRIVE_SHIFT 16
#define SYSCFG_IOCR6_UART_DRIVE_MASK 0x00070000
#define SYSCFG_IOCR6_PCM_DRIVE_SHIFT 13
#define SYSCFG_IOCR6_PCM_DRIVE_MASK 0x0000E000
#define SYSCFG_IOCR6_SDIO_CLK_DRIVE_SHIFT 10
#define SYSCFG_IOCR6_SDIO_CLK_DRIVE_MASK 0x00001C00
#define SYSCFG_IOCR6_GPIO23_MUX_SHIFT 6
#define SYSCFG_IOCR6_GPIO23_MUX_MASK 0x00000040
#define SYSCFG_IOCR6_GPIO22_MUX_SHIFT 5
#define SYSCFG_IOCR6_GPIO22_MUX_MASK 0x00000020
#define SYSCFG_IOCR6_GPIO21_MUX_SHIFT 4
#define SYSCFG_IOCR6_GPIO21_MUX_MASK 0x00000010
#define SYSCFG_IOCR6_GPIO20_MUX_SHIFT 3
#define SYSCFG_IOCR6_GPIO20_MUX_MASK 0x00000008
#define SYSCFG_IOCR6_JTAG_PM_MONITOR_MUX_SHIFT 2
#define SYSCFG_IOCR6_JTAG_PM_MONITOR_MUX_MASK 0x00000004
#define SYSCFG_IOCR6_GPIO_PM_MONITOR_MUX_SHIFT 1
#define SYSCFG_IOCR6_GPIO_PM_MONITOR_MUX_MASK 0x00000002
#define SYSCFG_IOCR6_PM_MONITOR_SELF_MUX_SHIFT 0
#define SYSCFG_IOCR6_PM_MONITOR_SELF_MUX_MASK 0x00000001
#define SYSCFG_IOCR7_OFFSET 0x0000002C
#define SYSCFG_IOCR7_TYPE UInt32
#define SYSCFG_IOCR7_RESERVED_MASK 0x0800004E
#define SYSCFG_IOCR7_RFGPIO5_MUX_SHIFT 31
#define SYSCFG_IOCR7_RFGPIO5_MUX_MASK 0x80000000
#define SYSCFG_IOCR7_RFGPIO4_MUX_SHIFT 30
#define SYSCFG_IOCR7_RFGPIO4_MUX_MASK 0x40000000
#define SYSCFG_IOCR7_RFGPIO3_MUX_SHIFT 29
#define SYSCFG_IOCR7_RFGPIO3_MUX_MASK 0x20000000
#define SYSCFG_IOCR7_RFGPIO2_MUX_SHIFT 28
#define SYSCFG_IOCR7_RFGPIO2_MUX_MASK 0x10000000
#define SYSCFG_IOCR7_PC_DRIVE_SHIFT 24
#define SYSCFG_IOCR7_PC_DRIVE_MASK 0x07000000
#define SYSCFG_IOCR7_GPENH_DRIVE_SHIFT 21
#define SYSCFG_IOCR7_GPENH_DRIVE_MASK 0x00E00000
#define SYSCFG_IOCR7_GPENL_DRIVE_SHIFT 18
#define SYSCFG_IOCR7_GPENL_DRIVE_MASK 0x001C0000
#define SYSCFG_IOCR7_RX3G_PULL_SHIFT 16
#define SYSCFG_IOCR7_RX3G_PULL_MASK 0x00030000
#define SYSCFG_IOCR7_TX3G_SLEW_SHIFT 15
#define SYSCFG_IOCR7_TX3G_SLEW_MASK 0x00008000
#define SYSCFG_IOCR7_TX3G_DRIVE_SHIFT 12
#define SYSCFG_IOCR7_TX3G_DRIVE_MASK 0x00007000
#define SYSCFG_IOCR7_CLKX8_SLEW_SHIFT 11
#define SYSCFG_IOCR7_CLKX8_SLEW_MASK 0x00000800
#define SYSCFG_IOCR7_CLKX8_DRIVE_SHIFT 8
#define SYSCFG_IOCR7_CLKX8_DRIVE_MASK 0x00000700
#define SYSCFG_IOCR7_AUXDAC0_PAVRAMP_SEL_SHIFT 7
#define SYSCFG_IOCR7_AUXDAC0_PAVRAMP_SEL_MASK 0x00000080
#define SYSCFG_IOCR7_DATASEL_3G2G_SHIFT 4
#define SYSCFG_IOCR7_DATASEL_3G2G_MASK 0x00000030
#define SYSCFG_IOCR7_CTRLSEL_3G2G_SHIFT 0
#define SYSCFG_IOCR7_CTRLSEL_3G2G_MASK 0x00000001
#define SYSCFG_DPERSTR_OFFSET 0x00000030
#define SYSCFG_DPERSTR_TYPE UInt32
#define SYSCFG_DPERSTR_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_DPERSTR_DPESRST_SHIFT 0
#define SYSCFG_DPERSTR_DPESRST_MASK 0x00000001
#define SYSCFG_TVENCCR_OFFSET 0x00000034
#define SYSCFG_TVENCCR_TYPE UInt32
#define SYSCFG_TVENCCR_RESERVED_MASK 0xFFFFFFC8
#define SYSCFG_TVENCCR_DMA_WAIT_CYCLE_SHIFT 4
#define SYSCFG_TVENCCR_DMA_WAIT_CYCLE_MASK 0x00000030
#define SYSCFG_TVENCCR_DISPLAYC_HRSTN_SHIFT 2
#define SYSCFG_TVENCCR_DISPLAYC_HRSTN_MASK 0x00000004
#define SYSCFG_TVENCCR_VEC_HRSTN_SHIFT 1
#define SYSCFG_TVENCCR_VEC_HRSTN_MASK 0x00000002
#define SYSCFG_TVENCCR_TVENC_EN_SHIFT 0
#define SYSCFG_TVENCCR_TVENC_EN_MASK 0x00000001
#define SYSCFG_DSICR_OFFSET 0x00000038
#define SYSCFG_DSICR_TYPE UInt32
#define SYSCFG_DSICR_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_DSICR_DSI_EN_SHIFT 0
#define SYSCFG_DSICR_DSI_EN_MASK 0x00000001
#define SYSCFG_MCR_OFFSET 0x00000040
#define SYSCFG_MCR_TYPE UInt32
#define SYSCFG_MCR_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_MCR_REMAP_SHIFT 0
#define SYSCFG_MCR_REMAP_MASK 0x00000001
#define SYSCFG_MRR_OFFSET 0x00000044
#define SYSCFG_MRR_TYPE UInt32
#define SYSCFG_MRR_RESERVED_MASK 0x00000000
#define SYSCFG_MRR_RESTORE_SHIFT 0
#define SYSCFG_MRR_RESTORE_MASK 0xFFFFFFFF
#define SYSCFG_RAMCTRL_OFFSET 0x00000048
#define SYSCFG_RAMCTRL_TYPE UInt32
#define SYSCFG_RAMCTRL_RESERVED_MASK 0xFFFEF800
#define SYSCFG_RAMCTRL_RED_OTP_RST_SHIFT 16
#define SYSCFG_RAMCTRL_RED_OTP_RST_MASK 0x00010000
#define SYSCFG_RAMCTRL_ROM_TM_SHIFT 6
#define SYSCFG_RAMCTRL_ROM_TM_MASK 0x000007C0
#define SYSCFG_RAMCTRL_SRAM_TM_SHIFT 2
#define SYSCFG_RAMCTRL_SRAM_TM_MASK 0x0000003C
#define SYSCFG_RAMCTRL_STBY_SHIFT 0
#define SYSCFG_RAMCTRL_STBY_MASK 0x00000003
#define SYSCFG_SECCTRL_OFFSET 0x00000050
#define SYSCFG_SECCTRL_TYPE UInt32
#define SYSCFG_SECCTRL_RESERVED_MASK 0xFFFFFFE8
#define SYSCFG_SECCTRL_CRYPTO_DIS_SHIFT 4
#define SYSCFG_SECCTRL_CRYPTO_DIS_MASK 0x00000010
#define SYSCFG_SECCTRL_OTP_DIS_SHIFT 2
#define SYSCFG_SECCTRL_OTP_DIS_MASK 0x00000004
#define SYSCFG_SECCTRL_RTC_DIS_WR_SHIFT 1
#define SYSCFG_SECCTRL_RTC_DIS_WR_MASK 0x00000002
#define SYSCFG_SECCTRL_BRM_DIS_RD_SHIFT 0
#define SYSCFG_SECCTRL_BRM_DIS_RD_MASK 0x00000001
#define SYSCFG_SECSTAT_OFFSET 0x00000054
#define SYSCFG_SECSTAT_TYPE UInt32
#define SYSCFG_SECSTAT_RESERVED_MASK 0xC0000000
#define SYSCFG_SECSTAT_SEC_MODE_STATE_SHIFT 26
#define SYSCFG_SECSTAT_SEC_MODE_STATE_MASK 0x3C000000
#define SYSCFG_SECSTAT_JTAG_DIS_SHIFT 25
#define SYSCFG_SECSTAT_JTAG_DIS_MASK 0x02000000
#define SYSCFG_SECSTAT_ETM_DIS_SHIFT 24
#define SYSCFG_SECSTAT_ETM_DIS_MASK 0x01000000
#define SYSCFG_SECSTAT_SECURE_DEBUG_SHIFT 0
#define SYSCFG_SECSTAT_SECURE_DEBUG_MASK 0x00FFFFFF
#define SYSCFG_MAMBACR_OFFSET 0x00000060
#define SYSCFG_MAMBACR_TYPE UInt32
#define SYSCFG_MAMBACR_RESERVED_MASK 0x00FFF401
#define SYSCFG_MAMBACR_MAMBA_STATUS_SHIFT 24
#define SYSCFG_MAMBACR_MAMBA_STATUS_MASK 0xFF000000
#define SYSCFG_MAMBACR_NOR_REQUEST_SHIFT 11
#define SYSCFG_MAMBACR_NOR_REQUEST_MASK 0x00000800
#define SYSCFG_MAMBACR_PWRDN_EXIT_SHIFT 9
#define SYSCFG_MAMBACR_PWRDN_EXIT_MASK 0x00000200
#define SYSCFG_MAMBACR_HIB_EXIT_MODE_SHIFT 8
#define SYSCFG_MAMBACR_HIB_EXIT_MODE_MASK 0x00000100
#define SYSCFG_MAMBACR_FREQ_CHANGE_REQ_SHIFT 7
#define SYSCFG_MAMBACR_FREQ_CHANGE_REQ_MASK 0x00000080
#define SYSCFG_MAMBACR_EMI_TEST_SHIFT 6
#define SYSCFG_MAMBACR_EMI_TEST_MASK 0x00000040
#define SYSCFG_MAMBACR_DEBUG_CTRL_SHIFT 2
#define SYSCFG_MAMBACR_DEBUG_CTRL_MASK 0x0000003C
#define SYSCFG_MAMBACR_CLK_EMI_EQ_DRAM_SHIFT 1
#define SYSCFG_MAMBACR_CLK_EMI_EQ_DRAM_MASK 0x00000002
#define SYSCFG_RFCR_OFFSET 0x00000064
#define SYSCFG_RFCR_TYPE UInt32
#define SYSCFG_RFCR_RESERVED_MASK 0xFFFFFFFF
#define SYSCFG_ANACR0_OFFSET 0x00000080
#define SYSCFG_ANACR0_TYPE UInt32
#define SYSCFG_ANACR0_RESERVED_MASK 0x00000000
#define SYSCFG_ANACR0_CLKINV_SHIFT 31
#define SYSCFG_ANACR0_CLKINV_MASK 0x80000000
#define SYSCFG_ANACR0_PGA_CTRLR_SHIFT 28
#define SYSCFG_ANACR0_PGA_CTRLR_MASK 0x70000000
#define SYSCFG_ANACR0_PGA_CTRLL_SHIFT 25
#define SYSCFG_ANACR0_PGA_CTRLL_MASK 0x0E000000
#define SYSCFG_ANACR0_MUTE_SELR_SHIFT 23
#define SYSCFG_ANACR0_MUTE_SELR_MASK 0x01800000
#define SYSCFG_ANACR0_MUTE_SELL_SHIFT 21
#define SYSCFG_ANACR0_MUTE_SELL_MASK 0x00600000
#define SYSCFG_ANACR0_MODE_SELR_SHIFT 18
#define SYSCFG_ANACR0_MODE_SELR_MASK 0x001C0000
#define SYSCFG_ANACR0_MODE_SELL_SHIFT 15
#define SYSCFG_ANACR0_MODE_SELL_MASK 0x00038000
#define SYSCFG_ANACR0_DRVR_SELR_SHIFT 14
#define SYSCFG_ANACR0_DRVR_SELR_MASK 0x00004000
#define SYSCFG_ANACR0_DRVR_SELL_SHIFT 13
#define SYSCFG_ANACR0_DRVR_SELL_MASK 0x00002000
#define SYSCFG_ANACR0_CM_SEL_SHIFT 10
#define SYSCFG_ANACR0_CM_SEL_MASK 0x00001C00
#define SYSCFG_ANACR0_IQ_DBL_SHIFT 9
#define SYSCFG_ANACR0_IQ_DBL_MASK 0x00000200
#define SYSCFG_ANACR0_BGTCSP_SHIFT 6
#define SYSCFG_ANACR0_BGTCSP_MASK 0x000001C0
#define SYSCFG_ANACR0_PWRDNREF_SHIFT 5
#define SYSCFG_ANACR0_PWRDNREF_MASK 0x00000020
#define SYSCFG_ANACR0_PWRDNDRVR_SHIFT 4
#define SYSCFG_ANACR0_PWRDNDRVR_MASK 0x00000010
#define SYSCFG_ANACR0_PWRDNDRVL_SHIFT 3
#define SYSCFG_ANACR0_PWRDNDRVL_MASK 0x00000008
#define SYSCFG_ANACR0_PWRDNDACR_SHIFT 2
#define SYSCFG_ANACR0_PWRDNDACR_MASK 0x00000004
#define SYSCFG_ANACR0_PWRDNDACL_SHIFT 1
#define SYSCFG_ANACR0_PWRDNDACL_MASK 0x00000002
#define SYSCFG_ANACR0_PWRDND2C_SHIFT 0
#define SYSCFG_ANACR0_PWRDND2C_MASK 0x00000001
#define SYSCFG_ANACR1_OFFSET 0x00000084
#define SYSCFG_ANACR1_TYPE UInt32
#define SYSCFG_ANACR1_RESERVED_MASK 0x03FFFE7C
#define SYSCFG_ANACR1_IHFEP_DAC_PDOFFSETGENL_SHIFT 31
#define SYSCFG_ANACR1_IHFEP_DAC_PDOFFSETGENL_MASK 0x80000000
#define SYSCFG_ANACR1_IHFEP_DAC_PDOFFSETGENR_SHIFT 30
#define SYSCFG_ANACR1_IHFEP_DAC_PDOFFSETGENR_MASK 0x40000000
#define SYSCFG_ANACR1_IHFEP_DAC_PWRMOS_GATECNTRLL_SHIFT 29
#define SYSCFG_ANACR1_IHFEP_DAC_PWRMOS_GATECNTRLL_MASK 0x20000000
#define SYSCFG_ANACR1_IHFEP_DAC_PWRMOS_GATECNTRLR_SHIFT 28
#define SYSCFG_ANACR1_IHFEP_DAC_PWRMOS_GATECNTRLR_MASK 0x10000000
#define SYSCFG_ANACR1_IHFEP_DAC_OFFSETDBL_SHIFT 27
#define SYSCFG_ANACR1_IHFEP_DAC_OFFSETDBL_MASK 0x08000000
#define SYSCFG_ANACR1_IHFEP_DAC_RAMPREF_EN_SHIFT 26
#define SYSCFG_ANACR1_IHFEP_DAC_RAMPREF_EN_MASK 0x04000000
#define SYSCFG_ANACR1_HEADSET_DAC_PWD_MASK_SHIFT 8
#define SYSCFG_ANACR1_HEADSET_DAC_PWD_MASK_MASK 0x00000100
#define SYSCFG_ANACR1_IHF_EARPIECE_DAC_FORCE_PWRUP_DISABLE_SHIFT 7
#define SYSCFG_ANACR1_IHF_EARPIECE_DAC_FORCE_PWRUP_DISABLE_MASK 0x00000080
#define SYSCFG_ANACR1_I_CLKPHASE_SHIFT 1
#define SYSCFG_ANACR1_I_CLKPHASE_MASK 0x00000002
#define SYSCFG_ANACR1_I_CLKPHSDIS_SHIFT 0
#define SYSCFG_ANACR1_I_CLKPHSDIS_MASK 0x00000001
#define SYSCFG_ANACR2_OFFSET 0x00000088
#define SYSCFG_ANACR2_TYPE UInt32
#define SYSCFG_ANACR2_RESERVED_MASK 0x00070030
#define SYSCFG_ANACR2_I_PGA_BIAS_GM0_SHIFT 31
#define SYSCFG_ANACR2_I_PGA_BIAS_GM0_MASK 0x80000000
#define SYSCFG_ANACR2_I_PGA_BIAS_BUF_SHIFT 29
#define SYSCFG_ANACR2_I_PGA_BIAS_BUF_MASK 0x60000000
#define SYSCFG_ANACR2_I_PGA_BIAS_AMP_SHIFT 27
#define SYSCFG_ANACR2_I_PGA_BIAS_AMP_MASK 0x18000000
#define SYSCFG_ANACR2_I_PGA_RI_FINE_SHIFT 24
#define SYSCFG_ANACR2_I_PGA_RI_FINE_MASK 0x07000000
#define SYSCFG_ANACR2_I_PGA_RI_CTL_SHIFT 22
#define SYSCFG_ANACR2_I_PGA_RI_CTL_MASK 0x00C00000
#define SYSCFG_ANACR2_I_BGTC_SHIFT 19
#define SYSCFG_ANACR2_I_BGTC_MASK 0x00380000
#define SYSCFG_ANACR2_I_LDO_VOUT_SHIFT 14
#define SYSCFG_ANACR2_I_LDO_VOUT_MASK 0x0000C000
#define SYSCFG_ANACR2_I_PLL_TESTSEL_SHIFT 13
#define SYSCFG_ANACR2_I_PLL_TESTSEL_MASK 0x00002000
#define SYSCFG_ANACR2_REFSHIFT_SHIFT 11
#define SYSCFG_ANACR2_REFSHIFT_MASK 0x00001800
#define SYSCFG_ANACR2_BGTC_SHIFT 8
#define SYSCFG_ANACR2_BGTC_MASK 0x00000700
#define SYSCFG_ANACR2_I_LDO_I_SHIFT 6
#define SYSCFG_ANACR2_I_LDO_I_MASK 0x000000C0
#define SYSCFG_ANACR2_I_PGA_ADC_STANDBY_SHIFT 3
#define SYSCFG_ANACR2_I_PGA_ADC_STANDBY_MASK 0x00000008
#define SYSCFG_ANACR2_I_PGA_ADC_PWRUP_SHIFT 2
#define SYSCFG_ANACR2_I_PGA_ADC_PWRUP_MASK 0x00000004
#define SYSCFG_ANACR2_I_MIC_VOICE_PWRDN_SHIFT 1
#define SYSCFG_ANACR2_I_MIC_VOICE_PWRDN_MASK 0x00000002
#define SYSCFG_ANACR2_I_LDO_PWRDN_SHIFT 0
#define SYSCFG_ANACR2_I_LDO_PWRDN_MASK 0x00000001
#define SYSCFG_ANACR3_OFFSET 0x0000008C
#define SYSCFG_ANACR3_TYPE UInt32
#define SYSCFG_ANACR3_RESERVED_MASK 0x00000000
#define SYSCFG_ANACR3_I_INTVOCMCT_SHIFT 29
#define SYSCFG_ANACR3_I_INTVOCMCT_MASK 0xE0000000
#define SYSCFG_ANACR3_I_INT2BCTL_SHIFT 27
#define SYSCFG_ANACR3_I_INT2BCTL_MASK 0x18000000
#define SYSCFG_ANACR3_I_INT1BCTL_SHIFT 25
#define SYSCFG_ANACR3_I_INT1BCTL_MASK 0x06000000
#define SYSCFG_ANACR3_I_FLASHBCTL_SHIFT 23
#define SYSCFG_ANACR3_I_FLASHBCTL_MASK 0x01800000
#define SYSCFG_ANACR3_I_DITHCTL_SHIFT 21
#define SYSCFG_ANACR3_I_DITHCTL_MASK 0x00600000
#define SYSCFG_ANACR3_I_DACREFCTL_SHIFT 18
#define SYSCFG_ANACR3_I_DACREFCTL_MASK 0x001C0000
#define SYSCFG_ANACR3_I_CKBY2EN_SHIFT 17
#define SYSCFG_ANACR3_I_CKBY2EN_MASK 0x00020000
#define SYSCFG_ANACR3_ARX_FORCE_PWRUP_DISABLE_SHIFT 16
#define SYSCFG_ANACR3_ARX_FORCE_PWRUP_DISABLE_MASK 0x00010000
#define SYSCFG_ANACR3_I_PGA_MUX_SEL_SHIFT 15
#define SYSCFG_ANACR3_I_PGA_MUX_SEL_MASK 0x00008000
#define SYSCFG_ANACR3_I_PGA_GAIN_SHIFT 9
#define SYSCFG_ANACR3_I_PGA_GAIN_MASK 0x00007E00
#define SYSCFG_ANACR3_I_PGA_CMO_CTL_SHIFT 6
#define SYSCFG_ANACR3_I_PGA_CMO_CTL_MASK 0x000001C0
#define SYSCFG_ANACR3_I_PGA_CMI_CTL_SHIFT 3
#define SYSCFG_ANACR3_I_PGA_CMI_CTL_MASK 0x00000038
#define SYSCFG_ANACR3_I_PGA_BIAS_VBIAS_SHIFT 1
#define SYSCFG_ANACR3_I_PGA_BIAS_VBIAS_MASK 0x00000006
#define SYSCFG_ANACR3_I_PGA_BIAS_GM1_SHIFT 0
#define SYSCFG_ANACR3_I_PGA_BIAS_GM1_MASK 0x00000001
#define SYSCFG_ANACR4_OFFSET 0x00000090
#define SYSCFG_ANACR4_TYPE UInt32
#define SYSCFG_ANACR4_RESERVED_MASK 0xFFFFFFE0
#define SYSCFG_ANACR4_I_MIC_AUX_GND_SHIFT 4
#define SYSCFG_ANACR4_I_MIC_AUX_GND_MASK 0x00000010
#define SYSCFG_ANACR4_I_SHUFFCTL_SHIFT 2
#define SYSCFG_ANACR4_I_SHUFFCTL_MASK 0x0000000C
#define SYSCFG_ANACR4_I_REFAMPBCTL_SHIFT 0
#define SYSCFG_ANACR4_I_REFAMPBCTL_MASK 0x00000003
#define SYSCFG_ANACR5_OFFSET 0x00000094
#define SYSCFG_ANACR5_TYPE UInt32
#define SYSCFG_ANACR5_RESERVED_MASK 0x3F81F800
#define SYSCFG_ANACR5_BGTC_SHIFT 30
#define SYSCFG_ANACR5_BGTC_MASK 0xC0000000
#define SYSCFG_ANACR5_AUX_ADC_SC_SHIFT 21
#define SYSCFG_ANACR5_AUX_ADC_SC_MASK 0x00600000
#define SYSCFG_ANACR5_AUX_DAC_SI_SHIFT 19
#define SYSCFG_ANACR5_AUX_DAC_SI_MASK 0x00180000
#define SYSCFG_ANACR5_AUX_DAC0_SC_SHIFT 17
#define SYSCFG_ANACR5_AUX_DAC0_SC_MASK 0x00060000
#define SYSCFG_ANACR5_AUX_DAC_CM_SHIFT 7
#define SYSCFG_ANACR5_AUX_DAC_CM_MASK 0x00000780
#define SYSCFG_ANACR5_AUX_DAC_PD_SHIFT 6
#define SYSCFG_ANACR5_AUX_DAC_PD_MASK 0x00000040
#define SYSCFG_ANACR5_AUX_DAC_IBIAS_SHIFT 4
#define SYSCFG_ANACR5_AUX_DAC_IBIAS_MASK 0x00000030
#define SYSCFG_ANACR5_AUX_CLK_CTRL_SHIFT 3
#define SYSCFG_ANACR5_AUX_CLK_CTRL_MASK 0x00000008
#define SYSCFG_ANACR5_AUX_CLK_INV_SHIFT 2
#define SYSCFG_ANACR5_AUX_CLK_INV_MASK 0x00000004
#define SYSCFG_ANACR5_AUX_OUTPUT_SHIFT 1
#define SYSCFG_ANACR5_AUX_OUTPUT_MASK 0x00000002
#define SYSCFG_ANACR5_PWD_AUX_DAC_SHIFT 0
#define SYSCFG_ANACR5_PWD_AUX_DAC_MASK 0x00000001
#define SYSCFG_ANACR6_OFFSET 0x00000098
#define SYSCFG_ANACR6_TYPE UInt32
#define SYSCFG_ANACR6_RESERVED_MASK 0xFFF9F800
#define SYSCFG_ANACR6_AUXDAC1SC_SHIFT 17
#define SYSCFG_ANACR6_AUXDAC1SC_MASK 0x00060000
#define SYSCFG_ANACR6_AUX_DAC_CM_SHIFT 7
#define SYSCFG_ANACR6_AUX_DAC_CM_MASK 0x00000780
#define SYSCFG_ANACR6_AUX_DAC_PD_SHIFT 6
#define SYSCFG_ANACR6_AUX_DAC_PD_MASK 0x00000040
#define SYSCFG_ANACR6_AUX_DAC_IBIAS_SHIFT 4
#define SYSCFG_ANACR6_AUX_DAC_IBIAS_MASK 0x00000030
#define SYSCFG_ANACR6_AUX_CLK_CTRL_SHIFT 3
#define SYSCFG_ANACR6_AUX_CLK_CTRL_MASK 0x00000008
#define SYSCFG_ANACR6_AUX_CLK_INV_SHIFT 2
#define SYSCFG_ANACR6_AUX_CLK_INV_MASK 0x00000004
#define SYSCFG_ANACR6_AUX_OUTPUT_SHIFT 1
#define SYSCFG_ANACR6_AUX_OUTPUT_MASK 0x00000002
#define SYSCFG_ANACR6_PWD_AUX_DAC_SHIFT 0
#define SYSCFG_ANACR6_PWD_AUX_DAC_MASK 0x00000001
#define SYSCFG_ANACR7_OFFSET 0x0000009C
#define SYSCFG_ANACR7_TYPE UInt32
#define SYSCFG_ANACR7_RESERVED_MASK 0xA0000000
#define SYSCFG_ANACR7_RXSC_SHIFT 30
#define SYSCFG_ANACR7_RXSC_MASK 0x40000000
#define SYSCFG_ANACR7_DISABLEHVREGULATOR_SHIFT 28
#define SYSCFG_ANACR7_DISABLEHVREGULATOR_MASK 0x10000000
#define SYSCFG_ANACR7_INPUTCMENABLE_SHIFT 27
#define SYSCFG_ANACR7_INPUTCMENABLE_MASK 0x08000000
#define SYSCFG_ANACR7_ALBSELECT_SHIFT 26
#define SYSCFG_ANACR7_ALBSELECT_MASK 0x04000000
#define SYSCFG_ANACR7_SHUFFLECTRL_SHIFT 24
#define SYSCFG_ANACR7_SHUFFLECTRL_MASK 0x03000000
#define SYSCFG_ANACR7_DITHCTRL_SHIFT 22
#define SYSCFG_ANACR7_DITHCTRL_MASK 0x00C00000
#define SYSCFG_ANACR7_VCMCTRL_FLASH_SHIFT 20
#define SYSCFG_ANACR7_VCMCTRL_FLASH_MASK 0x00300000
#define SYSCFG_ANACR7_VCMCTRL_INT2_SHIFT 18
#define SYSCFG_ANACR7_VCMCTRL_INT2_MASK 0x000C0000
#define SYSCFG_ANACR7_VCMCTRL_INT1_SHIFT 16
#define SYSCFG_ANACR7_VCMCTRL_INT1_MASK 0x00030000
#define SYSCFG_ANACR7_DITHREFOFFSETCTRL_SHIFT 14
#define SYSCFG_ANACR7_DITHREFOFFSETCTRL_MASK 0x0000C000
#define SYSCFG_ANACR7_ADCREFOFFSETCTRL_SHIFT 12
#define SYSCFG_ANACR7_ADCREFOFFSETCTRL_MASK 0x00003000
#define SYSCFG_ANACR7_BIASCTRL_INT2_SHIFT 10
#define SYSCFG_ANACR7_BIASCTRL_INT2_MASK 0x00000C00
#define SYSCFG_ANACR7_BIASCTRL_INT1_SHIFT 8
#define SYSCFG_ANACR7_BIASCTRL_INT1_MASK 0x00000300
#define SYSCFG_ANACR7_BIASCTRL_ADC_SHIFT 6
#define SYSCFG_ANACR7_BIASCTRL_ADC_MASK 0x000000C0
#define SYSCFG_ANACR7_RXPGASET_SHIFT 2
#define SYSCFG_ANACR7_RXPGASET_MASK 0x0000003C
#define SYSCFG_ANACR7_RXRESETB_SHIFT 1
#define SYSCFG_ANACR7_RXRESETB_MASK 0x00000002
#define SYSCFG_ANACR7_RXPWRDN_SHIFT 0
#define SYSCFG_ANACR7_RXPWRDN_MASK 0x00000001
#define SYSCFG_ANACR8_OFFSET 0x000000A0
#define SYSCFG_ANACR8_TYPE UInt32
#define SYSCFG_ANACR8_RESERVED_MASK 0xFC400000
#define SYSCFG_ANACR8_IBCASCTRL_SHIFT 23
#define SYSCFG_ANACR8_IBCASCTRL_MASK 0x03800000
#define SYSCFG_ANACR8_IBCMCTRL_SHIFT 20
#define SYSCFG_ANACR8_IBCMCTRL_MASK 0x00300000
#define SYSCFG_ANACR8_IBAMPCTRL_SHIFT 17
#define SYSCFG_ANACR8_IBAMPCTRL_MASK 0x000E0000
#define SYSCFG_ANACR8_TXAMPCTRL_SHIFT 13
#define SYSCFG_ANACR8_TXAMPCTRL_MASK 0x0001E000
#define SYSCFG_ANACR8_TXVCMCTRL_SHIFT 10
#define SYSCFG_ANACR8_TXVCMCTRL_MASK 0x00001C00
#define SYSCFG_ANACR8_CLK_DISABLE_SHIFT 9
#define SYSCFG_ANACR8_CLK_DISABLE_MASK 0x00000200
#define SYSCFG_ANACR8_CLKINV_SHIFT 8
#define SYSCFG_ANACR8_CLKINV_MASK 0x00000100
#define SYSCFG_ANACR8_TXOBB_SHIFT 7
#define SYSCFG_ANACR8_TXOBB_MASK 0x00000080
#define SYSCFG_ANACR8_TXSC_SHIFT 5
#define SYSCFG_ANACR8_TXSC_MASK 0x00000060
#define SYSCFG_ANACR8_TXSIQ_SHIFT 3
#define SYSCFG_ANACR8_TXSIQ_MASK 0x00000018
#define SYSCFG_ANACR8_TXSII_SHIFT 1
#define SYSCFG_ANACR8_TXSII_MASK 0x00000006
#define SYSCFG_ANACR8_PWRDNTX_SHIFT 0
#define SYSCFG_ANACR8_PWRDNTX_MASK 0x00000001
#define SYSCFG_ANACR9_OFFSET 0x000000A4
#define SYSCFG_ANACR9_TYPE UInt32
#define SYSCFG_ANACR9_RESERVED_MASK 0x00001C00
#define SYSCFG_ANACR9_SYNC_DET_LENGTH_SHIFT 29
#define SYSCFG_ANACR9_SYNC_DET_LENGTH_MASK 0xE0000000
#define SYSCFG_ANACR9_TX_PHASE_SHIFT 28
#define SYSCFG_ANACR9_TX_PHASE_MASK 0x10000000
#define SYSCFG_ANACR9_AFE_CHRPTEN_SHIFT 27
#define SYSCFG_ANACR9_AFE_CHRPTEN_MASK 0x08000000
#define SYSCFG_ANACR9_AFE_LPBACK_SHIFT 26
#define SYSCFG_ANACR9_AFE_LPBACK_MASK 0x04000000
#define SYSCFG_ANACR9_AFE_CDRCKEN_SHIFT 25
#define SYSCFG_ANACR9_AFE_CDRCKEN_MASK 0x02000000
#define SYSCFG_ANACR9_FS_LS_CROSS_OVER_SHIFT 24
#define SYSCFG_ANACR9_FS_LS_CROSS_OVER_MASK 0x01000000
#define SYSCFG_ANACR9_AFE_HSTXEN_SHIFT 23
#define SYSCFG_ANACR9_AFE_HSTXEN_MASK 0x00800000
#define SYSCFG_ANACR9_ECN_ENABLE_SHIFT 22
#define SYSCFG_ANACR9_ECN_ENABLE_MASK 0x00400000
#define SYSCFG_ANACR9_UTMI_LOOPBACK_SHIFT 21
#define SYSCFG_ANACR9_UTMI_LOOPBACK_MASK 0x00200000
#define SYSCFG_ANACR9_AFE_RXLOGICR_SHIFT 20
#define SYSCFG_ANACR9_AFE_RXLOGICR_MASK 0x00100000
#define SYSCFG_ANACR9_IOST_CONTROL_SHIFT 18
#define SYSCFG_ANACR9_IOST_CONTROL_MASK 0x000C0000
// --- SYSCFG_ANACR9 bit fields (register OFFSET/TYPE macros are above this chunk) ---
// NOTE(review): field names (UTMI_*, PLL_*, OTG_MODE, HOSTB_DEV) suggest this is a
// USB PHY / PLL analog control register -- confirm against the chip datasheet.
#define SYSCFG_ANACR9_SUSPEND_PLL_PDN_SHIFT 17
#define SYSCFG_ANACR9_SUSPEND_PLL_PDN_MASK 0x00020000
#define SYSCFG_ANACR9_PLL_LOCK_DIS_SHIFT 16
#define SYSCFG_ANACR9_PLL_LOCK_DIS_MASK 0x00010000
#define SYSCFG_ANACR9_IDDQ_EN_SHIFT 15
#define SYSCFG_ANACR9_IDDQ_EN_MASK 0x00008000
#define SYSCFG_ANACR9_UTMI_DISCON_PHY_SHIFT 14
#define SYSCFG_ANACR9_UTMI_DISCON_PHY_MASK 0x00004000
#define SYSCFG_ANACR9_PLL_BYPASS_SHIFT 13
#define SYSCFG_ANACR9_PLL_BYPASS_MASK 0x00002000
#define SYSCFG_ANACR9_RESET_HI_PLL_SHIFT 9
#define SYSCFG_ANACR9_RESET_HI_PLL_MASK 0x00000200
#define SYSCFG_ANACR9_UTMI_L1_SUSPENDM_SHIFT 8
#define SYSCFG_ANACR9_UTMI_L1_SUSPENDM_MASK 0x00000100
#define SYSCFG_ANACR9_AFE_NON_DRIVING_SHIFT 7
#define SYSCFG_ANACR9_AFE_NON_DRIVING_MASK 0x00000080
#define SYSCFG_ANACR9_UTMI_SLEEPM_SHIFT 6
#define SYSCFG_ANACR9_UTMI_SLEEPM_MASK 0x00000040
#define SYSCFG_ANACR9_SOFT_RESETB_SHIFT 5
#define SYSCFG_ANACR9_SOFT_RESETB_MASK 0x00000020
#define SYSCFG_ANACR9_PLL_PWRDWNB_SHIFT 4
#define SYSCFG_ANACR9_PLL_PWRDWNB_MASK 0x00000010
#define SYSCFG_ANACR9_PLL_CALEN_SHIFT 3
#define SYSCFG_ANACR9_PLL_CALEN_MASK 0x00000008
#define SYSCFG_ANACR9_OTG_MODE_SHIFT 2
#define SYSCFG_ANACR9_OTG_MODE_MASK 0x00000004
#define SYSCFG_ANACR9_HOSTB_DEV_SHIFT 1
#define SYSCFG_ANACR9_HOSTB_DEV_MASK 0x00000002
#define SYSCFG_ANACR9_PHY_PWRDWNB_SHIFT 0
#define SYSCFG_ANACR9_PHY_PWRDWNB_MASK 0x00000001
// --- SYSCFG_ANACR10 @ offset 0x0A8 ---
// Reserved mask 0x80203FC0 is the exact complement of the field masks below
// (bits 31, 21 and 13:6 carry no defined field).
// NOTE(review): I_CKDIV/I_CKADJ/PWRDNDAC* names suggest clock-trim / DAC power
// control -- confirm in the datasheet before relying on the semantics.
#define SYSCFG_ANACR10_OFFSET 0x000000A8
#define SYSCFG_ANACR10_TYPE UInt32
#define SYSCFG_ANACR10_RESERVED_MASK 0x80203FC0
#define SYSCFG_ANACR10_I_CKDIV_SHIFT 29
#define SYSCFG_ANACR10_I_CKDIV_MASK 0x60000000
#define SYSCFG_ANACR10_I_CKADJ_SHIFT 25
#define SYSCFG_ANACR10_I_CKADJ_MASK 0x1E000000
#define SYSCFG_ANACR10_I_BGTC_SHIFT 22
#define SYSCFG_ANACR10_I_BGTC_MASK 0x01C00000
#define SYSCFG_ANACR10_I_CKSEL_SHIFT 20
#define SYSCFG_ANACR10_I_CKSEL_MASK 0x00100000
#define SYSCFG_ANACR10_I_CLKINV_SHIFT 19
#define SYSCFG_ANACR10_I_CLKINV_MASK 0x00080000
#define SYSCFG_ANACR10_CLKPHASEDIS_SHIFT 18
#define SYSCFG_ANACR10_CLKPHASEDIS_MASK 0x00040000
#define SYSCFG_ANACR10_CLKTEST_SHIFT 17
#define SYSCFG_ANACR10_CLKTEST_MASK 0x00020000
#define SYSCFG_ANACR10_CLKPHASE_SHIFT 16
#define SYSCFG_ANACR10_CLKPHASE_MASK 0x00010000
#define SYSCFG_ANACR10_CLKRESET_SHIFT 15
#define SYSCFG_ANACR10_CLKRESET_MASK 0x00008000
#define SYSCFG_ANACR10_BUFBYPASS_SHIFT 14
#define SYSCFG_ANACR10_BUFBYPASS_MASK 0x00004000
#define SYSCFG_ANACR10_PWRDNREF_SHIFT 5
#define SYSCFG_ANACR10_PWRDNREF_MASK 0x00000020
#define SYSCFG_ANACR10_PWRDNDRVR_SHIFT 4
#define SYSCFG_ANACR10_PWRDNDRVR_MASK 0x00000010
#define SYSCFG_ANACR10_PWRDNDRVL_SHIFT 3
#define SYSCFG_ANACR10_PWRDNDRVL_MASK 0x00000008
#define SYSCFG_ANACR10_PWRDNDACR_SHIFT 2
#define SYSCFG_ANACR10_PWRDNDACR_MASK 0x00000004
#define SYSCFG_ANACR10_PWRDNDACL_SHIFT 1
#define SYSCFG_ANACR10_PWRDNDACL_MASK 0x00000002
#define SYSCFG_ANACR10_PWRDND2C_SHIFT 0
#define SYSCFG_ANACR10_PWRDND2C_MASK 0x00000001
// --- SYSCFG_ANACR11..ANACR15 @ 0x0AC..0x0BC ---
// RESERVED_MASK of 0xFFFFFFFF means these registers expose no defined fields here
// (except ANACR12, which carries the MICINVAL/MICONVAL 7-bit fields).
#define SYSCFG_ANACR11_OFFSET 0x000000AC
#define SYSCFG_ANACR11_TYPE UInt32
#define SYSCFG_ANACR11_RESERVED_MASK 0xFFFFFFFF
#define SYSCFG_ANACR12_OFFSET 0x000000B0
#define SYSCFG_ANACR12_TYPE UInt32
#define SYSCFG_ANACR12_RESERVED_MASK 0xFFFF8080
#define SYSCFG_ANACR12_MICINVAL_SHIFT 8
#define SYSCFG_ANACR12_MICINVAL_MASK 0x00007F00
#define SYSCFG_ANACR12_MICONVAL_SHIFT 0
#define SYSCFG_ANACR12_MICONVAL_MASK 0x0000007F
#define SYSCFG_ANACR13_OFFSET 0x000000B4
#define SYSCFG_ANACR13_TYPE UInt32
#define SYSCFG_ANACR13_RESERVED_MASK 0xFFFFFFFF
#define SYSCFG_ANACR14_OFFSET 0x000000B8
#define SYSCFG_ANACR14_TYPE UInt32
#define SYSCFG_ANACR14_RESERVED_MASK 0xFFFFFFFF
#define SYSCFG_ANACR15_OFFSET 0x000000BC
#define SYSCFG_ANACR15_TYPE UInt32
#define SYSCFG_ANACR15_RESERVED_MASK 0xFFFFFFFF
// --- IR-drop monitors 0..2 @ 0x0C0..0x0C8 ---
// Identical layout: OSC_EN (bit 11), MON_EN (bit 10), 10-bit CNT_OUT result (bits 9:0).
#define SYSCFG_IRDROP_MON0_OFFSET 0x000000C0
#define SYSCFG_IRDROP_MON0_TYPE UInt32
#define SYSCFG_IRDROP_MON0_RESERVED_MASK 0xFFFFF000
#define SYSCFG_IRDROP_MON0_OSC_EN_SHIFT 11
#define SYSCFG_IRDROP_MON0_OSC_EN_MASK 0x00000800
#define SYSCFG_IRDROP_MON0_MON_EN_SHIFT 10
#define SYSCFG_IRDROP_MON0_MON_EN_MASK 0x00000400
#define SYSCFG_IRDROP_MON0_CNT_OUT_SHIFT 0
#define SYSCFG_IRDROP_MON0_CNT_OUT_MASK 0x000003FF
#define SYSCFG_IRDROP_MON1_OFFSET 0x000000C4
#define SYSCFG_IRDROP_MON1_TYPE UInt32
#define SYSCFG_IRDROP_MON1_RESERVED_MASK 0xFFFFF000
#define SYSCFG_IRDROP_MON1_OSC_EN_SHIFT 11
#define SYSCFG_IRDROP_MON1_OSC_EN_MASK 0x00000800
#define SYSCFG_IRDROP_MON1_MON_EN_SHIFT 10
#define SYSCFG_IRDROP_MON1_MON_EN_MASK 0x00000400
#define SYSCFG_IRDROP_MON1_CNT_OUT_SHIFT 0
#define SYSCFG_IRDROP_MON1_CNT_OUT_MASK 0x000003FF
#define SYSCFG_IRDROP_MON2_OFFSET 0x000000C8
#define SYSCFG_IRDROP_MON2_TYPE UInt32
#define SYSCFG_IRDROP_MON2_RESERVED_MASK 0xFFFFF000
#define SYSCFG_IRDROP_MON2_OSC_EN_SHIFT 11
#define SYSCFG_IRDROP_MON2_OSC_EN_MASK 0x00000800
#define SYSCFG_IRDROP_MON2_MON_EN_SHIFT 10
#define SYSCFG_IRDROP_MON2_MON_EN_MASK 0x00000400
#define SYSCFG_IRDROP_MON2_CNT_OUT_SHIFT 0
#define SYSCFG_IRDROP_MON2_CNT_OUT_MASK 0x000003FF
// --- MDIO write/read registers @ 0x0CC / 0x0D0 ---
// MDIO_WRITE: WRITE/READ strobe bits (31/30), SM select (29), 5-bit MDIO2 field,
// 8-bit MDIO1 field, 16-bit REG payload. All 32 bits defined (RESERVED_MASK 0).
#define SYSCFG_MDIO_WRITE_OFFSET 0x000000CC
#define SYSCFG_MDIO_WRITE_TYPE UInt32
#define SYSCFG_MDIO_WRITE_RESERVED_MASK 0x00000000
#define SYSCFG_MDIO_WRITE_WRITE_SHIFT 31
#define SYSCFG_MDIO_WRITE_WRITE_MASK 0x80000000
#define SYSCFG_MDIO_WRITE_READ_SHIFT 30
#define SYSCFG_MDIO_WRITE_READ_MASK 0x40000000
#define SYSCFG_MDIO_WRITE_MDIO3_SM_SEL_SHIFT 29
#define SYSCFG_MDIO_WRITE_MDIO3_SM_SEL_MASK 0x20000000
#define SYSCFG_MDIO_WRITE_MDIO2_SHIFT 24
#define SYSCFG_MDIO_WRITE_MDIO2_MASK 0x1F000000
#define SYSCFG_MDIO_WRITE_MDIO1_SHIFT 16
#define SYSCFG_MDIO_WRITE_MDIO1_MASK 0x00FF0000
#define SYSCFG_MDIO_WRITE_REG_SHIFT 0
#define SYSCFG_MDIO_WRITE_REG_MASK 0x0000FFFF
#define SYSCFG_MDIO_READ_OFFSET 0x000000D0
#define SYSCFG_MDIO_READ_TYPE UInt32
#define SYSCFG_MDIO_READ_RESERVED_MASK 0xFFFC0000
#define SYSCFG_MDIO_READ_VBUS_STAT2_SHIFT 17
#define SYSCFG_MDIO_READ_VBUS_STAT2_MASK 0x00020000
#define SYSCFG_MDIO_READ_VBUS_STAT1_SHIFT 16
#define SYSCFG_MDIO_READ_VBUS_STAT1_MASK 0x00010000
#define SYSCFG_MDIO_READ_REG_RD_DATA_SHIFT 0
#define SYSCFG_MDIO_READ_REG_RD_DATA_MASK 0x0000FFFF
// --- Peripheral AHB clock gating control/monitor @ 0x0D8..0x0E4 ---
// Each register is a full 32-bit field (one bit per peripheral clock gate);
// MASK / FORCE are controls, MONITOR_RAW / MONITOR are status views.
#define SYSCFG_PERIPH_AHB_CLK_GATE_MASK_OFFSET 0x000000D8
#define SYSCFG_PERIPH_AHB_CLK_GATE_MASK_TYPE UInt32
#define SYSCFG_PERIPH_AHB_CLK_GATE_MASK_RESERVED_MASK 0x00000000
#define SYSCFG_PERIPH_AHB_CLK_GATE_MASK_PERIPH_AHB_CLK_GATE_MASK_SHIFT 0
#define SYSCFG_PERIPH_AHB_CLK_GATE_MASK_PERIPH_AHB_CLK_GATE_MASK_MASK 0xFFFFFFFF
#define SYSCFG_PERIPH_AHB_CLK_GATE_FORCE_OFFSET 0x000000DC
#define SYSCFG_PERIPH_AHB_CLK_GATE_FORCE_TYPE UInt32
#define SYSCFG_PERIPH_AHB_CLK_GATE_FORCE_RESERVED_MASK 0x00000000
#define SYSCFG_PERIPH_AHB_CLK_GATE_FORCE_PERIPH_AHB_CLK_GATE_FORCE_SHIFT 0
#define SYSCFG_PERIPH_AHB_CLK_GATE_FORCE_PERIPH_AHB_CLK_GATE_FORCE_MASK 0xFFFFFFFF
#define SYSCFG_PERIPH_AHB_CLK_GATE_MONITOR_RAW_OFFSET 0x000000E0
#define SYSCFG_PERIPH_AHB_CLK_GATE_MONITOR_RAW_TYPE UInt32
#define SYSCFG_PERIPH_AHB_CLK_GATE_MONITOR_RAW_RESERVED_MASK 0x00000000
#define SYSCFG_PERIPH_AHB_CLK_GATE_MONITOR_RAW_PERIPH_AHB_CLK_GATE_MON_RAW_SHIFT 0
#define SYSCFG_PERIPH_AHB_CLK_GATE_MONITOR_RAW_PERIPH_AHB_CLK_GATE_MON_RAW_MASK 0xFFFFFFFF
#define SYSCFG_PERIPH_AHB_CLK_GATE_MONITOR_OFFSET 0x000000E4
#define SYSCFG_PERIPH_AHB_CLK_GATE_MONITOR_TYPE UInt32
#define SYSCFG_PERIPH_AHB_CLK_GATE_MONITOR_RESERVED_MASK 0x00000000
#define SYSCFG_PERIPH_AHB_CLK_GATE_MONITOR_PERIPH_AHB_CLK_GATE_MON_SHIFT 0
#define SYSCFG_PERIPH_AHB_CLK_GATE_MONITOR_PERIPH_AHB_CLK_GATE_MON_MASK 0xFFFFFFFF
// --- Per-peripheral AHB clock enables @ 0x100 onward ---
// One register per peripheral, each with a single EN bit at bit 0
// (RESERVED_MASK 0xFFFFFFFE for all of them).
#define SYSCFG_PERIPH_VIDEO_CODEC_AHB_CLK_EN_OFFSET 0x00000100
#define SYSCFG_PERIPH_VIDEO_CODEC_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_VIDEO_CODEC_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_VIDEO_CODEC_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_VIDEO_CODEC_AHB_CLK_EN_EN_MASK 0x00000001
// NOTE(review): "CAMARA" (sic) is kept as-is -- renaming the macro would break callers.
#define SYSCFG_PERIPH_CAMARA_INTERFACE_AHB_CLK_EN_OFFSET 0x00000104
#define SYSCFG_PERIPH_CAMARA_INTERFACE_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_CAMARA_INTERFACE_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_CAMARA_INTERFACE_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_CAMARA_INTERFACE_AHB_CLK_EN_EN_MASK 0x00000001
#define SYSCFG_PERIPH_USB_AHB_CLK_EN_OFFSET 0x00000108
#define SYSCFG_PERIPH_USB_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_USB_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_USB_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_USB_AHB_CLK_EN_EN_MASK 0x00000001
#define SYSCFG_PERIPH_GEA_AHB_CLK_EN_OFFSET 0x0000010C
#define SYSCFG_PERIPH_GEA_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_GEA_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_GEA_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_GEA_AHB_CLK_EN_EN_MASK 0x00000001
#define SYSCFG_PERIPH_CRYPTO_AHB_CLK_EN_OFFSET 0x00000110
#define SYSCFG_PERIPH_CRYPTO_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_CRYPTO_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_CRYPTO_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_CRYPTO_AHB_CLK_EN_EN_MASK 0x00000001
#define SYSCFG_PERIPH_PKA_AHB_CLK_EN_OFFSET 0x00000114
#define SYSCFG_PERIPH_PKA_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_PKA_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_PKA_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_PKA_AHB_CLK_EN_EN_MASK 0x00000001
#define SYSCFG_PERIPH_UARTA_AHB_CLK_EN_OFFSET 0x00000118
#define SYSCFG_PERIPH_UARTA_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_UARTA_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_UARTA_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_UARTA_AHB_CLK_EN_EN_MASK 0x00000001
#define SYSCFG_PERIPH_UARTB_AHB_CLK_EN_OFFSET 0x0000011C
#define SYSCFG_PERIPH_UARTB_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_UARTB_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_UARTB_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_UARTB_AHB_CLK_EN_EN_MASK 0x00000001
#define SYSCFG_PERIPH_DA_AHB_CLK_EN_OFFSET 0x00000120
#define SYSCFG_PERIPH_DA_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_DA_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_DA_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_DA_AHB_CLK_EN_EN_MASK 0x00000001
#define SYSCFG_PERIPH_MPCLK_AHB_CLK_EN_OFFSET 0x00000124
#define SYSCFG_PERIPH_MPCLK_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_MPCLK_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_MPCLK_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_MPCLK_AHB_CLK_EN_EN_MASK 0x00000001
// Per-peripheral AHB clock enables, continued. Note the address gaps at
// 0x128, 0x140, 0x148 and 0x158 -- no registers are defined there in this list.
#define SYSCFG_PERIPH_LCD_AHB_CLK_EN_OFFSET 0x0000012C
#define SYSCFG_PERIPH_LCD_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_LCD_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_LCD_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_LCD_AHB_CLK_EN_EN_MASK 0x00000001
#define SYSCFG_PERIPH_DPE_AHB_CLK_EN_OFFSET 0x00000130
#define SYSCFG_PERIPH_DPE_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_DPE_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_DPE_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_DPE_AHB_CLK_EN_EN_MASK 0x00000001
#define SYSCFG_PERIPH_DMAC_AHB_CLK_EN_OFFSET 0x00000134
#define SYSCFG_PERIPH_DMAC_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_DMAC_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_DMAC_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_DMAC_AHB_CLK_EN_EN_MASK 0x00000001
#define SYSCFG_PERIPH_SDIO1_AHB_CLK_EN_OFFSET 0x00000138
#define SYSCFG_PERIPH_SDIO1_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_SDIO1_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_SDIO1_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_SDIO1_AHB_CLK_EN_EN_MASK 0x00000001
#define SYSCFG_PERIPH_SDIO2_AHB_CLK_EN_OFFSET 0x0000013C
#define SYSCFG_PERIPH_SDIO2_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_SDIO2_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_SDIO2_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_SDIO2_AHB_CLK_EN_EN_MASK 0x00000001
#define SYSCFG_PERIPH_DES_AHB_CLK_EN_OFFSET 0x00000144
#define SYSCFG_PERIPH_DES_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_DES_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_DES_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_DES_AHB_CLK_EN_EN_MASK 0x00000001
#define SYSCFG_PERIPH_UARTC_AHB_CLK_EN_OFFSET 0x0000014C
#define SYSCFG_PERIPH_UARTC_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_UARTC_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_UARTC_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_UARTC_AHB_CLK_EN_EN_MASK 0x00000001
#define SYSCFG_PERIPH_RNG_AHB_CLK_EN_OFFSET 0x00000150
#define SYSCFG_PERIPH_RNG_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_RNG_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_RNG_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_RNG_AHB_CLK_EN_EN_MASK 0x00000001
#define SYSCFG_PERIPH_SDIO3_AHB_CLK_EN_OFFSET 0x00000154
#define SYSCFG_PERIPH_SDIO3_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_SDIO3_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_SDIO3_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_SDIO3_AHB_CLK_EN_EN_MASK 0x00000001
#define SYSCFG_PERIPH_FSUSBHOST_AHB_CLK_EN_OFFSET 0x0000015C
#define SYSCFG_PERIPH_FSUSBHOST_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_FSUSBHOST_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_FSUSBHOST_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_FSUSBHOST_AHB_CLK_EN_EN_MASK 0x00000001
#define SYSCFG_PERIPH_MPHI_AHB_CLK_EN_OFFSET 0x00000160
#define SYSCFG_PERIPH_MPHI_AHB_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_MPHI_AHB_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_MPHI_AHB_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_MPHI_AHB_CLK_EN_EN_MASK 0x00000001
// --- DMAC AHB clock mode, HUCM firmware clock, HTM clock enables @ 0x164..0x16C ---
#define SYSCFG_SYSCFG_DMAC_AHB_CLK_MODE_OFFSET 0x00000164
#define SYSCFG_SYSCFG_DMAC_AHB_CLK_MODE_TYPE UInt32
#define SYSCFG_SYSCFG_DMAC_AHB_CLK_MODE_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_SYSCFG_DMAC_AHB_CLK_MODE_EN_SHIFT 0
#define SYSCFG_SYSCFG_DMAC_AHB_CLK_MODE_EN_MASK 0x00000001
#define SYSCFG_PERIPH_HUCM_FW_CLK_EN_OFFSET 0x00000168
#define SYSCFG_PERIPH_HUCM_FW_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_HUCM_FW_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_HUCM_FW_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_HUCM_FW_CLK_EN_EN_MASK 0x00000001
// HTM register carries three separate clock-enable bits rather than a single EN bit.
#define SYSCFG_PERIPH_HTM_CLK_EN_OFFSET 0x0000016C
#define SYSCFG_PERIPH_HTM_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_HTM_CLK_EN_RESERVED_MASK 0xFFFFFFF8
#define SYSCFG_PERIPH_HTM_CLK_EN_HTMH_HCLK_EN_SHIFT 2
#define SYSCFG_PERIPH_HTM_CLK_EN_HTMH_HCLK_EN_MASK 0x00000004
#define SYSCFG_PERIPH_HTM_CLK_EN_HTML_HCLK_EN_SHIFT 1
#define SYSCFG_PERIPH_HTM_CLK_EN_HTML_HCLK_EN_MASK 0x00000002
#define SYSCFG_PERIPH_HTM_CLK_EN_HTM_ATCLK_EN_SHIFT 0
#define SYSCFG_PERIPH_HTM_CLK_EN_HTM_ATCLK_EN_MASK 0x00000001
// --- Test/debug access control @ 0x170 (JTAG/ETM/SBD lockout bits) ---
#define SYSCFG_TESTABILITY_ACCESS_OFFSET 0x00000170
#define SYSCFG_TESTABILITY_ACCESS_TYPE UInt32
#define SYSCFG_TESTABILITY_ACCESS_RESERVED_MASK 0xFFFFFFF0
#define SYSCFG_TESTABILITY_ACCESS_ETM_LOCK_SHIFT 3
#define SYSCFG_TESTABILITY_ACCESS_ETM_LOCK_MASK 0x00000008
#define SYSCFG_TESTABILITY_ACCESS_SBD_DISABLE_SHIFT 2
#define SYSCFG_TESTABILITY_ACCESS_SBD_DISABLE_MASK 0x00000004
#define SYSCFG_TESTABILITY_ACCESS_JTAG_DISABLE_LOCK_SHIFT 1
#define SYSCFG_TESTABILITY_ACCESS_JTAG_DISABLE_LOCK_MASK 0x00000002
#define SYSCFG_TESTABILITY_ACCESS_JTAG_DISABLE_SHIFT 0
#define SYSCFG_TESTABILITY_ACCESS_JTAG_DISABLE_MASK 0x00000001
// --- OTP region access disables + device status @ 0x174..0x17C ---
// The "_N" suffix on the disable fields suggests active-low semantics -- confirm.
#define SYSCFG_DISABLE_OTP_REGION_READ_ACCESS_OFFSET 0x00000174
#define SYSCFG_DISABLE_OTP_REGION_READ_ACCESS_TYPE UInt32
#define SYSCFG_DISABLE_OTP_REGION_READ_ACCESS_RESERVED_MASK 0x00000000
#define SYSCFG_DISABLE_OTP_REGION_READ_ACCESS_DIS_OTP_RGN_RD_N_SHIFT 0
#define SYSCFG_DISABLE_OTP_REGION_READ_ACCESS_DIS_OTP_RGN_RD_N_MASK 0xFFFFFFFF
#define SYSCFG_DISABLE_OTP_REGION_WRITE_ACCESS_OFFSET 0x00000178
#define SYSCFG_DISABLE_OTP_REGION_WRITE_ACCESS_TYPE UInt32
#define SYSCFG_DISABLE_OTP_REGION_WRITE_ACCESS_RESERVED_MASK 0x00000000
#define SYSCFG_DISABLE_OTP_REGION_WRITE_ACCESS_DIS_OTP_RGN_WR_N_SHIFT 0
#define SYSCFG_DISABLE_OTP_REGION_WRITE_ACCESS_DIS_OTP_RGN_WR_N_MASK 0xFFFFFFFF
#define SYSCFG_OTP_DEVICE_STATUS_OFFSET 0x0000017C
#define SYSCFG_OTP_DEVICE_STATUS_TYPE UInt32
#define SYSCFG_OTP_DEVICE_STATUS_RESERVED_MASK 0xFFFF0000
#define SYSCFG_OTP_DEVICE_STATUS_DEVICE_STATUS_SHIFT 0
#define SYSCFG_OTP_DEVICE_STATUS_DEVICE_STATUS_MASK 0x0000FFFF
// --- IR-drop monitors 3..6 @ 0x180..0x18C (same layout as monitors 0..2) ---
#define SYSCFG_IRDROP_MON3_OFFSET 0x00000180
#define SYSCFG_IRDROP_MON3_TYPE UInt32
#define SYSCFG_IRDROP_MON3_RESERVED_MASK 0xFFFFF000
#define SYSCFG_IRDROP_MON3_OSC_EN_SHIFT 11
#define SYSCFG_IRDROP_MON3_OSC_EN_MASK 0x00000800
#define SYSCFG_IRDROP_MON3_MON_EN_SHIFT 10
#define SYSCFG_IRDROP_MON3_MON_EN_MASK 0x00000400
#define SYSCFG_IRDROP_MON3_CNT_OUT_SHIFT 0
#define SYSCFG_IRDROP_MON3_CNT_OUT_MASK 0x000003FF
#define SYSCFG_IRDROP_MON4_OFFSET 0x00000184
#define SYSCFG_IRDROP_MON4_TYPE UInt32
#define SYSCFG_IRDROP_MON4_RESERVED_MASK 0xFFFFF000
#define SYSCFG_IRDROP_MON4_OSC_EN_SHIFT 11
#define SYSCFG_IRDROP_MON4_OSC_EN_MASK 0x00000800
#define SYSCFG_IRDROP_MON4_MON_EN_SHIFT 10
#define SYSCFG_IRDROP_MON4_MON_EN_MASK 0x00000400
#define SYSCFG_IRDROP_MON4_CNT_OUT_SHIFT 0
#define SYSCFG_IRDROP_MON4_CNT_OUT_MASK 0x000003FF
#define SYSCFG_IRDROP_MON5_OFFSET 0x00000188
#define SYSCFG_IRDROP_MON5_TYPE UInt32
#define SYSCFG_IRDROP_MON5_RESERVED_MASK 0xFFFFF000
#define SYSCFG_IRDROP_MON5_OSC_EN_SHIFT 11
#define SYSCFG_IRDROP_MON5_OSC_EN_MASK 0x00000800
#define SYSCFG_IRDROP_MON5_MON_EN_SHIFT 10
#define SYSCFG_IRDROP_MON5_MON_EN_MASK 0x00000400
#define SYSCFG_IRDROP_MON5_CNT_OUT_SHIFT 0
#define SYSCFG_IRDROP_MON5_CNT_OUT_MASK 0x000003FF
#define SYSCFG_IRDROP_MON6_OFFSET 0x0000018C
#define SYSCFG_IRDROP_MON6_TYPE UInt32
#define SYSCFG_IRDROP_MON6_RESERVED_MASK 0xFFFFF000
#define SYSCFG_IRDROP_MON6_OSC_EN_SHIFT 11
#define SYSCFG_IRDROP_MON6_OSC_EN_MASK 0x00000800
#define SYSCFG_IRDROP_MON6_MON_EN_SHIFT 10
#define SYSCFG_IRDROP_MON6_MON_EN_MASK 0x00000400
#define SYSCFG_IRDROP_MON6_CNT_OUT_SHIFT 0
#define SYSCFG_IRDROP_MON6_CNT_OUT_MASK 0x000003FF
// --- Cipher firmware clock enable @ 0x190 ---
#define SYSCFG_PERIPH_CIPHER_FW_CLK_EN_OFFSET 0x00000190
#define SYSCFG_PERIPH_CIPHER_FW_CLK_EN_TYPE UInt32
#define SYSCFG_PERIPH_CIPHER_FW_CLK_EN_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_PERIPH_CIPHER_FW_CLK_EN_EN_SHIFT 0
#define SYSCFG_PERIPH_CIPHER_FW_CLK_EN_EN_MASK 0x00000001
// --- SYSCONF AHB clock extend registers @ 0x1A0/0x1A4 (full 32-bit fields) ---
#define SYSCFG_SYSCONF_AHB_CLK_EXTEND0_OFFSET 0x000001A0
#define SYSCFG_SYSCONF_AHB_CLK_EXTEND0_TYPE UInt32
#define SYSCFG_SYSCONF_AHB_CLK_EXTEND0_RESERVED_MASK 0x00000000
#define SYSCFG_SYSCONF_AHB_CLK_EXTEND0_SYSCONF_AHB_CLK_EXTEND0_SHIFT 0
#define SYSCFG_SYSCONF_AHB_CLK_EXTEND0_SYSCONF_AHB_CLK_EXTEND0_MASK 0xFFFFFFFF
#define SYSCFG_SYSCONF_AHB_CLK_EXTEND1_OFFSET 0x000001A4
#define SYSCFG_SYSCONF_AHB_CLK_EXTEND1_TYPE UInt32
#define SYSCFG_SYSCONF_AHB_CLK_EXTEND1_RESERVED_MASK 0x00000000
#define SYSCFG_SYSCONF_AHB_CLK_EXTEND1_SYSCONF_AHB_CLK_EXTEND1_SHIFT 0
#define SYSCFG_SYSCONF_AHB_CLK_EXTEND1_SYSCONF_AHB_CLK_EXTEND1_MASK 0xFFFFFFFF
// --- OTP-backed chip feature/configuration registers @ 0x1C0..0x1D0 ---
#define SYSCFG_OTP_CHIP_FEATURE_ID_OFFSET 0x000001C0
#define SYSCFG_OTP_CHIP_FEATURE_ID_TYPE UInt32
#define SYSCFG_OTP_CHIP_FEATURE_ID_RESERVED_MASK 0xFFFFFFF8
#define SYSCFG_OTP_CHIP_FEATURE_ID_CHIP_DIFF_OVERRIDE_SHIFT 2
#define SYSCFG_OTP_CHIP_FEATURE_ID_CHIP_DIFF_OVERRIDE_MASK 0x00000004
#define SYSCFG_OTP_CHIP_FEATURE_ID_CHIP_DIFF_ID_SHIFT 0
#define SYSCFG_OTP_CHIP_FEATURE_ID_CHIP_DIFF_ID_MASK 0x00000003
#define SYSCFG_OTP_WCDMA_CAT_OFFSET 0x000001C4
#define SYSCFG_OTP_WCDMA_CAT_TYPE UInt32
#define SYSCFG_OTP_WCDMA_CAT_RESERVED_MASK 0xFFFFFFE0
#define SYSCFG_OTP_WCDMA_CAT_PRISM_ENB_SHIFT 4
#define SYSCFG_OTP_WCDMA_CAT_PRISM_ENB_MASK 0x00000010
#define SYSCFG_OTP_WCDMA_CAT_HSDPA_ENB_SHIFT 3
#define SYSCFG_OTP_WCDMA_CAT_HSDPA_ENB_MASK 0x00000008
#define SYSCFG_OTP_WCDMA_CAT_HSDPA_CAT_SHIFT 2
#define SYSCFG_OTP_WCDMA_CAT_HSDPA_CAT_MASK 0x00000004
#define SYSCFG_OTP_WCDMA_CAT_HSUPA_ENB_SHIFT 1
#define SYSCFG_OTP_WCDMA_CAT_HSUPA_ENB_MASK 0x00000002
#define SYSCFG_OTP_WCDMA_CAT_HSUPA_CAT_SHIFT 0
#define SYSCFG_OTP_WCDMA_CAT_HSUPA_CAT_MASK 0x00000001
#define SYSCFG_OTP_MM_FEAT_CFG_OFFSET 0x000001C8
#define SYSCFG_OTP_MM_FEAT_CFG_TYPE UInt32
#define SYSCFG_OTP_MM_FEAT_CFG_RESERVED_MASK 0xFFFFFFF0
#define SYSCFG_OTP_MM_FEAT_CFG_SW_CAP_ISP_RSLN_SHIFT 3
#define SYSCFG_OTP_MM_FEAT_CFG_SW_CAP_ISP_RSLN_MASK 0x00000008
#define SYSCFG_OTP_MM_FEAT_CFG_AP_LMT_SPD_SHIFT 2
#define SYSCFG_OTP_MM_FEAT_CFG_AP_LMT_SPD_MASK 0x00000004
#define SYSCFG_OTP_MM_FEAT_CFG_SW_CAP_DSP_SPD_SHIFT 1
#define SYSCFG_OTP_MM_FEAT_CFG_SW_CAP_DSP_SPD_MASK 0x00000002
#define SYSCFG_OTP_MM_FEAT_CFG_SW_CAP_DSI_SPD_SHIFT 0
#define SYSCFG_OTP_MM_FEAT_CFG_SW_CAP_DSI_SPD_MASK 0x00000001
#define SYSCFG_OTP_MM_FEAT_DIS_OFFSET 0x000001CC
#define SYSCFG_OTP_MM_FEAT_DIS_TYPE UInt32
#define SYSCFG_OTP_MM_FEAT_DIS_RESERVED_MASK 0xFFFFFF80
#define SYSCFG_OTP_MM_FEAT_DIS_AP_DIS_SHIFT 6
#define SYSCFG_OTP_MM_FEAT_DIS_AP_DIS_MASK 0x00000040
#define SYSCFG_OTP_MM_FEAT_DIS_VID_DIS_SHIFT 5
#define SYSCFG_OTP_MM_FEAT_DIS_VID_DIS_MASK 0x00000020
#define SYSCFG_OTP_MM_FEAT_DIS_DPE_DIS_SHIFT 4
#define SYSCFG_OTP_MM_FEAT_DIS_DPE_DIS_MASK 0x00000010
#define SYSCFG_OTP_MM_FEAT_DIS_LCDC_DIS_SHIFT 3
#define SYSCFG_OTP_MM_FEAT_DIS_LCDC_DIS_MASK 0x00000008
#define SYSCFG_OTP_MM_FEAT_DIS_CAM_DIS_SHIFT 2
#define SYSCFG_OTP_MM_FEAT_DIS_CAM_DIS_MASK 0x00000004
#define SYSCFG_OTP_MM_FEAT_DIS_DSI_DIS_SHIFT 1
#define SYSCFG_OTP_MM_FEAT_DIS_DSI_DIS_MASK 0x00000002
#define SYSCFG_OTP_MM_FEAT_DIS_TVO_DIS_SHIFT 0
#define SYSCFG_OTP_MM_FEAT_DIS_TVO_DIS_MASK 0x00000001
#define SYSCFG_OTP_MAC_VIS_DIS_OFFSET 0x000001D0
#define SYSCFG_OTP_MAC_VIS_DIS_TYPE UInt32
#define SYSCFG_OTP_MAC_VIS_DIS_RESERVED_MASK 0xFFFFFFFE
#define SYSCFG_OTP_MAC_VIS_DIS_MACVIS_DIS_SHIFT 0
#define SYSCFG_OTP_MAC_VIS_DIS_MACVIS_DIS_MASK 0x00000001
// --- Bus-bridge INCR burst enables @ 0x1E0: one read + one write enable per bridge ---
#define SYSCFG_BRIDGE_INCR_EN_OFFSET 0x000001E0
#define SYSCFG_BRIDGE_INCR_EN_TYPE UInt32
#define SYSCFG_BRIDGE_INCR_EN_RESERVED_MASK 0xFFFF0000
#define SYSCFG_BRIDGE_INCR_EN_DSP_ASYNC_BRG_ZSM_RD_INCR_EN_SHIFT 15
#define SYSCFG_BRIDGE_INCR_EN_DSP_ASYNC_BRG_ZSM_RD_INCR_EN_MASK 0x00008000
#define SYSCFG_BRIDGE_INCR_EN_DSP_ASYNC_BRG_ZSM_WR_INCR_EN_SHIFT 14
#define SYSCFG_BRIDGE_INCR_EN_DSP_ASYNC_BRG_ZSM_WR_INCR_EN_MASK 0x00004000
#define SYSCFG_BRIDGE_INCR_EN_DSP_ASYNC_BRG_ICACHE_RD_INCR_EN_SHIFT 13
#define SYSCFG_BRIDGE_INCR_EN_DSP_ASYNC_BRG_ICACHE_RD_INCR_EN_MASK 0x00002000
#define SYSCFG_BRIDGE_INCR_EN_DSP_ASYNC_BRG_ICACHE_WR_INCR_EN_SHIFT 12
#define SYSCFG_BRIDGE_INCR_EN_DSP_ASYNC_BRG_ICACHE_WR_INCR_EN_MASK 0x00001000
#define SYSCFG_BRIDGE_INCR_EN_DSP_ASYNC_BRG_SLAVE_RD_INCR_EN_SHIFT 11
#define SYSCFG_BRIDGE_INCR_EN_DSP_ASYNC_BRG_SLAVE_RD_INCR_EN_MASK 0x00000800
#define SYSCFG_BRIDGE_INCR_EN_DSP_ASYNC_BRG_SLAVE_WR_INCR_EN_SHIFT 10
#define SYSCFG_BRIDGE_INCR_EN_DSP_ASYNC_BRG_SLAVE_WR_INCR_EN_MASK 0x00000400
#define SYSCFG_BRIDGE_INCR_EN_ML2MH_SYNC_BRG_RD_INCR_EN_SHIFT 9
#define SYSCFG_BRIDGE_INCR_EN_ML2MH_SYNC_BRG_RD_INCR_EN_MASK 0x00000200
#define SYSCFG_BRIDGE_INCR_EN_ML2MH_SYNC_BRG_WR_INCR_EN_SHIFT 8
#define SYSCFG_BRIDGE_INCR_EN_ML2MH_SYNC_BRG_WR_INCR_EN_MASK 0x00000100
#define SYSCFG_BRIDGE_INCR_EN_MH2ML_SYNC_BRG_RD_INCR_EN_SHIFT 7
#define SYSCFG_BRIDGE_INCR_EN_MH2ML_SYNC_BRG_RD_INCR_EN_MASK 0x00000080
#define SYSCFG_BRIDGE_INCR_EN_MH2ML_SYNC_BRG_WR_INCR_EN_SHIFT 6
#define SYSCFG_BRIDGE_INCR_EN_MH2ML_SYNC_BRG_WR_INCR_EN_MASK 0x00000040
#define SYSCFG_BRIDGE_INCR_EN_MA11D_SYNC_BRG_RD_INCR_EN_SHIFT 5
#define SYSCFG_BRIDGE_INCR_EN_MA11D_SYNC_BRG_RD_INCR_EN_MASK 0x00000020
#define SYSCFG_BRIDGE_INCR_EN_MA11D_SYNC_BRG_WR_INCR_EN_SHIFT 4
#define SYSCFG_BRIDGE_INCR_EN_MA11D_SYNC_BRG_WR_INCR_EN_MASK 0x00000010
#define SYSCFG_BRIDGE_INCR_EN_VID_ENC_SYNC_BRG_RD_INCR_EN_SHIFT 3
#define SYSCFG_BRIDGE_INCR_EN_VID_ENC_SYNC_BRG_RD_INCR_EN_MASK 0x00000008
#define SYSCFG_BRIDGE_INCR_EN_VID_ENC_SYNC_BRG_WR_INCR_EN_SHIFT 2
#define SYSCFG_BRIDGE_INCR_EN_VID_ENC_SYNC_BRG_WR_INCR_EN_MASK 0x00000004
#define SYSCFG_BRIDGE_INCR_EN_VID_DEC_SYNC_BRG_RD_INCR_EN_SHIFT 1
#define SYSCFG_BRIDGE_INCR_EN_VID_DEC_SYNC_BRG_RD_INCR_EN_MASK 0x00000002
#define SYSCFG_BRIDGE_INCR_EN_VID_DEC_SYNC_BRG_WR_INCR_EN_SHIFT 0
#define SYSCFG_BRIDGE_INCR_EN_VID_DEC_SYNC_BRG_WR_INCR_EN_MASK 0x00000001
// --- FPGA release ID @ 0x1FC (8-bit value; relevant on FPGA emulation builds) ---
#define SYSCFG_FPGA_VERSION_OFFSET 0x000001FC
#define SYSCFG_FPGA_VERSION_TYPE UInt32
#define SYSCFG_FPGA_VERSION_RESERVED_MASK 0xFFFFFF00
#define SYSCFG_FPGA_VERSION_FPGA_RLS_ID_SHIFT 0
#define SYSCFG_FPGA_VERSION_FPGA_RLS_ID_MASK 0x000000FF
//***** Legacy ("old style") memory-mapped register overlay for the SYSCFG block.
//***** Field order and array sizes ARE the hardware layout -- do not reorder or
//***** resize. Trailing comments give the byte offset; reserved-array comments
//***** use an exclusive end (e.g. "068-080" = words 0x068..0x07C, next reg 0x080).
//***** All offsets below were cross-checked against the SYSCFG_*_OFFSET macros
//***** above and are arithmetically consistent.
typedef volatile struct {
UInt32 m_SYSCFG_IOCR0; // 000
UInt32 m_SYSCFG_IOCR1; // 004
UInt32 m_SYSCFG_SUCR; // 008
UInt32 m_SYSCFG_IOCR2; // 00C
UInt32 m_SYSCFG_PIDR; // 010
UInt32 m_SYSCFG_DSPCTRL; // 014
UInt32 m_SYSCFG_PUMR; // 018
UInt32 m_SYSCFG_IOCR3; // 01C
UInt32 m_SYSCFG_IOCR4; // 020
UInt32 m_SYSCFG_IOCR5; // 024
UInt32 m_SYSCFG_IOCR6; // 028
UInt32 m_SYSCFG_IOCR7; // 02C
UInt32 m_RESERVED_030[1]; // 030
UInt32 m_SYSCFG_TVENCCR; // 034
UInt32 m_SYSCFG_DSICR; // 038
UInt32 m_RESERVED_03C[1]; // 03C
UInt32 m_SYSCFG_MCR; // 040
UInt32 m_SYSCFG_MRR; // 044
UInt32 m_SYSCFG_RAMCTRL; // 048
UInt32 m_RESERVED_04C[1]; // 04C
UInt32 m_SYSCFG_SECCTRL; // 050
UInt32 m_SYSCFG_SECSTAT; // 054
UInt32 m_SYSCFG_MARM11CR; // 058
UInt32 m_RESERVED_05C[1]; // 05C
UInt32 m_SYSCFG_MAMBACR; // 060
UInt32 m_SYSCFG_RFCR; // 064
UInt32 m_RESERVED_068[6]; // 068-080
UInt32 m_SYSCFG_ANACR0; // 080
UInt32 m_SYSCFG_ANACR1; // 084
UInt32 m_SYSCFG_ANACR2; // 088
UInt32 m_SYSCFG_ANACR3; // 08C
UInt32 m_SYSCFG_ANACR4; // 090
UInt32 m_SYSCFG_ANACR5; // 094
UInt32 m_SYSCFG_ANACR6; // 098
UInt32 m_SYSCFG_ANACR7; // 09C
UInt32 m_SYSCFG_ANACR8; // 0A0
UInt32 m_SYSCFG_ANACR9; // 0A4
UInt32 m_SYSCFG_ANACR10; // 0A8
UInt32 m_SYSCFG_ANACR11; // 0AC
UInt32 m_SYSCFG_ANACR12; // 0B0
UInt32 m_SYSCFG_ANACR13; // 0B4
UInt32 m_SYSCFG_ANACR14; // 0B8
UInt32 m_SYSCFG_ANACR15; // 0BC
UInt32 m_SYSCFG_IRDROP_MON0; // 0C0
UInt32 m_SYSCFG_IRDROP_MON1; // 0C4
UInt32 m_SYSCFG_IRDROP_MON2; // 0C8
UInt32 m_SYSCFG_MDIO_WRITE; // 0CC
UInt32 m_SYSCFG_MDIO_READ; // 0D0
UInt32 m_RESERVED_0D4[1]; // 0D4
UInt32 m_SYSCFG_PERIPH_AHB_CLK_GATE_MASK; // 0D8
UInt32 m_SYSCFG_PERIPH_AHB_CLK_GATE_FORCE; // 0DC
UInt32 m_SYSCFG_PERIPH_AHB_CLK_GATE_MONITOR_RAW; // 0E0
UInt32 m_SYSCFG_PERIPH_AHB_CLK_GATE_MONITOR; // 0E4
UInt32 m_RESERVED_0E8[6]; // 0E8-100
UInt32 m_SYSCFG_PERIPH_VIDEO_CODEC_AHB_CLK_EN; // 100
UInt32 m_SYSCFG_PERIPH_CAMARA_INTERFACE_AHB_CLK_EN; // 104
UInt32 m_SYSCFG_PERIPH_USB_AHB_CLK_EN; // 108
UInt32 m_SYSCFG_PERIPH_GEA_AHB_CLK_EN; // 10C
UInt32 m_SYSCFG_PERIPH_CRYPTO_AHB_CLK_EN; // 110
UInt32 m_SYSCFG_PERIPH_PKA_AHB_CLK_EN; // 114
UInt32 m_SYSCFG_PERIPH_UARTA_AHB_CLK_EN; // 118
UInt32 m_SYSCFG_PERIPH_UARTB_AHB_CLK_EN; // 11C
UInt32 m_SYSCFG_PERIPH_DA_AHB_CLK_EN; // 120
UInt32 m_SYSCFG_PERIPH_MPCLK_AHB_CLK_EN; // 124
UInt32 m_RESERVED_128[1]; // 128
UInt32 m_SYSCFG_PERIPH_LCD_AHB_CLK_EN; // 12C
UInt32 m_SYSCFG_PERIPH_DPE_AHB_CLK_EN; // 130
UInt32 m_SYSCFG_PERIPH_DMAC_AHB_CLK_EN; // 134
UInt32 m_SYSCFG_PERIPH_SDIO1_AHB_CLK_EN; // 138
UInt32 m_SYSCFG_PERIPH_SDIO2_AHB_CLK_EN; // 13C
UInt32 m_RESERVED_140[1]; // 140
UInt32 m_SYSCFG_PERIPH_DES_AHB_CLK_EN; // 144
UInt32 m_RESERVED_148[1]; // 148
UInt32 m_SYSCFG_PERIPH_UARTC_AHB_CLK_EN; // 14C
UInt32 m_SYSCFG_PERIPH_RNG_AHB_CLK_EN; // 150
UInt32 m_SYSCFG_PERIPH_SDIO3_AHB_CLK_EN; // 154
// NOTE(review): 0x158 is named TVENC here but has no new-style defines in view;
// conversely the new-style defines name 0x164 (DMAC_AHB_CLK_MODE), 0x168 (HUCM_FW)
// and 0x16C (HTM) while this legacy map leaves 0x164-0x16C reserved. Legacy map
// appears stale relative to the generated macros -- confirm before use.
UInt32 m_SYSCFG_PERIPH_TVENC_AHB_CLK_EN; // 158
UInt32 m_SYSCFG_PERIPH_FSUSBHOST_AHB_CLK_EN; // 15C
UInt32 m_SYSCFG_PERIPH_MPHI_AHB_CLK_EN; // 160
UInt32 m_RESERVED_164[3]; // 164-170
UInt32 m_SYSCFG_TESTABILITY_ACCESS; // 170
UInt32 m_SYSCFG_DISABLE_OTP_REGION_READ_ACCESS; // 174
UInt32 m_SYSCFG_DISABLE_OTP_REGION_WRITE_ACCESS; // 178
UInt32 m_SYSCFG_OTP_DEVICE_STATUS; // 17C
UInt32 m_RESERVED_180[31]; // 180-1FC
UInt32 m_SYSCFG_FPGA_Version; // 1FC
} BCMRDB_SYSCFG_REGS;
/**** SYSCFG ****/
// Old-style defines: register address macros (SYSCFG_BASE_ADDR + offset; base is
// defined elsewhere in this file) plus single-value bit masks (no separate
// SHIFT/MASK pairs as in the generated macros above).
//#define SYSCFG_IOCR0 (SYSCFG_BASE_ADDR + 0x0000) /* IOCR0 bit I/O Configuration Register 0 */
#define SYSCFG_IOCR0_FLASH_GPIO_MUX 0x80000000 /* RW 0 FALE/FCLE, 1 GPIO[62:61], */
#define SYSCFG_IOCR0_FLASH_SD2_MUX 0x40000000 /* RW 0 FCLK,FADQ[8:0], 1 SD2, */
#define SYSCFG_IOCR0_LCD_CTRL_MUX 0x20000000 /* RW LCD/GPIO/MPHI, concatenated */
#define SYSCFG_IOCR0_LCDD1_LCDD15_MUX 0x10000000 /* RW LCD/CAM/MPHI, concatenated w */
#define SYSCFG_IOCR0_GPEN9_B1 0x08000000 /* RW {GPEN9_B1, GPEN9_B0}: GPEN9/ */
#define SYSCFG_IOCR0_DSP_test_port_enable 0x04000000 /* RW DSP/WCDMA muxout on LCD and */
#define SYSCFG_IOCR0_MPHI_mux 0x02000000 /* RW LCD/GPIO/MPHI, concatenated */
// #define SYSCFG_IOCR0_DIGMIC_MUX 0x01000000 /* RW DIGMIC/GPIO[63] Select, 0 DI */
// #define SYSCFG_IOCR0_PCM_MUX 0x00C00000 /* RW PCM/SPI2/GPIO[40:38] or Trac */
#define SYSCFG_IOCR0_SPI_MUX_HI 0x00200000 /* RW Pin mux for bit 11, */
#define SYSCFG_IOCR0_GPEN11_B1 0x00100000 /* RW {GPEN11_B1, GPEN11_B0}: GPEN */
#define SYSCFG_IOCR0_LCDD16_LCDD17_MUX 0x00080000 /* RW LCD/GPIO/MPHI, concatenated */
#define SYSCFG_IOCR0_AFCPDM_MUX 0x00040000 /* RW AFCPDM / CLK_MONITOR Pin Mux */
#define SYSCFG_IOCR0_GPEN11_B0 0x00020000 /* RW See description of GPEN11_B1 */
#define SYSCFG_IOCR0_GPEN10 0x00018000 /* RW GPEN/GPIO55 Select, 00 GPEN[ */
#define SYSCFG_IOCR0_GPEN9_B0 0x4000 /* RW See description of GPEN9_B1 */
#define SYSCFG_IOCR0_GPEN8_MUX 0x2000 /* RW GPEN/GPIO53 Select, Selectio */
#define SYSCFG_IOCR0_GPEN7 0x1000 /* RW GPEN/GPIO7 Select, 0 GPEN[7] */
#define SYSCFG_IOCR0_SPI_MUX 0x0800 /* RW SPI/UARTC/GPIO, Selection co */
#define SYSCFG_IOCR0_GPIO16_MUX 0x0400 /* RW Bit 10 used to control PWM0 */
#define SYSCFG_IOCR0_GPIO17_MUX 0x0200 /* RW Bit 9 used to control PWM1 o */
#define SYSCFG_IOCR0_GPIO_muxes 0x0180 /* RW GPIO[30:28] muxes, 00 GPIO[3 */
#define SYSCFG_IOCR0_I2S_MUX 0x0060 /* RW GPIO / I2S, 00 I2S, 01 I2S, */
#define SYSCFG_IOCR0_SD1_MUX 0x0018 /* RW 00 SD1, 01 SPI2, 10 2G-TWIF/ */
#define SYSCFG_IOCR0_M68 0x0004 /* RW Select M68 interface on LCD, */
#define SYSCFG_IOCR0_SD3_MUX 0x0003 /* RW GPIO/MSPRO/SD3, 00 SD3, 01 M */
#define SYSCFG_IOCR1 (SYSCFG_BASE_ADDR + 0x0004) /* IOCR1 bit I/O Configuration Register 1 */
#define SYSCFG_IOCR1_STRAP_PULLDOWN_CONTROL 0xFFFF0000 /* RW pull-down control for FADQ15 */
#define SYSCFG_IOCR1_KEY_COL 0xFF00 /* RW KEY_COL/GPIO, 0 GPIO[15:8] p */
#define SYSCFG_IOCR1_KEY_ROW 0x00FF /* RW KEY_ROW/GPIO, 0 GPIO[7:0] pi */
// SUCR: read-only boot-strap / start-up mode status bits.
#define SYSCFG_SUCR (SYSCFG_BASE_ADDR + 0x0008) /* SUCR bit Start Up Mode Register */
#define SYSCFG_SUCR_SPARE 0x80000000 /* RO SPARE strap option, */
#define SYSCFG_SUCR_FLASH_BOOT 0x40000000 /* RO Flash Boot on (FADQ14), 0 Boo */
#define SYSCFG_SUCR_DOWNLOAD 0x20000000 /* RO Download (FADQ13), 0 Normal m */
#define SYSCFG_SUCR_AP 0x10000000 /* RO Audio Precision Testing (FADQ */
#define SYSCFG_SUCR_reserved0 0x0C000000 /* reserved0 */
#define SYSCFG_SUCR_NAND8 0x02000000 /* RO NANDFlash Bus Width (FADQ9), */
#define SYSCFG_SUCR_VCOBYPASS 0x01000000 /* RO VCO_BYPASS strap value (FADQ8 */
#define SYSCFG_SUCR_reserved1 0x00C00000 /* reserved1 */
#define SYSCFG_SUCR_EJTAG_SEL 0x00200000 /* RO Select LV TAP, strap value of */
#define SYSCFG_SUCR_JTAG_SEL 0x001C0000 /* RO JTAG Multi-Core daisy chain s */
#define SYSCFG_SUCR_reserved2 0x0003FFE0 /* reserved2 */
#define SYSCFG_SUCR_Srst_Stat 0x0010 /* RO Soft Reset Status:, 0 Hard re */
#define SYSCFG_SUCR_reserved3 0x0008 /* reserved3 */
#define SYSCFG_SUCR_BootSrc 0x0004 /* RO Boot Source Select: (READ-ONL */
#define SYSCFG_SUCR_reserved4 0x0002 /* reserved4 */
#define SYSCFG_SUCR_DLM 0x0001 /* RO same as DOWNLOAD bit above., */
#define SYSCFG_IOCR2 (SYSCFG_BASE_ADDR + 0x000C) /* IOCR2 bit I/O Configuration Register 2 */
#define SYSCFG_IOCR2_SD2DAT_PULL 0xC0000000 /* RW SD2DAT pull, 00 SD2DAT pad n */
#define SYSCFG_IOCR2_SD2CMD_PULL 0x30000000 /* RW SD2CMD pull, 00 SD2CMD pad n */
#define SYSCFG_IOCR2_SD2CK_PULL 0x0C000000 /* RW SD2CK pull, 00 SD2CK pad no */
#define SYSCFG_IOCR2_reserved0 0x03C00000 /* reserved0 */
#define SYSCFG_IOCR2_GPEN8_MUX_HI 0x00200000 /* RW see description for IOCR0's */
#define SYSCFG_IOCR2_reserved1 0x001E0000 /* reserved1 */
#define SYSCFG_IOCR2_SOFTRSTO_MUX 0x00010000 /* RW Soft Reset output mux select */
#define SYSCFG_IOCR2_OTGCTRL1_MUX 0xC000 /* RW Mux CE5N/GPEN[15] onto OTGCT */
#define SYSCFG_IOCR2_HSOTG2 0x2000 /* RW AHB Slave port Endian select */
#define SYSCFG_IOCR2_HSOTG1 0x1000 /* RW AHB Master port Endian selec */
#define SYSCFG_IOCR2_GPIO 0x0800 /* RW 31-16 slew control Slew cont */
#define SYSCFG_IOCR2_GPIO15_0 0x0400 /* RW slew control Slew control on */
#define SYSCFG_IOCR2_reserved2 0x0380 /* reserved2 */
#define SYSCFG_IOCR2_SIMDAT_HYS 0x0040 /* RW Hysteresis control on SIMDAT */
#define SYSCFG_IOCR2_OSC2_SELECT 0x0030 /* RW Select sel1, sel0 of OSC2, */
#define SYSCFG_IOCR2_OSC2_ENABLE 0x0008 /* RW Enable OSC2 - observe output */
#define SYSCFG_IOCR2_OSC1_SELECT 0x0006 /* RW Select sel1, sel0 of OSC1, */
#define SYSCFG_IOCR2_OSC1_ENABLE 0x0001 /* RW Enable OSC1 - observe output */
#define SYSCFG_PIDR (SYSCFG_BASE_ADDR + 0x0010) /* PIDR bit Product ID Register */
#define SYSCFG_PIDR_reserved0 0xFFFFF000 /* reserved0 */
#define SYSCFG_PIDR_PID 0x0F00 /* RO Product Family ID, 0x2: 2G ba */
#define SYSCFG_PIDR_RID 0x00F0 /* RO Product ID, */
#define SYSCFG_PIDR_TID 0x000F /* RO Revision ID, */
#define SYSCFG_DSPCTRL (SYSCFG_BASE_ADDR + 0x0014) /* DSPCTRL bit DSP Control Register */
#define SYSCFG_DSPCTRL_reserved0 0xFFFFFF00 /* reserved0 */
#define SYSCFG_DSPCTRL_AUDIOSRST 0x0080 /* RW Put Audio in the following */
#define SYSCFG_DSPCTRL_SYNCEXTPRAM 0x0040 /* RW External DSP PRAM Type, Th */
#define SYSCFG_DSPCTRL_JTAGINTWAKE 0x0020 /* RW Control whether DSP JTAG i */
#define SYSCFG_DSPCTRL_en_TRST 0x0010 /* RW Control whether DSP JTAG T */
#define SYSCFG_DSPCTRL_reserved1 0x000F /* reserved1 */
#define SYSCFG_PUMR (SYSCFG_BASE_ADDR + 0x0018) /* PUMR bit Power Up Mode Register */
#define SYSCFG_PUMR_PUMODE 0xFFFFFFFF /* RW Power up mode, */
// IOCR3: per-interface pad disable / pull-control bits.
#define SYSCFG_IOCR3 (SYSCFG_BASE_ADDR + 0x001C) /* IOCR3 bit I/O Configuration Register 3 */
#define SYSCFG_IOCR3_reserved0 0xF8000000 /* reserved0 */
#define SYSCFG_IOCR3_X_TRIG_EN 0x04000000 /* RW 0 Cross-triggering among mul */
#define SYSCFG_IOCR3_reserved1 0x03000000 /* reserved1 */
#define SYSCFG_IOCR3_SIM2_DIS 0x00800000 /* RW 0 Normal operation, 1 SIM2 p */
#define SYSCFG_IOCR3_UARTC_DIS 0x00400000 /* RW 0 Normal operation, 1 UARTC */
#define SYSCFG_IOCR3_TWIF_ENB 0x00200000 /* RW 0 WCDMA 3-wire interface pin */
#define SYSCFG_IOCR3_PC_DIS 0x00100000 /* RW 0 Normal operation, 1 PC out */
#define SYSCFG_IOCR3_SYN_DIS 0x00080000 /* RW 0 Normal operation, 1 SYNCLK */
#define SYSCFG_IOCR3_RXDCK_DIS 0x00040000 /* RW 0 Normal operation, 1 RXDCK */
#define SYSCFG_IOCR3_GPEN_DIS 0x00020000 /* RW 0 Normal operation, 1 GPEN0- */
#define SYSCFG_IOCR3_DA_DIS 0x00010000 /* RW 0 Normal operation, 1 DAILR */
#define SYSCFG_IOCR3_JTAG_DIS 0x8000 /* RW 0 Normal operation (default) */
#define SYSCFG_IOCR3_SPIMS_DIS 0x4000 /* RW 0 Normal operation, 1 SPI/MS */
#define SYSCFG_IOCR3_SIM_DIS 0x2000 /* RW 0 Normal operation, 1 SIM pa */
#define SYSCFG_IOCR3_UARTB_DIS 0x1000 /* RW 0 Normal operation, 1 UARTB */
#define SYSCFG_IOCR3_UARTA_DIS 0x0800 /* RW 0 Normal operation, 1 UARTA */
#define SYSCFG_IOCR3_CAMD_PD 0x0400 /* RW 0 CAMD0-7 pulldown not selec */
#define SYSCFG_IOCR3_CAMD_PU 0x0200 /* RW 0 CAMD0-7 pullup not selecte */
#define SYSCFG_IOCR3_CAMHVS_PD 0x0100 /* RW 0 CAMHS/CAMVS pulldown not s */
#define SYSCFG_IOCR3_CAMHVS_PU 0x0080 /* RW 0 CAMHS/CAMVS pullup not sel */
#define SYSCFG_IOCR3_CAMDCK_PD 0x0040 /* RW 0 CAMDCK pulldown not select */
#define SYSCFG_IOCR3_CAMDCK_PU 0x0020 /* RW 0 CAMDCK pullup not selected */
#define SYSCFG_IOCR3_CAMCK_DIS 0x0010 /* RW 0 Normal operation, 1 CAMCK */
#define SYSCFG_IOCR3_LCD_DIS 0x0008 /* RW 0 Normal operation, 1 LCD pa */
#define SYSCFG_IOCR3_NANDRDY_PD 0x0004 /* RW 0 NANDRDY pulldown not selec */
#define SYSCFG_IOCR3_MBWAIT_PD 0x0002 /* RW 0 MBWAIT pulldown not select */
#define SYSCFG_IOCR3_MEM_DIS 0x0001 /* RW 0 Normal operation, 1 Memory */
// IOCR4: pad drive-strength / slew controls (group continues past this chunk).
#define SYSCFG_IOCR4 (SYSCFG_BASE_ADDR + 0x0020) /* IOCR4 bit I/O Configuration Register 4 */
#define SYSCFG_IOCR4_DAT_PULL 0x80000000 /* RW Pulldown FALE/FCLE pads on r */
#define SYSCFG_IOCR4_LCD_slew 0x40000000 /* RW Slew control on LCD pads., */
#define SYSCFG_IOCR4_SDIO_drive 0x38000000 /* RW Control drive strength of SD */
#define SYSCFG_IOCR4_ETM_drive 0x07000000 /* RW Control drive strengths of T */
#define SYSCFG_IOCR4_LCD_drive 0x00E00000 /* RW Control drive strength of LC */
#define SYSCFG_IOCR4_SIM_drive 0x001C0000 /* RW Control drive strength of SI */
#define SYSCFG_IOCR4_RF_Drive 0x00038000 /* RW Control drive strength of RF */
#define SYSCFG_IOCR4_Camera_Drive 0x7000 /* RW Control drive strength of Ca */
#define SYSCFG_IOCR4_SDMCLK 0x0E00 /* RW Control drive strength of SD */
#define SYSCFG_IOCR4_MBCK_Drive 0x01C0 /* RW Control drive strength of MB */
#define SYSCFG_IOCR4_DAT31_15_Drive 0x0038 /* RW Control drive strength of DA */
#define SYSCFG_IOCR4_MEM_Drive 0x0007 /* RW Control drive strength of al */
#define SYSCFG_IOCR5 (SYSCFG_BASE_ADDR + 0x0024) /* IOCR5 bit I/O Configuration Register 5 */
#define SYSCFG_IOCR5_reserved0 0xE0000000 /* reserved0 */
#define SYSCFG_IOCR5_TRACEBUS_select 0x18000000 /* RW Selects source for TRACE BUS */
#define SYSCFG_IOCR5_Pin_mux_Control_cam 0x06000000 /* RW Camera Port Pin muxing Contr */
#define SYSCFG_IOCR5_Pin_mux_control_sd2 0x01000000 /* RW SD2DAT to SD2DAT7 Pin Muxing */
#define SYSCFG_IOCR5_GPIO27_MUX 0x00C00000 /* RW GPIO27 Pin Muxing Control, 0 */
#define SYSCFG_IOCR5_GPIO26_MUX 0x00300000 /* RW GPIO26 Pin Muxing Control, 0 */
#define SYSCFG_IOCR5_Pin_mux_control_gpio32 0x000FF000 /* RW GPIO32 to GPIO41 Pin Muxing */
#define SYSCFG_IOCR5_GPIO35_MUX 0x0C00 /* RW GPIO35 Pin Muxing Control, 0 */
#define SYSCFG_IOCR5_GPIO34_MUX 0x0300 /* RW GPIO34 Pin Muxing Control, 0 */
#define SYSCFG_IOCR5_GPIO33_MUX 0x00C0 /* RW GPIO33 Pin Muxing Control, 0 */
#define SYSCFG_IOCR5_GPIO32_MUX 0x0030 /* RW GPIO32 Pin Muxing Control, 0 */
#define SYSCFG_IOCR5_GPIO31_MUX 0x000C /* RW GPIO31 Pin Muxing Control, 0 */
#define SYSCFG_IOCR5_reserved1 0x0003 /* reserved1 */
#define SYSCFG_IOCR6 (SYSCFG_BASE_ADDR + 0x0028) /* IOCR6 bit I/O Configuration Register 6 */
#define SYSCFG_IOCR6_reserved0 0x80000000 /* reserved0 */
#define SYSCFG_IOCR6_GPIO21_18_MUX 0x40000000 /* RW muxes out DCSSEL, PCSSEL ont */
#define SYSCFG_IOCR6_Csi_ccp_b 0x20000000 /* RW Controls CSI or CCP mode of */
#define SYSCFG_IOCR6_Cam_mode 0x18000000 /* RW Controls cam_mode of isp_top */
#define SYSCFG_IOCR6_DDAC_FC_pwrdn 0x06000000 /* RW Controls i_d0_pwrdn and i_d1 */
#define SYSCFG_IOCR6_SPI_Drive_strength_control 0x01C00000
/* RW Controls slew on SPI pads, [24], ![23], [22] -- Note but 23 is used inverted, 100 2 mA, 010 4 mA -- Default v */
#define SYSCFG_IOCR6_SPI_slew_control 0x00200000 /* RW Controls Slew on the SPI pad */
#define SYSCFG_IOCR6_DSPDebug_bus 0x00100000 /* RW If this bit is true, the DSP */
#define SYSCFG_IOCR6_GPIO25_24_MUX 0x00080000 /* RW refer IOCR2[3] and IOCR2[0] */
#define SYSCFG_IOCR6_DIGI_PD_XO_enable 0x00040000 /* RW Enable DIGI_PD_XO control vi */
#define SYSCFG_IOCR6_PD_XO_BN_enable 0x00020000 /* RW Enables PD_XO_BN control via */
#define SYSCFG_IOCR6_PD_XO_BP_enable 0x00010000 /* RW Enables PD_XO_BP control via */
#define SYSCFG_IOCR6_PD_XO 0xE000 /* RW override Override PD_XO cont */
#define SYSCFG_IOCR6_PD 0x1C00 /* RW XO_sw_control Software contr */
#define SYSCFG_IOCR6_PD_XO_polarity 0x0380 /* RW Inverts polarity of PD_XO wh */
#define SYSCFG_IOCR6_GPIO23_MUX 0x0040 /* RW mux out PWM3 output, 0 GPIO2 */
#define SYSCFG_IOCR6_GPIO22_MUX 0x0020 /* RW mux out PWM2 output, 0 GPIO2 */
#define SYSCFG_IOCR6_GPIO21_MUX 0x0010 /* RW concatenated with iocr6[30], */
#define SYSCFG_IOCR6_GPIO20_MUX 0x0008 /* RW concatenated with iocr6[30], */
#define SYSCFG_IOCR6_reserved1 0x0004 /* reserved1 */
#define SYSCFG_IOCR6_D1W_maps_to_GPIO 0x0002 /* RW If true, D1W maps to GPIO[21 */
#define SYSCFG_IOCR6_reserved2 0x0001 /* reserved2 */
#define SYSCFG_IOCR7 (SYSCFG_BASE_ADDR + 0x002C) /* IOCR7 bit I/O Configuration Register 7 */
#define SYSCFG_IOCR7_RFGPIO5_MUX 0x80000000 /* RW GPIO mux control on RFGPIO p */
#define SYSCFG_IOCR7_RFGPIO4_MUX 0x40000000 /* RW GPIO mux control on RFGPIO p */
#define SYSCFG_IOCR7_RFGPIO3_MUX 0x20000000 /* RW GPIO mux control on RFGPIO p */
#define SYSCFG_IOCR7_RFGPIO2_MUX 0x10000000 /* RW GPIO mux control on RFGPIO p */
#define SYSCFG_IOCR7_reserved0 0x0FFC0000 /* reserved0 */
#define SYSCFG_IOCR7_RX3G_PULL 0x00030000 /* RW RX3GDATA[2:0] pull control, */
#define SYSCFG_IOCR7_TX3G_SLEW 0x8000 /* RW Slew control on TXDATA3G[2:0 */
#define SYSCFG_IOCR7_TX3G_DRIVE 0x7000 /* RW Drive strength control of TX */
#define SYSCFG_IOCR7_CLKX8_SLEW 0x0800 /* RW Slew control on CLKX8 pad, 0 */
#define SYSCFG_IOCR7_CLKX8_DRIVE 0x0700 /* RW Drive strength control of CL */
#define SYSCFG_IOCR7_reserved1 0x00C0 /* reserved1 */
#define SYSCFG_IOCR7_DATASEL_3G2G 0x0030 /* RW Select between 3G/2G data in */
#define SYSCFG_IOCR7_reserved2 0x000E /* reserved2 */
#define SYSCFG_IOCR7_CTRLSEL_3G2G 0x0001 /* RW Select between 3G/2G control */
#define SYSCFG_TVENCCR (SYSCFG_BASE_ADDR + 0x0034) /* TVENCCR bit Analog TV Out (TVENC) Configurat */
#define SYSCFG_TVENCCR_reserved0 0xFFFFFFC0 /* reserved0 */
#define SYSCFG_TVENCCR_DMA_WAIT_CYCLE 0x0030 /* RW DMA Request Wait Cycle: 0 */
#define SYSCFG_TVENCCR_reserved1 0x0008 /* reserved1 */
#define SYSCFG_TVENCCR_DISPLAYC_HRSTN 0x0004 /* RW Displayc Reset, Active Low */
#define SYSCFG_TVENCCR_VEC_HRSTN 0x0002 /* RW Vec Reset, Active Low, */
#define SYSCFG_TVENCCR_TVENC_EN 0x0001 /* RW Enable Analog TV Out Modul */
#define SYSCFG_DSICR (SYSCFG_BASE_ADDR + 0x0038) /* DSICR bit MIPI DSI Configuration Register */
#define SYSCFG_DSICR_reserved0 0xFFFFFFFE /* reserved0 */
#define SYSCFG_DSICR_DSI_EN 0x0001 /* RW Enable AHB Clock to the MIPI */
#define SYSCFG_MCR (SYSCFG_BASE_ADDR + 0x0040) /* MCR bit Boot ROM Remap Register */
#define SYSCFG_MCR_reserved0 0xFFFFFFFE /* reserved0 */
#define SYSCFG_MCR_REMAP 0x0001 /* WO Writing to this register remov */
#define SYSCFG_MRR (SYSCFG_BASE_ADDR + 0x0044) /* MRR bit Boot ROM Restore Register */
#define SYSCFG_MRR_RESTORE 0xFFFFFFFF /* WO This register restores the boo */
#define SYSCFG_RAMCTRL (SYSCFG_BASE_ADDR + 0x0048) /* RAMCTRL bit RAM Control Register */
#define SYSCFG_RAMCTRL_reserved0 0xFFFE0000 /* reserved0 */
#define SYSCFG_RAMCTRL_RED_OTP_RST 0x00010000 /* RW Redundancy OTP soft reset, */
#define SYSCFG_RAMCTRL_reserved1 0xF800 /* reserved1 */
#define SYSCFG_RAMCTRL_ROM_TM 0x07C0 /* RW ROM Testmode bits, */
#define SYSCFG_RAMCTRL_SRAM_TM 0x003C /* RW SRAM Testmode bits, */
#define SYSCFG_RAMCTRL_STBY 0x0003 /* RW SRAM STBY bits, */
#define SYSCFG_SECCTRL (SYSCFG_BASE_ADDR + 0x0050) /* SECCTRL bit Security Control Register */
#define SYSCFG_SECCTRL_reserved0 0xFFFFFFE0 /* reserved0 */
#define SYSCFG_SECCTRL_CRYPTO_DIS 0x0010 /* RW 0 Enable access to DES and */
#define SYSCFG_SECCTRL_reserved1 0x0008 /* reserved1 */
#define SYSCFG_SECCTRL_OTP_DIS 0x0004 /* RW 0 Enable access to the OTP */
#define SYSCFG_SECCTRL_RTC_DIS_WR 0x0002 /* RW 0 Enable write access to t */
#define SYSCFG_SECCTRL_BRM_DIS_RD 0x0001 /* RW 0 Enable read access to th */
#define SYSCFG_SECSTAT (SYSCFG_BASE_ADDR + 0x0054) /* SECSTAT bit Security Status Register */
#define SYSCFG_SECSTAT_reserved0 0xC0000000 /* reserved0 */
#define SYSCFG_SECSTAT_SEC_MODE_STATE 0x3C000000 /* RO Secure Mode State:, 4'b000 */
#define SYSCFG_SECSTAT_JTAG_DIS 0x02000000 /* RO nvm_glb_disable_jtag (READ */
#define SYSCFG_SECSTAT_ETM_DIS 0x01000000 /* RO nvm_glb_disable_etm (READ- */
#define SYSCFG_SECSTAT_SECURE_DEBUG 0x00FFFFFF /* RO SECURE DEBUG bits from OTP */
#define SYSCFG_MARM11CR (SYSCFG_BASE_ADDR + 0x0058) /* MARM11CR bit Modem ARM11 Top Configuration R */
#define SYSCFG_MARM11CR_reserved0 0xFFFFFFFC /* reserved0 */
#define SYSCFG_MARM11CR_Page 0x0003 /* RW Page[1:0] Controls the pa */
#define SYSCFG_MAMBACR (SYSCFG_BASE_ADDR + 0x0060) /* MAMBACR bit Mamba Top Configuration Register */
#define SYSCFG_MAMBACR_MAMBA_STATUS 0xFF000000 /* RW Mamba (EMI) Status (READ-O */
#define SYSCFG_MAMBACR_reserved0 0x00FFF000 /* reserved0 */
#define SYSCFG_MAMBACR_NOR_REQUEST 0x0800 /* RW I_nor_request, */
#define SYSCFG_MAMBACR_reserved1 0x0400 /* reserved1 */
#define SYSCFG_MAMBACR_PWRDN_EXIT 0x0200 /* RW I_hw_pwrdwn_exit, */
#define SYSCFG_MAMBACR_HIB_EXIT_MODE 0x0100 /* RW I_hib_exit_mode, */
#define SYSCFG_MAMBACR_FREQ_CHANGE_REQ 0x0080 /* RW I_freq_change_req, */
#define SYSCFG_MAMBACR_EMI_TEST 0x0040 /* RW I_emi_test, */
#define SYSCFG_MAMBACR_DEBUG_CTRL 0x003C /* RW I_debug_ctrl[3:0], */
#define SYSCFG_MAMBACR_CLK_EMI_EQ_DRAM 0x0002 /* RW I_clk_emi_eq_dram, */
#define SYSCFG_MAMBACR_reserved2 0x0001 /* reserved2 */
#define SYSCFG_RFCR (SYSCFG_BASE_ADDR + 0x0064) /* RFCR bit Integrated RF Top Configuration Reg */
#define SYSCFG_RFCR_reserved0 0xFFFFFFFF /* reserved0 */
#define SYSCFG_ANACR4 (SYSCFG_BASE_ADDR + 0x0090) /* ANACR4 bit Analog Configuration Register 4 */
#define SYSCFG_ANACR4_reserved0 0xFFFFFFF0 /* reserved0 */
#define SYSCFG_ANACR4_i_shuffctl 0x000C /* RW Shuffler Control:, 00: 1-z- */
#define SYSCFG_ANACR4_i_refampbctl 0x0003 /* RW Reference buffer bias contr */
#define SYSCFG_ANACR5 (SYSCFG_BASE_ADDR + 0x0094) /* ANACR5 bit Analog Configuration Register 5 */
#define SYSCFG_ANACR5_BGTC 0xC0000000 /* RW Temperature compensation ad */
#define SYSCFG_ANACR5_reserved0 0x3F800000 /* reserved0 */
#define SYSCFG_ANACR5_AUX_ADC_SC 0x00600000 /* RW WCDMA Aux DAC Clock Select, */
#define SYSCFG_ANACR5_AUX_DAC_SI 0x00180000 /* RW WCDMA Aux DAC Power Down, 1 */
#define SYSCFG_ANACR5_AUX_DAC0_SC 0x00060000 /* RW WCDMA Aux ADC 0 Clock Selec */
#define SYSCFG_ANACR5_reserved1 0x0001F800 /* reserved1 */
#define SYSCFG_ANACR5_AUX_DAC_CM 0x0780 /* RW WCDMA Aux DAC 0 Output Comm */
#define SYSCFG_ANACR5_AUX_DAC_PD 0x0040 /* RW WCDMA Aux DAC0 Output Pull */
#define SYSCFG_ANACR5_AUX_DAC_IBIAS 0x0030 /* RW WCDMA Aux DAC 0 Opamp Bias */
#define SYSCFG_ANACR5_AUX_CLK_CTRL 0x0008 /* RW WCDMA Aux Clock 0 On/Off Co */
#define SYSCFG_ANACR5_AUX_CLK_INV 0x0004 /* RW WCDMA Aux Clock 0 Signal In */
#define SYSCFG_ANACR5_AUX_Output                                  0x0002 /* RW Two's Complement/Offset B */
#define SYSCFG_ANACR5_Pwd_Aux_dac 0x0001 /* RW Aux DAC 0 Power down, 1: Po */
#define SYSCFG_ANACR6 (SYSCFG_BASE_ADDR + 0x0098) /* ANACR6 bit Analog Configuration Register 6 */
#define SYSCFG_ANACR6_reserved0 0xFFF80000 /* reserved0 */
#define SYSCFG_ANACR6_auxdac1sc 0x00060000 /* RW WCDMA Aux DAC1 Clock Select */
#define SYSCFG_ANACR6_reserved1 0x0001F800 /* reserved1 */
#define SYSCFG_ANACR6_AUX_DAC_CM 0x0780 /* RW WCDMA Aux DAC 1 Output Comm */
#define SYSCFG_ANACR6_AUX_DAC_PD 0x0040 /* RW WCDMA Aux DAC 1 Output Pull */
#define SYSCFG_ANACR6_AUX_DAC_IBIAS 0x0030 /* RW WCDMA Aux DAC 1 Opamp Bias */
#define SYSCFG_ANACR6_AUX_CLK_CTRL 0x0008 /* RW WCDMA AUX Clock 1 On/Off Co */
#define SYSCFG_ANACR6_AUX_CLK_INV 0x0004 /* RW WCDMA AUX Clock 1 Signal In */
#define SYSCFG_ANACR6_WCDMA 0x0002 /* RW AUX DAC Twos Complement/Off */
#define SYSCFG_ANACR6_Pwd_Aux_dac 0x0001 /* RW Aux DAC 0 Power down, 1: Po */
#define SYSCFG_ANACR7 (SYSCFG_BASE_ADDR + 0x009C) /* ANACR7 bit Analog Configuration Register 7 */
#define SYSCFG_ANACR7_reserved0 0x80000000 /* reserved0 */
#define SYSCFG_ANACR7_rxsc 0x40000000 /* RW WCDMA RX Clock Select, 0: N */
#define SYSCFG_ANACR7_reserved1 0x20000000 /* reserved1 */
#define SYSCFG_ANACR7_disablehvregulator                          0x10000000 /* RW 0 Normal Operating, 1 Disa */
#define SYSCFG_ANACR7_inputcmenable 0x08000000 /* RW WCDMA RX AC Common Mode Con */
#define SYSCFG_ANACR7_albselect 0x04000000 /* RW WCDMA RX Analog Loopback Co */
#define SYSCFG_ANACR7_shufflectrl 0x03000000 /* RW WCDMA RX ADC Shuffle Contro */
#define SYSCFG_ANACR7_dithctrl 0x00C00000 /* RW WCDMA RX ADC Dither Control */
#define SYSCFG_ANACR7_vcmctrl_FLASH 0x00300000 /* RW WCDMA RX ADC FLASH Common M */
#define SYSCFG_ANACR7_vcmctrl_INT2 0x000C0000 /* RW WCDMA RX ADC INT2 output Co */
#define SYSCFG_ANACR7_vcmctrl_INT1 0x00030000 /* RW WCDMA RX ADC INT1 output Co */
#define SYSCFG_ANACR7_dithrefoffsetctrl 0xC000 /* RW WCDMA RX ADC Dither Referen */
#define SYSCFG_ANACR7_adcrefoffsetctrl 0x3000 /* RW WCDMA RX ADC reference offs */
#define SYSCFG_ANACR7_biasctrl_INT2 0x0C00 /* RW WCDMA RX ADC INT2 Bias Curr */
#define SYSCFG_ANACR7_biasctrl_INT1 0x0300 /* RW WCDMA RX ADC INT1 Bias Curr */
#define SYSCFG_ANACR7_biasctrl_ADC 0x00C0 /* RW WCDMA RX ADC Bias Current C */
#define SYSCFG_ANACR7_rxpgaset 0x003C /* RW WCDMA RX ADC Input PGA Cont */
#define SYSCFG_ANACR7_rxresetb 0x0002 /* RW 0 Reset WCDMA RX Signal, 1 */
#define SYSCFG_ANACR7_rxpwrdn 0x0001 /* RW 0 WCDMA RX Normal Operation */
#define SYSCFG_ANACR8 (SYSCFG_BASE_ADDR + 0x00A0) /* ANACR8 bit Analog Configuration Register 8 */
#define SYSCFG_ANACR8_reserved0 0xFC000000 /* reserved0 */
#define SYSCFG_ANACR8_ibcasctrl 0x03800000 /* RW WCDMA Transmit DAC Cascade */
#define SYSCFG_ANACR8_reserved1 0x00400000 /* reserved1 */
#define SYSCFG_ANACR8_ibcmctrl 0x00300000 /* RW WCDMA Transmit Common-Mode */
#define SYSCFG_ANACR8_ibampctrl 0x000E0000 /* RW WCDMA Transmit Buffer Ampli */
#define SYSCFG_ANACR8_txampctrl 0x0001E000 /* RW WCDMA Transmit Amplitude co */
#define SYSCFG_ANACR8_txvcmctrl 0x1C00 /* RW WCDMA Transmit Common-Mode */
#define SYSCFG_ANACR8_clk_disable 0x0200 /* RW WCDMA Transmit Clock Contro */
#define SYSCFG_ANACR8_clkinv 0x0100 /* RW WCDMA Transmit Clock Invers */
#define SYSCFG_ANACR8_txobb 0x0080 /* RW WCDMA Transmit Output Forma */
#define SYSCFG_ANACR8_TXSC 0x0060 /* RW WCDMA Transmit Clock Select */
#define SYSCFG_ANACR8_TXSIQ 0x0018 /* RW WCDMA Transmit Q channel Da */
#define SYSCFG_ANACR8_TXSII 0x0006 /* RW WCDMA Transmit I Channel Da */
#define SYSCFG_ANACR8_pwrdnTX 0x0001 /* RW WCDMA Transmit Power Down, */
#define SYSCFG_ANACR9 (SYSCFG_BASE_ADDR + 0x00A4) /* ANACR9 bit Analog Configuration Register 9 */
#define SYSCFG_ANACR9_Generic_ctl 0x80000000 /* RW USB Remote Wake-Up Enable i */
#define SYSCFG_ANACR9_suspend_eco_fix_en 0x40000000 /* RW 0 Not enables the fix for t */
#define SYSCFG_ANACR9_reserved0 0x20000000 /* reserved0 */
#define SYSCFG_ANACR9_afe_non_driving 0x10000000 /* RW Removes all Termination and */
#define SYSCFG_ANACR9_afe_chrpten 0x08000000 /* RW USB Chirp Transmit Control, */
#define SYSCFG_ANACR9_afe_lpback 0x04000000 /* RW 1 USB TX Data Comes Back in */
#define SYSCFG_ANACR9_afe_cdrcken 0x02000000 /* RW && afe_clken USB 960/480/12 */
#define SYSCFG_ANACR9_clk_60_invert 0x01000000 /* RW ???, 0 Invert the Clk60 to */
#define SYSCFG_ANACR9_afe_hstxen 0x00800000 /* RW USB High Speed Current Cont */
#define SYSCFG_ANACR9_reserved1 0x00400000 /* reserved1 */
#define SYSCFG_ANACR9_UTMI_loopback 0x00200000 /* RW UTMI Logic Control, 0 Enabl */
#define SYSCFG_ANACR9_afe_rxlogicr 0x00100000 /* RW This is the CDR 480 clock e */
#define SYSCFG_ANACR9_iost_control 0x00080000 /* RW Direct control over the ios */
#define SYSCFG_ANACR9_Afe_clsp 0x00040000 /* RW resume_filterb, */
#define SYSCFG_ANACR9_Iddq_en 0x00020000 /* RW Bit stuff error enable;, 0 */
#define SYSCFG_ANACR9_reserved2 0x00018000 /* reserved2 */
#define SYSCFG_ANACR9_utmi_discon_phy 0x4000 /* RW 0 Default, 1 Host sees a di */
#define SYSCFG_ANACR9_tx_phase 0x2000 /* RW Flipping 480MHz phase cause */
#define SYSCFG_ANACR9_sync_det_length 0x1C00 /* RW Adjusts USB RX sync detecti */
#define SYSCFG_ANACR9_reset_hi_pll 0x0200 /* RW USB PLL Reset, 0 Disabled, */
#define SYSCFG_ANACR9_pll_suspend_en 0x0100 /* RW USB PLL Power-down During S */
#define SYSCFG_ANACR9_chrp_rx_sel 0x0080 /* RW USB RX Chirp Detection, 0 A */
#define SYSCFG_ANACR9_utmi_pwrdwnb 0x0040 /* RW Port Digital Power-Down, 0 */
#define SYSCFG_ANACR9_soft_resetb 0x0020 /* RW UTMI Soft Reset Control, 0 */
#define SYSCFG_ANACR9_pll_pwrdwnb 0x0010 /* RW PLL Power-Down, 1 Power-On, */
#define SYSCFG_ANACR9_pll_calen 0x0008 /* RW PLL Calibration Control, 0 */
#define SYSCFG_ANACR9_otg_mode 0x0004 /* RW USB Mode Selection, 0 Stand */
#define SYSCFG_ANACR9_hostb_dev 0x0002 /* RW USB Host Selection, 0 Host, */
#define SYSCFG_ANACR9_afe_pwrdwnb 0x0001 /* RW Analog Port Power Control, */
#define SYSCFG_ANACR11 (SYSCFG_BASE_ADDR + 0x00AC) /* ANACR11 bit Analog Configuration Register 11 */
#define SYSCFG_ANACR11_reserved0 0xFFFFFFFF /* reserved0 */
#define SYSCFG_ANACR12 (SYSCFG_BASE_ADDR + 0x00B0) /* ANACR12 bit Analog Configuration Register 12 */
#define SYSCFG_ANACR12_reserved0 0xFFFFFFFF /* reserved0 */
#define SYSCFG_ANACR13 (SYSCFG_BASE_ADDR + 0x00B4) /* ANACR13 bit Analog Configuration Register 13 */
#define SYSCFG_ANACR13_reserved0 0xFFFFFFFF /* reserved0 */
#define SYSCFG_ANACR14 (SYSCFG_BASE_ADDR + 0x00B8) /* ANACR14 bit Analog Configuration Register 14 */
#define SYSCFG_ANACR14_reserved0 0xFFFFFFFF /* reserved0 */
#define SYSCFG_ANACR15 (SYSCFG_BASE_ADDR + 0x00BC) /* ANACR15 bit Analog Configuration Register 15 */
#define SYSCFG_ANACR15_reserved0 0xFFFFFFFF /* reserved0 */
#define SYSCFG_IRDROP_MON0 (SYSCFG_BASE_ADDR + 0x00C0) /* IRDROP_MON0 bit IRDROP Monitor Register 0 */
#define SYSCFG_IRDROP_MON0_reserved0 0xFFFFF000 /* reserved0 */
#define SYSCFG_IRDROP_MON0_OSC_EN 0x0800 /* RW Reserved - For Interna */
#define SYSCFG_IRDROP_MON0_MON_EN 0x0400 /* RW Write 1 to enable coun */
#define SYSCFG_IRDROP_MON0_CNT_OUT 0x03FF /* RW Software can read this */
#define SYSCFG_IRDROP_MON1 (SYSCFG_BASE_ADDR + 0x00C4) /* IRDROP_MON1 bit IRDROP Monitor Register 1 */
#define SYSCFG_IRDROP_MON1_reserved0 0xFFFFF000 /* reserved0 */
#define SYSCFG_IRDROP_MON1_OSC_EN 0x0800 /* RW Reserved - For Interna */
#define SYSCFG_IRDROP_MON1_MON_EN 0x0400 /* RW Write 1 to enable coun */
#define SYSCFG_IRDROP_MON1_CNT_OUT 0x03FF /* RW Software can read this */
#define SYSCFG_IRDROP_MON2 (SYSCFG_BASE_ADDR + 0x00C8) /* IRDROP_MON2 bit IRDROP Monitor Register 2 */
#define SYSCFG_IRDROP_MON2_reserved0 0xFFFFF000 /* reserved0 */
#define SYSCFG_IRDROP_MON2_OSC_EN 0x0800 /* RW Reserved - For Interna */
#define SYSCFG_IRDROP_MON2_MON_EN 0x0400 /* RW Write 1 to enable coun */
#define SYSCFG_IRDROP_MON2_CNT_OUT 0x03FF /* RW Software can read this */
#define SYSCFG_MDIO_WRITE (SYSCFG_BASE_ADDR + 0x00CC) /* MDIO_WRITE MDIO WRITE Register */
#define SYSCFG_MDIO_WRITE_WRITE 0x80000000 /* RW START MDIO write strobe */
#define SYSCFG_MDIO_WRITE_READ 0x40000000 /* RW START MDIO read strobe, */
#define SYSCFG_MDIO_WRITE_MDIO3_SM_SEL 0x20000000 /* RW 1 : Uses internal regis */
#define SYSCFG_MDIO_WRITE_MDIO2 0x1F000000 /* RW ID 5-bit ID. This must */
#define SYSCFG_MDIO_WRITE_MDIO1 0x00FF0000 /* RW ADDR Register address, */
#define SYSCFG_MDIO_WRITE_REG 0xFFFF /* RW WR DATA Register write */
#define SYSCFG_MDIO_READ (SYSCFG_BASE_ADDR + 0x00D0) /* MDIO_READ MDIO READ Register */
#define SYSCFG_MDIO_READ_reserved0 0xFFFC0000 /* reserved0 */
#define SYSCFG_MDIO_READ_VBUS_STAT2 0x00020000 /* RW vbus_status2 read data, */
#define SYSCFG_MDIO_READ_VBUS_STAT1 0x00010000 /* RW vbus_status1 read data, */
#define SYSCFG_MDIO_READ_REG_RD_DATA 0xFFFF /* RW Register read data, */
#define SYSCFG_PERIPH_AHB_CLK_GATE_MASK (SYSCFG_BASE_ADDR + 0x00D8) /* PERIPHERALS AHB CLOCK GATE MASK REGISTER */
#define SYSCFG_PERIPH_AHB_CLK_GATE_MASK_PERIPH_AHB_CLK_GATE_MASK 0xFFFFFFFF
/* RW AHB Clock Gate Mask Register (Read-Write), */
#define SYSCFG_PERIPH_AHB_CLK_GATE_FORCE (SYSCFG_BASE_ADDR + 0x00DC) /* PERIPHERALS AHB CLOCK GATE FORCE REGISTER */
#define SYSCFG_PERIPH_AHB_CLK_GATE_FORCE_PERIPH_AHB_CLK_GATE_FORCE 0xFFFFFFFF
/* RW AHB Clock Gate Force Register (Read-Write), */
#define SYSCFG_PERIPH_AHB_CLK_GATE_MONITOR_RAW (SYSCFG_BASE_ADDR + 0x00E0)
/* PERIPHERALS AHB CLOCK GATE MONITOR RAW REGISTE */
#define SYSCFG_PERIPH_AHB_CLK_GATE_MONITOR_RAW_PERIPH_AHB_CLK_GATE_MON_RAW 0xFFFFFFFF
/* RO AHB Clock Gate Monitor Raw Register (Read-Only), */
#define SYSCFG_PERIPH_AHB_CLK_GATE_MONITOR (SYSCFG_BASE_ADDR + 0x00E4)
/* PERIPHERALS AHB CLOCK GATE MONITOR REGISTE */
#define SYSCFG_PERIPH_AHB_CLK_GATE_MONITOR_PERIPH_AHB_CLK_GATE_MON 0xFFFFFFFF
/* RO AHB Clock Gate Monitor Register (Read-Only), */
#define SYSCFG_PERIPH_VIDEO_CODEC_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x0100)
/* PERIPHERALS VIDEO CODEC AHB CLOCK ENABLE AND REQUEST REGISTE */
#define SYSCFG_PERIPH_VIDEO_CODEC_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_VIDEO_CODEC_AHB_CLK_EN_EN 0x0001
/* RW Video Codec AHB Clock Enable and Request (Read-Write), */
#define SYSCFG_PERIPH_CAMARA_INTERFACE_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x0104)
/* PERIPHERALS CAMARA INTERFACE AHB CLOCK ENABLE AND REQUEST REGISTE */
#define SYSCFG_PERIPH_CAMARA_INTERFACE_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_CAMARA_INTERFACE_AHB_CLK_EN_EN 0x0001
/* RW Camara Interface AHB Clock Enable and Request (Read-Write), */
#define SYSCFG_PERIPH_USB_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x0108) /* PERIPHERALS USB AHB CLOCK ENABLE REGISTER */
#define SYSCFG_PERIPH_USB_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_USB_AHB_CLK_EN_EN 0x0001 /* RW USB AHB Cloc */
#define SYSCFG_PERIPH_GEA_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x010C) /* PERIPHERALS GEA AHB CLOCK ENABLE REGISTER */
#define SYSCFG_PERIPH_GEA_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_GEA_AHB_CLK_EN_EN 0x0001 /* RW GEA AHB Cloc */
#define SYSCFG_PERIPH_CRYPTO_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x0110) /* PERIPHERALS CRYPTO AHB CLOCK ENABLE REGISTER */
#define SYSCFG_PERIPH_CRYPTO_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_CRYPTO_AHB_CLK_EN_EN 0x0001 /* RW Crypto AH */
#define SYSCFG_PERIPH_PKA_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x0114) /* PERIPHERALS PKA AHB CLOCK ENABLE REGISTER */
#define SYSCFG_PERIPH_PKA_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_PKA_AHB_CLK_EN_EN 0x0001 /* RW PKA AHB Cloc */
#define SYSCFG_PERIPH_UARTA_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x0118) /* PERIPHERALS UARTA AHB CLOCK ENABLE REGISTER */
#define SYSCFG_PERIPH_UARTA_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_UARTA_AHB_CLK_EN_EN 0x0001 /* RW UARTA AHB */
#define SYSCFG_PERIPH_UARTB_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x011C) /* PERIPHERALS UARTB AHB CLOCK ENABLE REGISTER */
#define SYSCFG_PERIPH_UARTB_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_UARTB_AHB_CLK_EN_EN 0x0001 /* RW UARTB AHB */
#define SYSCFG_PERIPH_DA_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x0120) /* PERIPHERALS DA AHB CLOCK ENABLE REGISTER */
#define SYSCFG_PERIPH_DA_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_DA_AHB_CLK_EN_EN 0x0001 /* RW DA AHB Clock */
#define SYSCFG_PERIPH_MPCLK_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x0124) /* PERIPHERALS MPCLK AHB CLOCK ENABLE REGISTER */
#define SYSCFG_PERIPH_MPCLK_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_MPCLK_AHB_CLK_EN_EN 0x0001 /* RW MPClk AHB */
#define SYSCFG_PERIPH_LCD_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x012C) /* PERIPHERALS LCD AHB CLOCK ENABLE REGISTER */
#define SYSCFG_PERIPH_LCD_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_LCD_AHB_CLK_EN_EN 0x0001 /* RW LCD AHB Cloc */
#define SYSCFG_PERIPH_DPE_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x0130) /* PERIPHERALS DPE AHB CLOCK ENABLE REGISTER AN */
#define SYSCFG_PERIPH_DPE_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_DPE_AHB_CLK_EN_EN 0x0001 /* RW DPE AHB Cloc */
#define SYSCFG_PERIPH_DMAC_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x0134) /* PERIPHERALS DMAC AHB CLOCK ENABLE REGISTER */
#define SYSCFG_PERIPH_DMAC_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_DMAC_AHB_CLK_EN_EN 0x0001 /* RW DMAC AHB Cl */
#define SYSCFG_PERIPH_SDIO1_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x0138) /* PERIPHERALS SDIO1 AHB CLOCK ENABLE REGISTER */
#define SYSCFG_PERIPH_SDIO1_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_SDIO1_AHB_CLK_EN_EN 0x0001 /* RW SDIO1 AHB */
#define SYSCFG_PERIPH_SDIO2_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x013C) /* PERIPHERALS SDIO2 AHB CLOCK ENABLE REGISTER */
#define SYSCFG_PERIPH_SDIO2_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_SDIO2_AHB_CLK_EN_EN 0x0001 /* RW SDIO2 AHB */
#define SYSCFG_PERIPH_DES_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x0144) /* PERIPHERALS DES AHB CLOCK ENABLE REGISTER */
#define SYSCFG_PERIPH_DES_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_DES_AHB_CLK_EN_EN 0x0001 /* RW DES AHB Cloc */
#define SYSCFG_PERIPH_UARTC_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x014C) /* PERIPHERALS UARTC AHB CLOCK ENABLE REGISTER */
#define SYSCFG_PERIPH_UARTC_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_UARTC_AHB_CLK_EN_EN 0x0001 /* RW UARTC AHB */
#define SYSCFG_PERIPH_RNG_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x0150) /* PERIPHERALS RNG AHB CLOCK ENABLE REGISTER */
#define SYSCFG_PERIPH_RNG_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_RNG_AHB_CLK_EN_EN 0x0001 /* RW RNG AHB Cloc */
#define SYSCFG_PERIPH_SDIO3_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x0154) /* PERIPHERALS SDIO3 AHB CLOCK ENABLE REGISTER */
#define SYSCFG_PERIPH_SDIO3_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_SDIO3_AHB_CLK_EN_EN 0x0001 /* RW SDIO3 AHB */
#define SYSCFG_PERIPH_TVENC_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x0158) /* PERIPHERALS TVENC AHB CLOCK ENABLE REGISTER */
#define SYSCFG_PERIPH_TVENC_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_TVENC_AHB_CLK_EN_EN 0x0001 /* RW TVEnc AHB */
#define SYSCFG_PERIPH_FSUSBHOST_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x015C)
/* PERIPHERALS FSUSBHOST AHB CLOCK ENABLE REGISTE */
#define SYSCFG_PERIPH_FSUSBHOST_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_FSUSBHOST_AHB_CLK_EN_EN 0x0001
/* RW FSUSBHOST AHB Clock Enable (Read-Write), */
#define SYSCFG_PERIPH_MPHI_AHB_CLK_EN (SYSCFG_BASE_ADDR + 0x0160) /* PERIPHERALS MPHI AHB CLOCK ENABLE REGISTER */
#define SYSCFG_PERIPH_MPHI_AHB_CLK_EN_reserved0 0xFFFFFFFE
/* reserved0 */
#define SYSCFG_PERIPH_MPHI_AHB_CLK_EN_EN 0x0001 /* RW MPHI AHB Cl */
#define SYSCFG_TESTABILITY_ACCESS (SYSCFG_BASE_ADDR + 0x0170) /* TESTABILITY_ACCESS Testability Access Regist */
#define SYSCFG_TESTABILITY_ACCESS_reserved0 0xFFFFFFF0 /* reserved0 */
#define SYSCFG_TESTABILITY_ACCESS_ETM_LOCK 0x0008 /* RW ETM Disable Loc */
#define SYSCFG_TESTABILITY_ACCESS_SBD_DISABLE 0x0004
/* RW ETM Disable - Can only disable, ETM for Security Levels 1-2, For Security Level 0:, X: Enable ETM, For Securi */
#define SYSCFG_TESTABILITY_ACCESS_JTAG_DISABLE_LOCK 0x0002
/* RW JTAG Disable Lock - Used by software to lock or unlock JTAG based on Security Configuration and authenticated */
#define SYSCFG_TESTABILITY_ACCESS_JTAG_DISABLE 0x0001
/* RW JTAG Disable - Can only disable JTAG if security level 1 or higher., For Security Level 0:, X: Enable JTAG, F */
#define SYSCFG_DISABLE_OTP_REGION_READ_ACCESS (SYSCFG_BASE_ADDR + 0x0174)
/* DISABLE_OTP_REGION_READ_ACCESS Disable OTP Region Read Access Registe */
#define SYSCFG_DISABLE_OTP_REGION_READ_ACCESS_DIS_OTP_RGN_RD_n 0xFFFFFFFF
/* RW Disables read access to OTP Regions. (Write-once), 0: Read access to OTP region is enabled., 1: Read access t */
#define SYSCFG_DISABLE_OTP_REGION_WRITE_ACCESS (SYSCFG_BASE_ADDR + 0x0178)
/* DISABLE_OTP_REGION_WRITE_ACCESS Disable OTP Region Write Access Registe */
#define SYSCFG_DISABLE_OTP_REGION_WRITE_ACCESS_DIS_OTP_RGN_WR_n 0xFFFFFFFF
/* RW Disables Write access to OTP Regions. (Write-once), 0: Write (Programming) access to OTP region is enabled., */
#define SYSCFG_OTP_DEVICE_STATUS (SYSCFG_BASE_ADDR + 0x017C) /* OTP_DEVICE_STATUS OTP device status bits. */
#define SYSCFG_OTP_DEVICE_STATUS_reserved0 0xFFFF0000 /* reserved0 */
#define SYSCFG_OTP_DEVICE_STATUS_DEVICE_STATUS 0xFFFF
/* RW Device status bits from OTP. (READ-ONLY), */
#define SYSCFG_FPGA_Version (SYSCFG_BASE_ADDR + 0x01FC) /* FPGA_VERSION FPGA Release Version Number */
#define SYSCFG_FPGA_Version_reserved0 0xFFFFFF00 /* reserved0 */
#define SYSCFG_FPGA_Version_FPGA_RLS_ID 0x00FF /* RO FPGA Release ID. (REA */
#endif /* __BRCM_RDB_SYSCFG_H__ */
| sembre/kernel_totoro_update3 | modules/drivers/sound/brcm/alsa_athena/rdb/athena/B0/brcm_rdb_syscfg.h | C | gpl-2.0 | 156,905 |
/*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include <linux/lockdep.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"
#include "ref-verify.h"
#undef SCRAMBLE_DELAYED_REFS
/*
* control flags for do_chunk_alloc's force field
* CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
* if we really need one.
*
* CHUNK_ALLOC_LIMITED means to only try and allocate one
* if we have very few chunks already allocated. This is
* used as part of the clustering code to help make sure
* we have a good pool of storage to cluster in, without
* filling the FS with empty chunks
*
* CHUNK_ALLOC_FORCE means it must try to allocate one
*
*/
enum {
	CHUNK_ALLOC_NO_FORCE = 0,	/* only allocate a chunk if we really need one */
	CHUNK_ALLOC_LIMITED = 1,	/* only allocate if very few chunks exist yet */
	CHUNK_ALLOC_FORCE = 2,		/* must try to allocate a chunk */
};
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node, u64 parent,
u64 root_objectid, u64 owner_objectid,
u64 owner_offset, int refs_to_drop,
struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
struct extent_buffer *leaf,
struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
u64 parent, u64 root_objectid,
u64 flags, u64 owner, u64 offset,
struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
u64 parent, u64 root_objectid,
u64 flags, struct btrfs_disk_key *key,
int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 flags,
int force);
static int find_next_key(struct btrfs_path *path, int level,
struct btrfs_key *key);
static void dump_space_info(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *info, u64 bytes,
int dump_block_groups);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
u64 num_bytes);
static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
u64 num_bytes);
static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
u64 num_bytes);
/*
 * Report whether free-space caching for this block group has reached a
 * terminal state (finished successfully or failed with an error).  The
 * barrier orders this read against the caching thread's state updates.
 */
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	int state;

	smp_mb();
	state = cache->cached;
	return state == BTRFS_CACHE_FINISHED || state == BTRFS_CACHE_ERROR;
}
/* Return nonzero iff every flag bit in @bits is set on @cache. */
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	u64 masked = cache->flags & bits;

	return masked == bits;
}
/* Take a reference on @cache; paired with btrfs_put_block_group(). */
void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}
/*
 * Drop a reference on @cache, freeing it when the last reference goes
 * away.  On the final put the group must have no pinned or reserved
 * bytes left and nobody may still hold its full stripe lock.
 */
void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (!atomic_dec_and_test(&cache->count))
		return;

	WARN_ON(cache->pinned > 0);
	WARN_ON(cache->reserved > 0);
	/*
	 * A non-empty full_stripe_locks_root means some task still holds
	 * the full_stripe_lock mutex; freeing now would turn its later
	 * unlock into a use-after-free.  Nothing can be done to recover
	 * here, so just warn.
	 */
	WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
	kfree(cache->free_space_ctl);
	kfree(cache);
}
/*
* this adds the block group to the fs_info rb tree for the block group
* cache
*/
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
struct btrfs_block_group_cache *block_group)
{
struct rb_node **p;
struct rb_node *parent = NULL;
struct btrfs_block_group_cache *cache;
spin_lock(&info->block_group_cache_lock);
p = &info->block_group_cache_tree.rb_node;
while (*p) {
parent = *p;
cache = rb_entry(parent, struct btrfs_block_group_cache,
cache_node);
if (block_group->key.objectid < cache->key.objectid) {
p = &(*p)->rb_left;
} else if (block_group->key.objectid > cache->key.objectid) {
p = &(*p)->rb_right;
} else {
spin_unlock(&info->block_group_cache_lock);
return -EEXIST;
}
}
rb_link_node(&block_group->cache_node, parent, p);
rb_insert_color(&block_group->cache_node,
&info->block_group_cache_tree);
if (info->first_logical_byte > block_group->key.objectid)
info->first_logical_byte = block_group->key.objectid;
spin_unlock(&info->block_group_cache_lock);
return 0;
}
/*
* This will return the block group at or after bytenr if contains is 0, else
* it will return the block group that contains the bytenr
*/
/*
 * Takes a reference on the returned group (caller must drop it with
 * btrfs_put_block_group()).  A search from bytenr 0 also opportunistically
 * caches the lowest block group start in first_logical_byte.
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;
	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;
	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;
		if (bytenr < start) {
			/* remember the closest group above bytenr so far */
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);
	return ret;
}
/*
 * Mark [start, start + num_bytes) as excluded from free space
 * accounting by tagging the range EXTENT_UPTODATE in both
 * freed_extents trees.  Always returns 0.
 */
static int add_excluded_extent(struct btrfs_fs_info *fs_info,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	int i;

	for (i = 0; i < 2; i++)
		set_extent_bits(&fs_info->freed_extents[i],
				start, end, EXTENT_UPTODATE);
	return 0;
}
/*
 * Undo add_excluded_extent() for the whole range covered by @cache:
 * clear EXTENT_UPTODATE from both freed_extents trees.
 */
static void free_excluded_extents(struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *cache)
{
	u64 first = cache->key.objectid;
	u64 last = first + cache->key.offset - 1;
	int i;

	for (i = 0; i < 2; i++)
		clear_extent_bits(&fs_info->freed_extents[i],
				  first, last, EXTENT_UPTODATE);
}
/*
 * Exclude the superblock mirror copies that fall inside this block
 * group from free space accounting, and account their size in
 * cache->bytes_super.  Returns 0 on success or a negative errno.
 */
static int exclude_super_stripes(struct btrfs_fs_info *fs_info,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;
	/* the area below the first superblock copy is never usable */
	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(fs_info, cache->key.objectid,
					  stripe_len);
		if (ret)
			return ret;
	}
	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		/* map this superblock mirror into logical stripes */
		ret = btrfs_rmap_block(fs_info, cache->key.objectid,
				       bytenr, 0, &logical, &nr, &stripe_len);
		if (ret)
			return ret;
		while (nr--) {
			u64 start, len;
			/* skip stripes entirely outside this block group */
			if (logical[nr] > cache->key.objectid +
			    cache->key.offset)
				continue;
			if (logical[nr] + stripe_len <= cache->key.objectid)
				continue;
			/* clamp the stripe to the block group boundaries */
			start = logical[nr];
			if (start < cache->key.objectid) {
				start = cache->key.objectid;
				len = (logical[nr] + stripe_len) - start;
			} else {
				len = min_t(u64, stripe_len,
					    cache->key.objectid +
					    cache->key.offset - start);
			}
			cache->bytes_super += len;
			ret = add_excluded_extent(fs_info, start, len);
			if (ret) {
				kfree(logical);
				return ret;
			}
		}
		kfree(logical);
	}
	return 0;
}
/*
 * Grab a referenced pointer to the caching control of @cache, or NULL
 * if no caching is in flight.  The caller must drop the reference with
 * put_caching_control().
 */
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl = NULL;

	spin_lock(&cache->lock);
	if (cache->caching_ctl) {
		ctl = cache->caching_ctl;
		refcount_inc(&ctl->count);
	}
	spin_unlock(&cache->lock);
	return ctl;
}
/* Drop a caching control reference; frees the structure on last put. */
static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (!refcount_dec_and_test(&ctl->count))
		return;
	kfree(ctl);
}
#ifdef CONFIG_BTRFS_DEBUG
/*
 * Debug helper: punch holes in the block group's free space so only
 * every other chunk-sized piece remains free.  The chunk size is
 * nodesize for metadata groups, sectorsize otherwise.
 */
static void fragment_free_space(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 cur = block_group->key.objectid;
	u64 remaining = block_group->key.offset;
	u64 chunk;
	u64 stride;

	if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA)
		chunk = fs_info->nodesize;
	else
		chunk = fs_info->sectorsize;
	stride = chunk << 1;

	for (; remaining > chunk; cur += stride) {
		btrfs_remove_free_space(block_group, cur, chunk);
		remaining = remaining < stride ? 0 : remaining - stride;
	}
}
#endif
/*
* this is only called by cache_block_group, since we could have freed extents
* we need to check the pinned_extents for any extents that can't be used yet
* since their free space will be released as soon as the transaction commits.
*/
/*
 * Walk [start, end) and hand every sub-range that is not pinned (i.e.
 * not marked EXTENT_DIRTY | EXTENT_UPTODATE in pinned_extents) to the
 * block group's free space cache.  Pinned ranges become free only once
 * the transaction commits.  Returns the number of bytes added.
 */
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
		       struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 pin_start, pin_end;
	u64 len;
	u64 total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &pin_start, &pin_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (pin_start <= start) {
			/* inside a pinned range; skip past it */
			start = pin_end + 1;
			continue;
		}
		if (pin_start >= end)
			break;

		/* the gap before the next pinned range is free */
		len = pin_start - start;
		total_added += len;
		ret = btrfs_add_free_space(block_group, start, len);
		BUG_ON(ret); /* -ENOMEM or logic error */
		start = pin_end + 1;
	}

	/* tail past the last pinned range */
	if (start < end) {
		len = end - start;
		total_added += len;
		ret = btrfs_add_free_space(block_group, start, len);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}
	return total_added;
}
/*
 * Slow path of block group caching: scan the extent tree (commit root)
 * over the block group's range and feed every gap between allocated
 * extents to the free space cache via add_new_free_space().
 *
 * Runs with caching_ctl->mutex held and commit_root_sem read-locked
 * (both taken by caching_thread()); they are dropped and retaken around
 * voluntary reschedules.  Returns 0 or a negative errno.
 */
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	/* never consider space below the superblock mirror area */
	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;
	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		/* bail out quickly when the filesystem is going down */
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}
		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;
			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				/*
				 * Drop both locks so allocators and the
				 * transaction commit can make progress,
				 * then restart the search from 'last'.
				 */
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}
		/* the commit root may have changed; re-search from 'last' */
		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}
		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}
		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;
		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			/* the gap [last, key.objectid) is free space */
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;
			/* periodically wake up waiters on the caching ctl */
			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;
	/* everything past the last extent item is free space too */
	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Background worker that fills a block group's free space cache, from
 * the free space tree when the compat_ro feature is present, otherwise
 * by scanning the extent tree.  Drops the caching control and block
 * group references taken by cache_block_group() when the work was
 * queued.
 */
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	int ret;
	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;
	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);
	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);
	/* publish the terminal caching state */
	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);
#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * Debug fragmentation: artificially consume half of the freshly
	 * cached free space so allocators see a fragmented group.
	 */
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;
		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->key.offset -
			btrfs_block_group_used(&block_group->item);
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif
	caching_ctl->progress = (u64)-1;
	up_read(&fs_info->commit_root_sem);
	free_excluded_extents(fs_info, block_group);
	mutex_unlock(&caching_ctl->mutex);
	wake_up(&caching_ctl->wait);
	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}
/*
 * Start caching the free space of @cache.
 *
 * With the SPACE_CACHE mount option the on-disk free space cache is
 * first loaded synchronously; on success the group goes straight to
 * BTRFS_CACHE_FINISHED.  Otherwise, unless @load_cache_only is set, the
 * group moves to BTRFS_CACHE_STARTED and caching_thread() is queued to
 * fill the cache in the background.  Returns 0 or a negative errno.
 */
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;
	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;
	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	refcount_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
			caching_thread, NULL, NULL);
	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info. The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group who's cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;
		ctl = cache->caching_ctl;
		refcount_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);
		schedule();
		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}
	/* someone else already started (or finished) caching this group */
	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);
	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(fs_info, cache);
		spin_lock(&cache->lock);
		if (ret == 1) {
			/* fast load succeeded, caching is complete */
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
		if (ret == 1 &&
		    btrfs_should_fragment_free_space(cache)) {
			u64 bytes_used;
			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			bytes_used = cache->key.offset -
				btrfs_block_group_used(&cache->item);
			cache->space_info->bytes_used += bytes_used >> 1;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fragment_free_space(cache);
		}
#endif
		mutex_unlock(&caching_ctl->mutex);
		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info, cache);
			return 0;
		}
	} else {
		/*
		 * We're either using the free space tree or no caching at all.
		 * Set cached to the appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}
	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}
	/* the caching_block_groups list holds its own reference */
	down_write(&fs_info->commit_root_sem);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);
	/* the worker holds a block group reference; dropped in caching_thread() */
	btrfs_get_block_group(cache);
	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
	return ret;
}
/*
 * Return the block group that starts at or after @bytenr, or NULL.
 * The caller must drop the reference with btrfs_put_block_group().
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}
/*
 * Return the block group that contains the given @bytenr, or NULL.
 * The caller must drop the reference with btrfs_put_block_group().
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}
/*
 * Find the space_info matching the block group type bits of @flags
 * (data/metadata/system).  Lockless RCU list walk; returns NULL when no
 * matching space_info exists yet.
 */
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct btrfs_space_info *si;
	struct btrfs_space_info *found = NULL;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(si, &info->space_info, list) {
		if (si->flags & flags) {
			found = si;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}
/*
 * Account @num_bytes (may be negative) against the total_bytes_pinned
 * counter of the space_info that extents with this @owner and
 * @root_objectid belong to.
 */
static void add_pinned_bytes(struct btrfs_fs_info *fs_info, s64 num_bytes,
			     u64 owner, u64 root_objectid)
{
	struct btrfs_space_info *space_info;
	u64 flags = BTRFS_BLOCK_GROUP_DATA;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		/* tree block: system group for the chunk tree, else metadata */
		if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
			flags = BTRFS_BLOCK_GROUP_SYSTEM;
		else
			flags = BTRFS_BLOCK_GROUP_METADATA;
	}

	space_info = __find_space_info(fs_info, flags);
	ASSERT(space_info);
	percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
}
/*
* after adding space to the filesystem, we need to clear the full flags
* on all the space infos.
*/
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
struct list_head *head = &info->space_info;
struct btrfs_space_info *found;
rcu_read_lock();
list_for_each_entry_rcu(found, head, list)
found->full = 0;
rcu_read_unlock();
}
/*
 * Simple helper to search for an existing data extent item at a given
 * offset.  Returns 0 if the item exists, > 0 if not, < 0 on error.
 */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
	struct btrfs_path *path;
	struct btrfs_key key = {
		.objectid = start,
		.type = BTRFS_EXTENT_ITEM_KEY,
		.offset = len,
	};
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	btrfs_free_path(path);
	return ret;
}
/*
* helper function to lookup reference count and flags of a tree block.
*
* the head node for delayed ref is used to store the sum of all the
* reference count modifications queued up in the rbtree. the head
* node may also store the extent flags to set. This way you can check
* to see what the reference count and extent flags would be if all of
* the delayed refs are not processed.
*/
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;
	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
		offset = fs_info->nodesize;
		metadata = 0;
	}
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	/* no transaction: search the read-only commit root locklessly */
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free;
	/*
	 * A skinny METADATA_ITEM may be absent; the same extent can be
	 * recorded as a full EXTENT_ITEM in the previous slot instead.
	 */
	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == fs_info->nodesize)
				ret = 0;
		}
	}
	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}
	if (!trans)
		goto out;
	/* fold in any pending delayed ref modifications for this extent */
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			refcount_inc(&head->refs);
			spin_unlock(&delayed_refs->lock);
			btrfs_release_path(path);
			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref_head(head);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);
		num_refs += head->ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
/*
* Back reference rules. Back refs have three main goals:
*
* 1) differentiate between all holders of references to an extent so that
* when a reference is dropped we can make sure it was a valid reference
* before freeing the extent.
*
* 2) Provide enough information to quickly find the holders of an extent
* if we notice a given block is corrupted or bad.
*
* 3) Make it easy to migrate blocks for FS shrinking or storage pool
* maintenance. This is actually the same as #2, but with a slightly
* different use case.
*
* There are two kinds of back refs. The implicit back refs is optimized
* for pointers in non-shared tree blocks. For a given pointer in a block,
* back refs of this kind provide information about the block's owner tree
* and the pointer's key. These information allow us to find the block by
* b-tree searching. The full back refs is for pointers in tree blocks not
* referenced by their owner trees. The location of tree block is recorded
* in the back refs. Actually the full back refs is generic, and can be
* used in all cases the implicit back refs is used. The major shortcoming
* of the full back refs is its overhead. Every time a tree block gets
* COWed, we have to update back refs entry for all pointers in it.
*
* For a newly allocated tree block, we use implicit back refs for
* pointers in it. This means most tree related operations only involve
* implicit back refs. For a tree block created in old transaction, the
* only way to drop a reference to it is COW it. So we can detect the
* event that tree block loses its owner tree's reference and do the
* back refs conversion.
*
* When a tree block is COWed through a tree, there are four cases:
*
* The reference count of the block is one and the tree is the block's
* owner tree. Nothing to do in this case.
*
* The reference count of the block is one and the tree is not the
* block's owner tree. In this case, full back refs is used for pointers
* in the block. Remove these full back refs, add implicit back refs for
* every pointers in the new block.
*
* The reference count of the block is greater than one and the tree is
* the block's owner tree. In this case, implicit back refs is used for
* pointers in the block. Add full back refs for every pointers in the
* block, increase lower level extents' reference counts. The original
* implicit back refs are entailed to the new block.
*
* The reference count of the block is greater than one and the tree is
* not the block's owner tree. Add implicit back refs for every pointer in
* the new block, increase lower level extents' reference count.
*
* Back Reference Key composing:
*
* The key objectid corresponds to the first byte in the extent,
* The key type is used to differentiate between types of back refs.
* There are different meanings of the key offset for different types
* of back refs.
*
* File extents can be referenced by:
*
* - multiple snapshots, subvolumes, or different generations in one subvol
* - different files inside a single subvolume
* - different offsets inside a file (bookend extents in file.c)
*
* The extent ref structure for the implicit back refs has fields for:
*
* - Objectid of the subvolume root
* - objectid of the file holding the reference
* - original offset in the file
* - how many bookend extents
*
* The key offset for the implicit back refs is hash of the first
* three fields.
*
* The extent ref structure for the full back refs has field for:
*
* - number of pointers in the tree leaf
*
* The key offset for the implicit back refs is the first byte of
* the tree leaf
*
* When a file extent is allocated, The implicit back refs is used.
* the fields are filled in:
*
* (root_key.objectid, inode objectid, offset in file, 1)
*
 * When a file extent is removed during file truncation, we find the
* corresponding implicit back refs and check the following fields:
*
* (btrfs_header_owner(leaf), inode objectid, offset in file)
*
* Btree extents can be referenced by:
*
* - Different subvolumes
*
* Both the implicit back refs and the full back refs for tree blocks
* only consist of key. The key offset for the implicit back refs is
* objectid of block's owner tree. The key offset for the full back refs
* is the first byte of parent block.
*
* When implicit back refs is used, information about the lowest key and
* level of the tree block are required. These information are stored in
* tree block info structure.
*/
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
/*
 * Convert a v0 extent item at the current path slot into the current
 * extent item format, growing the item in place with @extra_size bytes
 * of headroom.  An @owner of (u64)-1 means the owner is unknown and is
 * read from the first EXTENT_REF_V0 item that follows the extent item.
 */
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;
	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);
	/* discover the owner from the following v0 ref items if unknown */
	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);
	/* tree blocks additionally carry a btrfs_tree_block_info */
	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);
	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */
	btrfs_extend_item(fs_info, path, new_size);
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memzero_extent_buffer(leaf, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif
/*
* is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
 * is_data == BTRFS_REF_TYPE_DATA, data type is required,
* is_data == BTRFS_REF_TYPE_ANY, either type is OK.
*/
/*
 * Read and validate the type of an inline extent backref against the
 * expectation in @is_data (see the comment above).  For shared refs the
 * offset field holds the parent tree block's bytenr and must be
 * nodesize-aligned; any violation dumps the leaf and yields
 * BTRFS_REF_TYPE_INVALID.
 */
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
				     struct btrfs_extent_inline_ref *iref,
				     enum btrfs_inline_ref_type is_data)
{
	int type = btrfs_extent_inline_ref_type(eb, iref);
	u64 offset = btrfs_extent_inline_ref_offset(eb, iref);
	if (type == BTRFS_TREE_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_DATA_REF_KEY ||
	    type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (is_data == BTRFS_REF_TYPE_BLOCK) {
			if (type == BTRFS_TREE_BLOCK_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has parent tree
				 * block, which must be aligned to
				 * nodesize.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->nodesize))
					return type;
			}
		} else if (is_data == BTRFS_REF_TYPE_DATA) {
			if (type == BTRFS_EXTENT_DATA_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_DATA_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has parent tree
				 * block, which must be aligned to
				 * nodesize.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->nodesize))
					return type;
			}
		} else {
			ASSERT(is_data == BTRFS_REF_TYPE_ANY);
			return type;
		}
	}
	/* unknown or mismatched ref type: likely corruption */
	btrfs_print_leaf((struct extent_buffer *)eb);
	btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d",
		  eb->start, type);
	WARN_ON(1);
	return BTRFS_REF_TYPE_INVALID;
}
/*
 * Hash the (root, objectid, offset) triple identifying an implicit data
 * backref into the key offset used for EXTENT_DATA_REF items.
 * NOTE(review): the final shift is 31 (not 32).  The result is used as
 * an on-disk key offset (see lookup/insert_extent_data_ref), so the
 * exact formula must never be changed.
 */
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;
	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
/* Hash an EXTENT_DATA_REF item that already sits in a leaf. */
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}
/*
 * Return 1 if the on-leaf extent data ref @ref describes exactly the
 * given (root, owner, offset) triple, 0 otherwise.
 */
static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	return btrfs_extent_data_ref_root(leaf, ref) == root_objectid &&
	       btrfs_extent_data_ref_objectid(leaf, ref) == owner &&
	       btrfs_extent_data_ref_offset(leaf, ref) == offset;
}
/*
 * Find the data backref item for extent @bytenr matching either
 * @parent (shared backref) or the (root, owner, offset) triple
 * (implicit backref, keyed by hash with linear probing across
 * collisions).  Returns 0 with the path on the item, -ENOENT if
 * absent, or another negative errno on error.
 */
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_fs_info *fs_info,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;
	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}
	if (parent) {
		/* shared refs have a fully determined key: exact hit or miss */
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}
	/*
	 * Implicit refs may collide on the hashed key offset; scan
	 * forward until a ref matching root/owner/offset is found or we
	 * run past this extent's EXTENT_DATA_REF items.
	 */
	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				/* the leaf changed under us; redo the search */
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
/*
 * Insert a data backref for extent @bytenr, or bump the count of an
 * existing one by @refs_to_add.  Shared refs (non-zero @parent) are
 * keyed by the parent block; implicit refs are keyed by the hash of
 * (root, owner, offset), with hash collisions resolved by probing
 * successive key offsets.  Returns 0 or a negative errno; the path is
 * released on return.
 */
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_fs_info *fs_info,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;
	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}
	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;
	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			/* freshly inserted item: initialize the ref count */
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			/* -EEXIST: item already present, bump its count */
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		/*
		 * An existing item at this key offset may describe a
		 * different (root, owner, offset) that merely hashes the
		 * same; probe forward until we match or find a free slot.
		 */
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;
			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}
/*
 * Drop @refs_to_drop references from the data backref item that @path
 * currently points at.  When the count reaches zero the item is deleted
 * and *@last_ref is set.
 */
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_fs_info *fs_info,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	/* Read the current count in whichever on-disk format the item uses. */
	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, fs_info->extent_root, path);
		*last_ref = 1;
	} else {
		/* Write back the decremented count in the matching format. */
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}
/*
 * Return the reference count stored in the data ref at @path, or in the
 * inline ref @iref when it is non-NULL.  Unknown item types warn and
 * yield 0.
 */
static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;
	int type;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		/*
		 * If type is invalid, we should have bailed out earlier than
		 * this call.
		 */
		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
		ASSERT(type != BTRFS_REF_TYPE_INVALID);
		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}
/*
 * Look up the keyed backref item for a tree block: shared refs are keyed
 * by @parent, full refs by @root_objectid.  Returns 0 with @path on the
 * item, -ENOENT when absent, or a negative errno.
 */
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_fs_info *fs_info,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path,
				-1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		/* Retry with the pre-v1 reference item type. */
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, fs_info->extent_root, &key,
					path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}
/*
 * Insert a keyed backref item for a tree block.  The item has no payload;
 * the key alone encodes the reference.
 */
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_fs_info *fs_info,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	key.type = parent ? BTRFS_SHARED_BLOCK_REF_KEY :
			    BTRFS_TREE_BLOCK_REF_KEY;
	key.offset = parent ? parent : root_objectid;

	ret = btrfs_insert_empty_item(trans, fs_info->extent_root,
				      path, &key, 0);
	btrfs_release_path(path);
	return ret;
}
/*
 * Map (parent, owner) to the backref item type: tree block vs data extent,
 * shared (keyed by parent block) vs full (keyed by owning root).
 */
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int shared = parent > 0;

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		return shared ? BTRFS_SHARED_BLOCK_REF_KEY
			      : BTRFS_TREE_BLOCK_REF_KEY;
	return shared ? BTRFS_SHARED_DATA_REF_KEY
		      : BTRFS_EXTENT_DATA_REF_KEY;
}
/*
 * Find the key that follows the current path position, walking up from
 * @level until a node with a next slot exists.  Returns 0 and fills @key
 * on success, 1 when the path is at the very end of the tree.
 */
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	while (level < BTRFS_MAX_LEVEL) {
		struct extent_buffer *node = path->nodes[level];
		int next_slot;

		if (!node)
			break;
		next_slot = path->slots[level] + 1;
		if (next_slot >= btrfs_header_nritems(node)) {
			/* This node is exhausted; try one level up. */
			level++;
			continue;
		}
		if (level == 0)
			btrfs_item_key_to_cpu(node, key, next_slot);
		else
			btrfs_node_key_to_cpu(node, key, next_slot);
		return 0;
	}
	return 1;
}
/*
* look for inline back ref. if back ref is found, *ref_ret is set
* to the address of inline back ref, and 0 is returned.
*
* if back ref isn't found, *ref_ret is set to the address where it
* should be inserted, and -ENOENT is returned.
*
* if insert is true and there are too many inline back refs, the path
* points to the extent item, and -EAGAIN is returned.
*
* NOTE: inline back refs are ordered in the same way that back ref
* items in the tree are ordered.
*/
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
	int needed;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		/* Ask the search to leave room for one more inline ref. */
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our parent level, so we can just add one to get the level
	 * for the block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			/* No fat item either; redo with the non-skinny key. */
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		/* Insertion expects the extent item to already exist. */
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, fs_info, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	/* Non-skinny tree block items embed a btrfs_tree_block_info first. */
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	if (owner >= BTRFS_FIRST_FREE_OBJECTID)
		needed = BTRFS_REF_TYPE_DATA;
	else
		needed = BTRFS_REF_TYPE_BLOCK;

	/* Scan the inline refs, which are kept in backref-key order. */
	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
		if (type == BTRFS_REF_TYPE_INVALID) {
			err = -EINVAL;
			goto out;
		}

		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			/* Passed where our hash would sort: insertion point. */
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
/*
* helper to add new inline back ref
*/
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	/* Record where @iref sits; extending the item can move the data. */
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(fs_info, path, size);

	/* Bump the total ref count on the extent item itself. */
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	/* Shift later inline refs up to open a hole for the new one. */
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}
/*
 * Look up a backref for the given extent: first inline within the extent
 * item, then as a separate keyed item (tree block or data, depending on
 * @owner).  On inline success *ref_ret points at the inline ref; on the
 * keyed fallback it is set to NULL.
 */
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, fs_info, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	/* No inline ref; fall back to a keyed backref item. */
	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		return lookup_tree_block_ref(trans, fs_info, path, bytenr,
					     parent, root_objectid);
	return lookup_extent_data_ref(trans, fs_info, path, bytenr, parent,
				      root_objectid, owner, offset);
}
/*
* helper to update/remove inline back ref
*/
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op,
				  int *last_ref)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	/* First adjust the total ref count on the extent item itself. */
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	/*
	 * If type is invalid, we should have bailed out after
	 * lookup_inline_extent_backref().
	 */
	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
	ASSERT(type != BTRFS_REF_TYPE_INVALID);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		/* Block refs carry no count; they can only be dropped. */
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		/* Count hit zero: excise this inline ref from the item. */
		*last_ref = 1;
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(fs_info, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}
/*
 * Add @refs_to_add references inline in the extent item: bump an existing
 * inline ref, or insert a new one at the position the lookup reported.
 * Other lookup results (including -EAGAIN when the item is full) are
 * passed straight back to the caller.
 */
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, fs_info, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	switch (ret) {
	case 0:
		/* Existing inline ref: bump its count in place. */
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		update_inline_extent_backref(fs_info, path, iref,
					     refs_to_add, extent_op, NULL);
		break;
	case -ENOENT:
		/* Room at *iref for a new inline ref: insert it. */
		setup_inline_extent_backref(fs_info, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
		break;
	}
	return ret;
}
/*
 * Insert a keyed (non-inline) backref item, dispatching on whether the
 * owner is a tree root (metadata) or an inode (data).
 */
static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		/* Tree blocks only ever take a single ref at a time. */
		BUG_ON(refs_to_add != 1);
		return insert_tree_block_ref(trans, fs_info, path, bytenr,
					     parent, root_objectid);
	}
	return insert_extent_data_ref(trans, fs_info, path, bytenr,
				      parent, root_objectid,
				      owner, offset, refs_to_add);
}
/*
 * Drop @refs_to_drop references from a backref: inline (when @iref is
 * set), keyed data ref, or keyed tree block ref.  *last_ref is set when
 * the backref is gone entirely.
 */
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data, int *last_ref)
{
	BUG_ON(!is_data && refs_to_drop != 1);

	if (iref) {
		/* Inline ref embedded in the extent item. */
		update_inline_extent_backref(fs_info, path, iref,
					     -refs_to_drop, NULL, last_ref);
		return 0;
	}

	if (is_data)
		return remove_extent_data_ref(trans, fs_info, path,
					      refs_to_drop, last_ref);

	/* Keyed tree block ref: dropping it always removes the item. */
	*last_ref = 1;
	return btrfs_del_item(trans, fs_info->extent_root, path);
}
#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
/*
 * Issue discards for [start, start + len) on @bdev, sector-aligning the
 * range and carving out any superblock copies stored on this device.
 * The number of bytes actually discarded is returned in *discarded_bytes.
 */
static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
			       u64 *discarded_bytes)
{
	int j, ret = 0;
	u64 bytes_left, end;
	u64 aligned_start = ALIGN(start, 1 << 9);

	/* Trim an unaligned head and tail; discards are 512-byte granular. */
	if (WARN_ON(start != aligned_start)) {
		len -= aligned_start - start;
		len = round_down(len, 1 << 9);
		start = aligned_start;
	}

	*discarded_bytes = 0;

	if (!len)
		return 0;

	end = start + len;
	bytes_left = len;

	/* Skip any superblocks on this device. */
	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
		u64 sb_start = btrfs_sb_offset(j);
		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
		u64 size = sb_start - start;

		if (!in_range(sb_start, start, bytes_left) &&
		    !in_range(sb_end, start, bytes_left) &&
		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
			continue;

		/*
		 * Superblock spans beginning of range. Adjust start and
		 * try again.
		 */
		if (sb_start <= start) {
			start += sb_end - start;
			if (start > end) {
				bytes_left = 0;
				break;
			}
			bytes_left = end - start;
			continue;
		}

		/* Discard the chunk that precedes this superblock copy. */
		if (size) {
			ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
						   GFP_NOFS, 0);
			if (!ret)
				*discarded_bytes += size;
			else if (ret != -EOPNOTSUPP)
				return ret;
		}

		start = sb_end;
		if (start > end) {
			bytes_left = 0;
			break;
		}
		bytes_left = end - start;
	}

	/* Discard whatever remains past the last superblock. */
	if (bytes_left) {
		ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
					   GFP_NOFS, 0);
		if (!ret)
			*discarded_bytes += bytes_left;
	}
	return ret;
}
/*
 * Discard the physical stripes backing the logical range
 * [bytenr, bytenr + num_bytes).  When @actual_bytes is non-NULL it
 * receives the number of bytes the devices actually discarded.
 */
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;

	/*
	 * Avoid races with device replace and make sure our bbio has devices
	 * associated to its stripes that don't go away while we are discarding.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, bytenr, &num_bytes,
			      &bbio, 0);
	/* Error condition is -ENOMEM */
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;

		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			u64 bytes;
			struct request_queue *req_q;

			if (!stripe->dev->bdev) {
				/* Missing device; tolerated when degraded. */
				ASSERT(btrfs_test_opt(fs_info, DEGRADED));
				continue;
			}
			req_q = bdev_get_queue(stripe->dev->bdev);
			if (!blk_queue_discard(req_q))
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length,
						  &bytes);
			if (!ret)
				discarded_bytes += bytes;
			else if (ret != -EOPNOTSUPP)
				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		btrfs_put_bbio(bbio);
	}
	btrfs_bio_counter_dec(fs_info);

	if (actual_bytes)
		*actual_bytes = discarded_bytes;


	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret;
}
/*
 * Queue a delayed ref addition for a tree block or a data extent.
 * Can return -ENOMEM.
 */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int old_ref_mod, new_ref_mod;
	int ret;

	/* A tree block owned by the log tree is a disallowed combination. */
	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	btrfs_ref_tree_mod(root, bytenr, num_bytes, parent, root_objectid,
			   owner, offset, BTRFS_ADD_DELAYED_REF);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
						 num_bytes, parent,
						 root_objectid, (int)owner,
						 BTRFS_ADD_DELAYED_REF, NULL,
						 &old_ref_mod, &new_ref_mod);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
						 num_bytes, parent,
						 root_objectid, owner, offset,
						 0, BTRFS_ADD_DELAYED_REF,
						 &old_ref_mod, &new_ref_mod);
	}

	/* Ref mod crossed from negative to non-negative: unpin the bytes. */
	if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
		add_pinned_bytes(fs_info, -num_bytes, owner, root_objectid);

	return ret;
}
/*
 * Add @refs_to_add references to the extent described by @node, preferring
 * an inline backref and falling back to a separate keyed backref item when
 * the extent item has no room (-EAGAIN from the inline insert).
 */
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_delayed_ref_node *node,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	struct btrfs_key key;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	u64 refs;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, fs_info, path, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset,
					   refs_to_add, extent_op);
	if ((ret < 0 && ret != -EAGAIN) || !ret)
		goto out;

	/*
	 * Ok we had -EAGAIN which means we didn't have space to insert and
	 * inline extent ref, so just update the reference count and add a
	 * normal backref.
	 */
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	path->reada = READA_FORWARD;
	path->leave_spinning = 1;
	/* now insert the actual backref */
	ret = insert_extent_backref(trans, fs_info, path, bytenr, parent,
				    root_objectid, owner, offset, refs_to_add);
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Apply a single delayed data ref: create the extent item for a freshly
 * reserved allocation, add a reference, or drop one, depending on the
 * node's action.
 */
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	trace_run_delayed_data_ref(fs_info, node, ref, node->action);

	/* Shared refs carry the parent block; full refs carry only the root. */
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op)
			flags |= extent_op->flags_to_set;
		/* Newly allocated extent: the extent item must be created. */
		ret = alloc_reserved_file_extent(trans, fs_info,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, fs_info, node, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, fs_info, node, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}
/*
 * Apply a delayed extent op to the extent item @ei in @leaf: merge in any
 * flag bits and, for tree block items, rewrite the stored first key.
 */
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 new_flags = btrfs_extent_flags(leaf, ei);

	if (extent_op->update_flags) {
		new_flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, new_flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *block_info;

		/* Key updates only make sense for tree block items. */
		BUG_ON(!(new_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		block_info = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, block_info, &extent_op->key);
	}
}
/*
 * Locate the extent item for @head and apply @extent_op to it.  Tries the
 * skinny metadata key first, falling back to the fat BTRFS_EXTENT_ITEM_KEY
 * form when the skinny item is not present.
 */
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_delayed_ref_head *head,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
	int metadata = !extent_op->is_data;

	if (trans->aborted)
		return 0;

	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		metadata = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = head->bytenr;

	if (metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = extent_op->level;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = head->num_bytes;
	}

again:
	path->reada = READA_FORWARD;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		if (metadata) {
			/* The previous slot may hold the fat extent item. */
			if (path->slots[0] > 0) {
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == head->bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == head->num_bytes)
					ret = 0;
			}
			if (ret > 0) {
				/* No skinny item; re-search with the fat key. */
				btrfs_release_path(path);
				metadata = 0;

				key.objectid = head->bytenr;
				key.offset = head->num_bytes;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				goto again;
			}
		} else {
			err = -EIO;
			goto out;
		}
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, fs_info, path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}
/*
 * Apply a single delayed tree block ref: allocate a reserved block's
 * extent item, add a reference, or drop one, depending on the node's
 * action.  Tree block refs always have |ref_mod| == 1.
 */
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

	ref = btrfs_delayed_node_to_tree_ref(node);
	trace_run_delayed_tree_ref(fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	/* Skinny metadata items key on the level instead of the byte size. */
	ins.objectid = node->bytenr;
	if (skinny_metadata) {
		ins.offset = ref->level;
		ins.type = BTRFS_METADATA_ITEM_KEY;
	} else {
		ins.offset = node->num_bytes;
		ins.type = BTRFS_EXTENT_ITEM_KEY;
	}

	if (node->ref_mod != 1) {
		btrfs_err(fs_info,
	"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
			  node->bytenr, node->ref_mod, node->action, ref_root,
			  parent);
		return -EIO;
	}
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags);
		ret = alloc_reserved_tree_block(trans, fs_info,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, fs_info, node,
					     parent, ref_root,
					     ref->level, 0, 1,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, fs_info, node,
					  parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}
/* Process a single delayed ref entry, dispatching on its node type. */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	if (trans->aborted) {
		/*
		 * The transaction is dead; still pin any reserved extent so
		 * its space is accounted for.
		 */
		if (insert_reserved)
			btrfs_pin_extent(fs_info, node->bytenr,
					 node->num_bytes, 1);
		return 0;
	}

	switch (node->type) {
	case BTRFS_TREE_BLOCK_REF_KEY:
	case BTRFS_SHARED_BLOCK_REF_KEY:
		return run_delayed_tree_ref(trans, fs_info, node, extent_op,
					    insert_reserved);
	case BTRFS_EXTENT_DATA_REF_KEY:
	case BTRFS_SHARED_DATA_REF_KEY:
		return run_delayed_data_ref(trans, fs_info, node, extent_op,
					    insert_reserved);
	default:
		BUG();
	}
	return 0;
}
/* Pick the next delayed ref to run for @head, or NULL when none remain. */
static inline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *first;

	if (RB_EMPTY_ROOT(&head->ref_tree))
		return NULL;

	/*
	 * Prefer BTRFS_ADD_DELAYED_REF entries.  Running a drop first could
	 * take the ref count to zero and delete the extent item while later
	 * adds still expect to find it.
	 */
	if (!list_empty(&head->ref_add_list))
		return list_first_entry(&head->ref_add_list,
					struct btrfs_delayed_ref_node,
					add_list);

	first = rb_entry(rb_first(&head->ref_tree),
			 struct btrfs_delayed_ref_node, ref_node);
	ASSERT(list_empty(&first->add_list));
	return first;
}
/*
 * Undo head selection: mark @head as no longer being processed so another
 * worker may pick it up, then drop its lock.
 */
static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
				      struct btrfs_delayed_ref_head *head)
{
	spin_lock(&delayed_refs->lock);
	head->processing = 0;
	delayed_refs->num_heads_ready++;
	spin_unlock(&delayed_refs->lock);
	btrfs_delayed_ref_unlock(head);
}
/*
 * Run and free the head's pending extent op, if any.
 *
 * Called with head->lock held.  Returns 0 when there was nothing to run
 * (lock still held); otherwise head->lock is dropped and the return is 1
 * on success or a negative errno from run_delayed_extent_op().
 */
static int cleanup_extent_op(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info,
			     struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	int ret;

	if (!extent_op)
		return 0;

	head->extent_op = NULL;
	if (head->must_insert_reserved) {
		/* The op will be folded into the pending insert; just free it. */
		btrfs_free_delayed_extent_op(extent_op);
		return 0;
	}

	spin_unlock(&head->lock);
	ret = run_delayed_extent_op(trans, fs_info, head, extent_op);
	btrfs_free_delayed_extent_op(extent_op);
	return ret ? ret : 1;
}
/*
 * Finish off a ref head whose refs have all been run: run any pending
 * extent op, remove the head from the rbtree and settle its accounting
 * (pinned bytes, pending csums, qgroup reservation).
 *
 * Called with head->lock held.  Returns 0 when the head was fully cleaned
 * up (its reference is dropped), 1 if we had to bail and the caller must
 * loop again (new refs appeared or the extent op ran), <0 on error.
 */
static int cleanup_ref_head(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;
	delayed_refs = &trans->transaction->delayed_refs;
	ret = cleanup_extent_op(trans, fs_info, head);
	if (ret < 0) {
		unselect_delayed_ref_head(delayed_refs, head);
		btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
		return ret;
	} else if (ret) {
		/* Extent op ran and dropped head->lock; retry this head. */
		return ret;
	}
	/*
	 * Need to drop our head ref lock and re-acquire the delayed ref lock
	 * and then re-check to make sure nobody got added.
	 */
	spin_unlock(&head->lock);
	spin_lock(&delayed_refs->lock);
	spin_lock(&head->lock);
	if (!RB_EMPTY_ROOT(&head->ref_tree) || head->extent_op) {
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		return 1;
	}
	delayed_refs->num_heads--;
	rb_erase(&head->href_node, &delayed_refs->href_root);
	RB_CLEAR_NODE(&head->href_node);
	spin_unlock(&delayed_refs->lock);
	spin_unlock(&head->lock);
	atomic_dec(&delayed_refs->num_entries);
	trace_run_delayed_ref_head(fs_info, head, 0);
	if (head->total_ref_mod < 0) {
		/* Net ref drop: the extent's bytes stop being pinned. */
		struct btrfs_block_group_cache *cache;
		cache = btrfs_lookup_block_group(fs_info, head->bytenr);
		ASSERT(cache);
		percpu_counter_add(&cache->space_info->total_bytes_pinned,
				   -head->num_bytes);
		btrfs_put_block_group(cache);
		if (head->is_data) {
			spin_lock(&delayed_refs->lock);
			delayed_refs->pending_csums -= head->num_bytes;
			spin_unlock(&delayed_refs->lock);
		}
	}
	if (head->must_insert_reserved) {
		/* The reserved extent was never used, pin it directly. */
		btrfs_pin_extent(fs_info, head->bytenr,
				 head->num_bytes, 1);
		if (head->is_data) {
			ret = btrfs_del_csums(trans, fs_info, head->bytenr,
					      head->num_bytes);
		}
	}
	/* Also free its reserved qgroup space */
	btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
				      head->qgroup_reserved);
	btrfs_delayed_ref_unlock(head);
	btrfs_put_delayed_ref_head(head);
	return 0;
}
/*
 * Returns 0 on success or if called with an already aborted transaction.
 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
 *
 * Processes up to @nr delayed refs: repeatedly select a ref head, merge its
 * add/drop refs, and apply each ref via run_one_delayed_ref().  Also updates
 * the running average of per-ref processing time used for throttling
 * (fs_info->avg_delayed_ref_runtime).
 */
static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info,
					     unsigned long nr)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	ktime_t start = ktime_get();
	int ret;
	unsigned long count = 0;
	/* Refs actually applied to the tree, excludes empty-head cleanups. */
	unsigned long actual_count = 0;
	int must_insert_reserved = 0;
	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			if (count >= nr)
				break;
			spin_lock(&delayed_refs->lock);
			locked_ref = btrfs_select_ref_head(trans);
			if (!locked_ref) {
				spin_unlock(&delayed_refs->lock);
				break;
			}
			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);
			spin_unlock(&delayed_refs->lock);
			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head. If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}
		/*
		 * We need to try and merge add/drops of the same ref since we
		 * can run into issues with relocate dropping the implicit ref
		 * and then it being added back again before the drop can
		 * finish. If we merged anything we need to re-loop so we can
		 * get a good ref.
		 * Or we can get node references of the same type that weren't
		 * merged when created due to bumps in the tree mod seq, and
		 * we need to merge them to prevent adding an inline extent
		 * backref before dropping it (triggering a BUG_ON at
		 * insert_inline_extent_backref()).
		 */
		spin_lock(&locked_ref->lock);
		btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
					 locked_ref);
		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);
		if (ref && ref->seq &&
		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
			/* Blocked by a pending tree mod seq, put it back. */
			spin_unlock(&locked_ref->lock);
			unselect_delayed_ref_head(delayed_refs, locked_ref);
			locked_ref = NULL;
			cond_resched();
			count++;
			continue;
		}
		/*
		 * We're done processing refs in this ref_head, clean everything
		 * up and move on to the next ref_head.
		 */
		if (!ref) {
			ret = cleanup_ref_head(trans, fs_info, locked_ref);
			if (ret > 0 ) {
				/* We dropped our lock, we need to loop. */
				ret = 0;
				continue;
			} else if (ret) {
				return ret;
			}
			locked_ref = NULL;
			count++;
			continue;
		}
		actual_count++;
		ref->in_tree = 0;
		rb_erase(&ref->ref_node, &locked_ref->ref_tree);
		RB_CLEAR_NODE(&ref->ref_node);
		if (!list_empty(&ref->add_list))
			list_del(&ref->add_list);
		/*
		 * When we play the delayed ref, also correct the ref_mod on
		 * head
		 */
		switch (ref->action) {
		case BTRFS_ADD_DELAYED_REF:
		case BTRFS_ADD_DELAYED_EXTENT:
			locked_ref->ref_mod -= ref->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			locked_ref->ref_mod += ref->ref_mod;
			break;
		default:
			WARN_ON(1);
		}
		atomic_dec(&delayed_refs->num_entries);
		/*
		 * Record the must-insert_reserved flag before we drop the spin
		 * lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;
		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;
		spin_unlock(&locked_ref->lock);
		ret = run_one_delayed_ref(trans, fs_info, ref, extent_op,
					  must_insert_reserved);
		btrfs_free_delayed_extent_op(extent_op);
		if (ret) {
			unselect_delayed_ref_head(delayed_refs, locked_ref);
			btrfs_put_delayed_ref(ref);
			btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
				    ret);
			return ret;
		}
		btrfs_put_delayed_ref(ref);
		count++;
		cond_resched();
	}
	/*
	 * We don't want to include ref heads since we can have empty ref heads
	 * and those will drastically skew our runtime down since we just do
	 * accounting, no actual extent tree updates.
	 */
	if (actual_count > 0) {
		u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
		u64 avg;
		/*
		 * We weigh the current average higher than our current runtime
		 * to avoid large swings in the average.
		 */
		spin_lock(&delayed_refs->lock);
		avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
		fs_info->avg_delayed_ref_runtime = avg >> 2;	/* div by 4 */
		spin_unlock(&delayed_refs->lock);
	}
	return 0;
}
#ifdef SCRAMBLE_DELAYED_REFS
/*
* Normally delayed refs get processed in ascending bytenr order. This
* correlates in most cases to the order added. To expose dependencies on this
* order, we start to process the tree in the middle instead of the beginning
*/
static u64 find_middle(struct rb_root *root)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int alt = 1;
	/*
	 * Initialize so an empty tree returns 0 instead of reading an
	 * uninitialized value (the walk below never executes then).
	 */
	u64 middle = 0;
	u64 first = 0, last = 0;

	n = rb_first(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		first = entry->bytenr;
	}
	n = rb_last(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		last = entry->bytenr;
	}
	/*
	 * Walk down from the root alternating left/right; the bytenr of the
	 * node we end on is the "middle" used as the processing start point.
	 */
	n = root->rb_node;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);
		middle = entry->bytenr;
		if (alt)
			n = n->rb_left;
		else
			n = n->rb_right;
		alt = 1 - alt;
	}
	return middle;
}
#endif
/*
 * Estimate how many extent tree leaves it takes to hold the extent items
 * for @heads delayed ref heads.  Without the SKINNY_METADATA incompat bit
 * each head additionally carries a btrfs_tree_block_info.
 */
static inline u64 heads_to_leaves(struct btrfs_fs_info *fs_info, u64 heads)
{
	u64 bytes_per_head = sizeof(struct btrfs_extent_item) +
			     sizeof(struct btrfs_extent_inline_ref);

	if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		bytes_per_head += sizeof(struct btrfs_tree_block_info);
	/*
	 * Callers double the result because leaves are never filled all the
	 * way; here we just convert bytes to leaves.
	 */
	return div_u64(heads * bytes_per_head, BTRFS_LEAF_DATA_SIZE(fs_info));
}
/*
 * Takes the number of bytes to be csumm'ed and figures out how many leaves
 * it would require to store the csums for that many bytes.
 */
u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
{
	u64 csums = div64_u64(csum_bytes, fs_info->sectorsize);
	u64 csums_per_leaf;

	/* How many individual csum entries fit into one max-sized item. */
	csums_per_leaf = div64_u64(BTRFS_MAX_ITEM_SIZE(fs_info),
			(u64)btrfs_super_csum_size(fs_info->super_copy));
	/* Round up to whole leaves. */
	return div64_u64(csums + csums_per_leaf - 1, csums_per_leaf);
}
int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info)
{
struct btrfs_block_rsv *global_rsv;
u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
unsigned int num_dirty_bgs = trans->transaction->num_dirty_bgs;
u64 num_bytes, num_dirty_bgs_bytes;
int ret = 0;
num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
num_heads = heads_to_leaves(fs_info, num_heads);
if (num_heads > 1)
num_bytes += (num_heads - 1) * fs_info->nodesize;
num_bytes <<= 1;
num_bytes += btrfs_csum_bytes_to_leaves(fs_info, csum_bytes) *
fs_info->nodesize;
num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(fs_info,
num_dirty_bgs);
global_rsv = &fs_info->global_block_rsv;
/*
* If we can't allocate any more chunks lets make sure we have _lots_ of
* wiggle room since running delayed refs can create more delayed refs.
*/
if (global_rsv->space_info->full) {
num_dirty_bgs_bytes <<= 1;
num_bytes <<= 1;
}
spin_lock(&global_rsv->lock);
if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
ret = 1;
spin_unlock(&global_rsv->lock);
return ret;
}
/*
 * Decide whether the caller should throttle and run delayed refs, based on
 * the estimated time to process all queued entries.
 *
 * Returns 1 (strong throttle) when the backlog is worth >= 1s of work,
 * 2 (soft throttle) when it is worth >= 0.5s, otherwise falls through to
 * the space-based check.
 */
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_fs_info *fs_info)
{
	u64 entries = atomic_read(&trans->transaction->delayed_refs.num_entries);
	u64 est;

	/* Pair with writers of avg_delayed_ref_runtime. */
	smp_mb();
	est = entries * fs_info->avg_delayed_ref_runtime;
	if (est >= NSEC_PER_SEC)
		return 1;
	if (est >= NSEC_PER_SEC / 2)
		return 2;
	return btrfs_check_space_for_delayed_refs(trans, fs_info);
}
/* State for one asynchronous delayed-ref flush queued on extent_workers. */
struct async_delayed_refs {
	struct btrfs_root *root;	/* root used to join the transaction */
	u64 transid;		/* only flush while still in this transaction */
	int count;		/* number of refs to run */
	int error;		/* result reported back to a synchronous waiter */
	int sync;		/* if set, complete "wait" instead of self-freeing */
	struct completion wait;
	struct btrfs_work work;
};
/* Map an embedded btrfs_work back to its owning async_delayed_refs. */
static inline struct async_delayed_refs *
to_async_delayed_refs(struct btrfs_work *work)
{
	return container_of(work, struct async_delayed_refs, work);
}
/*
 * Worker callback for btrfs_async_run_delayed_refs(): join the current
 * transaction and run async->count delayed refs, recording any error in
 * async->error.  Wakes the waiter for synchronous requests, otherwise frees
 * the request itself.
 */
static void delayed_ref_async_start(struct btrfs_work *work)
{
	struct async_delayed_refs *async = to_async_delayed_refs(work);
	struct btrfs_trans_handle *trans;
	struct btrfs_fs_info *fs_info = async->root->fs_info;
	int ret;
	/* if the commit is already started, we don't need to wait here */
	if (btrfs_transaction_blocked(fs_info))
		goto done;
	trans = btrfs_join_transaction(async->root);
	if (IS_ERR(trans)) {
		async->error = PTR_ERR(trans);
		goto done;
	}
	/*
	 * trans->sync means that when we call end_transaction, we won't
	 * wait on delayed refs
	 */
	trans->sync = true;
	/* Don't bother flushing if we got into a different transaction */
	if (trans->transid > async->transid)
		goto end;
	ret = btrfs_run_delayed_refs(trans, fs_info, async->count);
	if (ret)
		async->error = ret;
end:
	ret = btrfs_end_transaction(trans);
	if (ret && !async->error)
		async->error = ret;
done:
	if (async->sync)
		complete(&async->wait);
	else
		kfree(async);
}
/*
 * Queue an asynchronous run of @count delayed refs on the extent_workers
 * workqueue.  With @wait set, block until the worker finishes and return
 * its result; otherwise return immediately (the worker frees the request).
 *
 * Returns -ENOMEM if the request can't be allocated.
 */
int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
				 unsigned long count, u64 transid, int wait)
{
	struct async_delayed_refs *async;
	int err;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->root = fs_info->tree_root;
	async->transid = transid;
	async->count = count;
	async->error = 0;
	async->sync = wait ? 1 : 0;
	init_completion(&async->wait);

	btrfs_init_work(&async->work, btrfs_extent_refs_helper,
			delayed_ref_async_start, NULL, NULL);
	btrfs_queue_work(fs_info->extent_workers, &async->work);

	if (!wait)
		return 0;

	wait_for_completion(&async->wait);
	err = async->error;
	kfree(async);
	return err;
}
/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far. count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 *
 * Returns 0 on success or if called with an aborted transaction
 * Returns <0 on error and aborts the transaction
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_fs_info *fs_info, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	int ret;
	int run_all = count == (unsigned long)-1;
	bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
	/* We'll clean this up in btrfs_cleanup_transaction */
	if (trans->aborted)
		return 0;
	if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
		return 0;
	delayed_refs = &trans->transaction->delayed_refs;
	if (count == 0)
		count = atomic_read(&delayed_refs->num_entries) * 2;
again:
#ifdef SCRAMBLE_DELAYED_REFS
	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif
	/* Running refs may queue new block groups; don't flush them here. */
	trans->can_flush_pending_bgs = false;
	ret = __btrfs_run_delayed_refs(trans, fs_info, count);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}
	if (run_all) {
		if (!list_empty(&trans->new_bgs))
			btrfs_create_pending_block_groups(trans, fs_info);
		/*
		 * If any heads remain, another task holds them locked; wait
		 * on the first one and start over until the tree is empty.
		 */
		spin_lock(&delayed_refs->lock);
		node = rb_first(&delayed_refs->href_root);
		if (!node) {
			spin_unlock(&delayed_refs->lock);
			goto out;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
		refcount_inc(&head->refs);
		spin_unlock(&delayed_refs->lock);
		/* Mutex was contended, block until it's released and retry. */
		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		cond_resched();
		goto again;
	}
out:
	trans->can_flush_pending_bgs = can_flush_pending_bgs;
	return 0;
}
/*
 * Queue a delayed extent op that sets @flags on the extent item for
 * [@bytenr, @bytenr + @num_bytes).  The update is applied when the delayed
 * refs are run.
 *
 * Returns 0 on success, -ENOMEM if the op can't be allocated, or the error
 * from btrfs_add_delayed_extent_op().
 */
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info,
				u64 bytenr, u64 num_bytes, u64 flags,
				int level, int is_data)
{
	struct btrfs_delayed_extent_op *op;
	int ret;

	op = btrfs_alloc_delayed_extent_op();
	if (!op)
		return -ENOMEM;

	op->flags_to_set = flags;
	op->level = level;
	op->is_data = is_data ? true : false;
	op->update_flags = true;
	op->update_key = false;

	ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr, num_bytes,
					  op);
	if (ret)
		btrfs_free_delayed_extent_op(op);
	return ret;
}
/*
 * Look through the delayed refs of the running transaction for a data ref
 * to @bytenr that does NOT belong to (root, @objectid, @offset).
 *
 * Returns 1 if such a cross reference exists, 0 if none was found, and
 * -EAGAIN if the head mutex was contended; in that case @path has been
 * released and the caller must retry.
 */
static noinline int check_delayed_ref(struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_transaction *cur_trans;
	struct rb_node *node;
	int ret = 0;
	cur_trans = root->fs_info->running_transaction;
	if (!cur_trans)
		return 0;
	delayed_refs = &cur_trans->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (!head) {
		spin_unlock(&delayed_refs->lock);
		return 0;
	}
	if (!mutex_trylock(&head->mutex)) {
		refcount_inc(&head->refs);
		spin_unlock(&delayed_refs->lock);
		btrfs_release_path(path);
		/*
		 * Mutex was contended, block until it's released and let
		 * caller try again
		 */
		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	spin_unlock(&delayed_refs->lock);
	spin_lock(&head->lock);
	/*
	 * XXX: We should replace this with a proper search function in the
	 * future.
	 */
	for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		/* If it's a shared ref we know a cross reference exists */
		if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
			ret = 1;
			break;
		}
		data_ref = btrfs_delayed_node_to_data_ref(ref);
		/*
		 * If our ref doesn't match the one we're currently looking at
		 * then we have a cross reference.
		 */
		if (data_ref->root != root->root_key.objectid ||
		    data_ref->objectid != objectid ||
		    data_ref->offset != offset) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&head->lock);
	mutex_unlock(&head->mutex);
	return ret;
}
/*
 * Check the committed extent tree for refs to @bytenr other than the single
 * inline data ref (root, @objectid, @offset).
 *
 * Returns 0 only when the extent provably has exactly that one inline data
 * ref and wasn't created before the last snapshot.  Returns 1 or -ENOENT
 * when a cross reference may exist, <0 on search error.
 */
static noinline int check_committed_ref(struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int type;
	int ret;
	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	/* offset (u64)-1 can never match; we land just past the item. */
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0); /* Corruption */
	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;
	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;
	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		goto out;
	}
#endif
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	/* Any extra inline refs mean we can't prove exclusive ownership. */
	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;
	/* Extents older than the last snapshot may be shared with it. */
	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;
	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
	if (type != BTRFS_EXTENT_DATA_REF_KEY)
		goto out;
	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;
	ret = 0;
out:
	return ret;
}
/*
 * Determine whether any reference to @bytenr exists besides the data ref
 * (root, @objectid, @offset), checking both the committed extent tree and
 * the delayed refs of the running transaction.
 *
 * Returns 0 when no cross reference exists, non-zero otherwise (including
 * errors from the underlying checks).
 */
int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
			  u64 bytenr)
{
	struct btrfs_path *path;
	int committed;
	int delayed;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOENT;

	/* Retry while the delayed check loses the race for the head mutex. */
	while (1) {
		committed = check_committed_ref(root, path, objectid, offset,
						bytenr);
		if (committed && committed != -ENOENT) {
			ret = committed;
			goto out;
		}
		delayed = check_delayed_ref(root, path, objectid, offset,
					    bytenr);
		if (delayed != -EAGAIN)
			break;
	}
	if (delayed && delayed != -ENOENT) {
		ret = delayed;
		goto out;
	}
	ret = committed;
	/* -ENOENT from both checks means "no cross reference". */
	if (committed != -ENOENT || delayed != -ENOENT)
		ret = 0;
out:
	btrfs_free_path(path);
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		WARN_ON(ret > 0);
	return ret;
}
/*
 * Add (@inc != 0) or drop one extent reference for everything @buf points
 * at: disk extents of the file extent items when @buf is a leaf, child
 * nodes otherwise.  With @full_backref set the refs are parented to @buf
 * itself instead of being tree-level refs.
 *
 * Returns 0 on success or the first error from the per-item ref update.
 */
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	/* Either btrfs_inc_extent_ref or btrfs_free_extent, chosen below. */
	int (*process_func)(struct btrfs_trans_handle *,
			    struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64);
	if (btrfs_is_testing(fs_info))
		return 0;
	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);
	/* Non-COW roots don't track refs for leaf data. */
	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
		return 0;
	if (inc)
		process_func = btrfs_inc_extent_ref;
	else
		process_func = btrfs_free_extent;
	if (full_backref)
		parent = buf->start;
	else
		parent = 0;
	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (key.type != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			/* Inline extents live in the leaf, no extra refs. */
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;
			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			/* The ref is keyed by the extent's logical offset. */
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = fs_info->nodesize;
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	return ret;
}
/* Increment extent refs for everything @buf references; see __btrfs_mod_ref(). */
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}
/* Drop extent refs for everything @buf references; see __btrfs_mod_ref(). */
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}
/*
 * Copy @cache->item into its existing block group item in the extent tree.
 *
 * Returns 0 on success, -ENOENT if the item is missing (it may still be
 * attached to another task's transaction handle), or a search error.
 * Releases @path in all cases.
 */
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	struct extent_buffer *leaf;
	unsigned long offset;
	int ret;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &cache->key,
				path, 0, 1);
	if (ret) {
		/* The item must already exist; a miss is an error here. */
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	offset = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, offset, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_release_path(path);
	return ret;
}
/*
 * Return the block group following @cache in bytenr order, taking a
 * reference on it, or NULL at the end.  Always drops the reference on
 * @cache, even when it was concurrently removed from the rbtree.
 */
static struct btrfs_block_group_cache *
next_block_group(struct btrfs_fs_info *fs_info,
		 struct btrfs_block_group_cache *cache)
{
	struct btrfs_block_group_cache *next = NULL;
	struct rb_node *node;

	spin_lock(&fs_info->block_group_cache_lock);
	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->key.objectid + cache->key.offset;

		spin_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return btrfs_lookup_first_block_group(fs_info, next_bytenr);
	}

	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		next = rb_entry(node, struct btrfs_block_group_cache,
				cache_node);
		btrfs_get_block_group(next);
	}
	spin_unlock(&fs_info->block_group_cache_lock);
	return next;
}
/*
 * Prepare the free space cache inode of @block_group for being written out
 * in this transaction: create/look up the inode, reset its generation,
 * truncate any stale contents and preallocate room for the new cache.
 *
 * Sets block_group->disk_cache_state to BTRFS_DC_SETUP on success,
 * BTRFS_DC_WRITTEN when no cache is wanted, or BTRFS_DC_ERROR on failure.
 */
static int cache_save_setup(struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode = NULL;
	struct extent_changeset *data_reserved = NULL;
	u64 alloc_hint = 0;
	int dcs = BTRFS_DC_ERROR;
	u64 num_pages = 0;
	int retries = 0;
	int ret = 0;
	/*
	 * If this block group is smaller than 100 megs don't bother caching the
	 * block group.
	 */
	if (block_group->key.offset < (100 * SZ_1M)) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		return 0;
	}
	if (trans->aborted)
		return 0;
again:
	inode = lookup_free_space_inode(fs_info, block_group, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		btrfs_release_path(path);
		goto out;
	}
	if (IS_ERR(inode)) {
		/* -ENOENT: create the inode once, then retry the lookup. */
		BUG_ON(retries);
		retries++;
		if (block_group->ro)
			goto out_free;
		ret = create_free_space_inode(fs_info, trans, block_group,
					      path);
		if (ret)
			goto out_free;
		goto again;
	}
	/*
	 * We want to set the generation to 0, that way if anything goes wrong
	 * from here on out we know not to trust this cache when we load up next
	 * time.
	 */
	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		/*
		 * So theoretically we could recover from this, simply set the
		 * super cache generation to 0 so we know to invalidate the
		 * cache, but then we'd have to keep track of the block groups
		 * that fail this way so we know we _have_ to reset this cache
		 * before the next commit or risk reading stale cache. So to
		 * limit our exposure to horrible edge cases lets just abort the
		 * transaction, this only happens in really bad situations
		 * anyway.
		 */
		btrfs_abort_transaction(trans, ret);
		goto out_put;
	}
	WARN_ON(ret);
	/* We've already setup this transaction, go ahead and exit */
	if (block_group->cache_generation == trans->transid &&
	    i_size_read(inode)) {
		dcs = BTRFS_DC_SETUP;
		goto out_put;
	}
	if (i_size_read(inode) > 0) {
		/* Throw away the previous cache contents. */
		ret = btrfs_check_trunc_cache_free_space(fs_info,
					&fs_info->global_block_rsv);
		if (ret)
			goto out_put;
		ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
		if (ret)
			goto out_put;
	}
	spin_lock(&block_group->lock);
	if (block_group->cached != BTRFS_CACHE_FINISHED ||
	    !btrfs_test_opt(fs_info, SPACE_CACHE)) {
		/*
		 * don't bother trying to write stuff out _if_
		 * a) we're not cached,
		 * b) we're with nospace_cache mount option,
		 * c) we're with v2 space_cache (FREE_SPACE_TREE).
		 */
		dcs = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		goto out_put;
	}
	spin_unlock(&block_group->lock);
	/*
	 * We hit an ENOSPC when setting up the cache in this transaction, just
	 * skip doing the setup, we've already cleared the cache so we're safe.
	 */
	if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
		ret = -ENOSPC;
		goto out_put;
	}
	/*
	 * Try to preallocate enough space based on how big the block group is.
	 * Keep in mind this has to include any pinned space which could end up
	 * taking up quite a bit since it's not folded into the other space
	 * cache.
	 */
	num_pages = div_u64(block_group->key.offset, SZ_256M);
	if (!num_pages)
		num_pages = 1;
	num_pages *= 16;
	num_pages *= PAGE_SIZE;
	ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
	if (ret)
		goto out_put;
	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
					      num_pages, num_pages,
					      &alloc_hint);
	/*
	 * Our cache requires contiguous chunks so that we don't modify a bunch
	 * of metadata or split extents when writing the cache out, which means
	 * we can enospc if we are heavily fragmented in addition to just normal
	 * out of space conditions. So if we hit this just skip setting up any
	 * other block groups for this transaction, maybe we'll unpin enough
	 * space the next time around.
	 */
	if (!ret)
		dcs = BTRFS_DC_SETUP;
	else if (ret == -ENOSPC)
		set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
out_put:
	iput(inode);
out_free:
	btrfs_release_path(path);
out:
	spin_lock(&block_group->lock);
	if (!ret && dcs == BTRFS_DC_SETUP)
		block_group->cache_generation = trans->transid;
	block_group->disk_cache_state = dcs;
	spin_unlock(&block_group->lock);
	extent_changeset_free(data_reserved);
	return ret;
}
/*
 * Run cache_save_setup() for every dirty block group that still has a clear
 * disk cache, so the space cache inodes exist before the transaction commit.
 *
 * Returns 0 (setup failures are recorded per block group), or -ENOMEM if a
 * path can't be allocated.
 */
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_block_group_cache *cache;
	struct btrfs_block_group_cache *next;
	struct btrfs_path *path;

	if (!btrfs_test_opt(fs_info, SPACE_CACHE) ||
	    list_empty(&cur_trans->dirty_bgs))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Could add new block groups, use _safe just in case */
	list_for_each_entry_safe(cache, next, &cur_trans->dirty_bgs,
				 dirty_list) {
		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
			cache_save_setup(cache, trans, path);
	}

	btrfs_free_path(path);
	return 0;
}
/*
 * transaction commit does final block group cache writeback during a
 * critical section where nothing is allowed to change the FS. This is
 * required in order for the cache to actually match the block group,
 * but can introduce a lot of latency into the commit.
 *
 * So, btrfs_start_dirty_block_groups is here to kick off block group
 * cache IO. There's a chance we'll have to redo some of it if the
 * block group changes again during the commit, but it greatly reduces
 * the commit latency by getting rid of the easy block groups while
 * we're still allowing others to join the commit.
 */
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int ret = 0;
	int should_put;
	struct btrfs_path *path = NULL;
	LIST_HEAD(dirty);
	struct list_head *io = &cur_trans->io_bgs;
	int num_started = 0;
	int loops = 0;
	/* Work on a private snapshot of the dirty list. */
	spin_lock(&cur_trans->dirty_bgs_lock);
	if (list_empty(&cur_trans->dirty_bgs)) {
		spin_unlock(&cur_trans->dirty_bgs_lock);
		return 0;
	}
	list_splice_init(&cur_trans->dirty_bgs, &dirty);
	spin_unlock(&cur_trans->dirty_bgs_lock);
again:
	/*
	 * make sure all the block groups on our dirty list actually
	 * exist
	 */
	btrfs_create_pending_block_groups(trans, fs_info);
	if (!path) {
		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;
	}
	/*
	 * cache_write_mutex is here only to save us from balance or automatic
	 * removal of empty block groups deleting this block group while we are
	 * writing out the cache
	 */
	mutex_lock(&trans->transaction->cache_write_mutex);
	while (!list_empty(&dirty)) {
		cache = list_first_entry(&dirty,
					 struct btrfs_block_group_cache,
					 dirty_list);
		/*
		 * this can happen if something re-dirties a block
		 * group that is already under IO. Just wait for it to
		 * finish and then do it all again
		 */
		if (!list_empty(&cache->io_list)) {
			list_del_init(&cache->io_list);
			btrfs_wait_cache_io(trans, cache, path);
			btrfs_put_block_group(cache);
		}
		/*
		 * btrfs_wait_cache_io uses the cache->dirty_list to decide
		 * if it should update the cache_state.  Don't delete
		 * until after we wait.
		 *
		 * Since we're not running in the commit critical section
		 * we need the dirty_bgs_lock to protect from update_block_group
		 */
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_del_init(&cache->dirty_list);
		spin_unlock(&cur_trans->dirty_bgs_lock);
		/* Dropped unless the cache ends up on the io list below. */
		should_put = 1;
		cache_save_setup(cache, trans, path);
		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
			cache->io_ctl.inode = NULL;
			ret = btrfs_write_out_cache(fs_info, trans,
						    cache, path);
			if (ret == 0 && cache->io_ctl.inode) {
				num_started++;
				should_put = 0;
				/*
				 * the cache_write_mutex is protecting
				 * the io_list
				 */
				list_add_tail(&cache->io_list, io);
			} else {
				/*
				 * if we failed to write the cache, the
				 * generation will be bad and life goes on
				 */
				ret = 0;
			}
		}
		if (!ret) {
			ret = write_one_cache_group(trans, fs_info,
						    path, cache);
			/*
			 * Our block group might still be attached to the list
			 * of new block groups in the transaction handle of some
			 * other task (struct btrfs_trans_handle->new_bgs). This
			 * means its block group item isn't yet in the extent
			 * tree. If this happens ignore the error, as we will
			 * try again later in the critical section of the
			 * transaction commit.
			 */
			if (ret == -ENOENT) {
				ret = 0;
				spin_lock(&cur_trans->dirty_bgs_lock);
				if (list_empty(&cache->dirty_list)) {
					list_add_tail(&cache->dirty_list,
						      &cur_trans->dirty_bgs);
					btrfs_get_block_group(cache);
				}
				spin_unlock(&cur_trans->dirty_bgs_lock);
			} else if (ret) {
				btrfs_abort_transaction(trans, ret);
			}
		}
		/* if its not on the io list, we need to put the block group */
		if (should_put)
			btrfs_put_block_group(cache);
		if (ret)
			break;
		/*
		 * Avoid blocking other tasks for too long. It might even save
		 * us from writing caches for block groups that are going to be
		 * removed.
		 */
		mutex_unlock(&trans->transaction->cache_write_mutex);
		mutex_lock(&trans->transaction->cache_write_mutex);
	}
	mutex_unlock(&trans->transaction->cache_write_mutex);
	/*
	 * go through delayed refs for all the stuff we've just kicked off
	 * and then loop back (just once)
	 */
	ret = btrfs_run_delayed_refs(trans, fs_info, 0);
	if (!ret && loops == 0) {
		loops++;
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_splice_init(&cur_trans->dirty_bgs, &dirty);
		/*
		 * dirty_bgs_lock protects us from concurrent block group
		 * deletes too (not just cache_write_mutex).
		 */
		if (!list_empty(&dirty)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			goto again;
		}
		spin_unlock(&cur_trans->dirty_bgs_lock);
	} else if (ret < 0) {
		btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
	}
	btrfs_free_path(path);
	return ret;
}
/*
 * Write out all remaining dirty block group items during the critical
 * section of a transaction commit (second pass, after an earlier
 * btrfs_start_dirty_block_groups()).  On success cur_trans->dirty_bgs is
 * left empty and all space cache IO queued here has completed.
 *
 * Returns 0 or a negative errno; a persistent write_one_cache_group()
 * failure aborts the transaction.
 */
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int ret = 0;
	int should_put;
	struct btrfs_path *path;
	struct list_head *io = &cur_trans->io_bgs;
	int num_started = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Even though we are in the critical section of the transaction commit,
	 * we can still have concurrent tasks adding elements to this
	 * transaction's list of dirty block groups. These tasks correspond to
	 * endio free space workers started when writeback finishes for a
	 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
	 * allocate new block groups as a result of COWing nodes of the root
	 * tree when updating the free space inode. The writeback for the space
	 * caches is triggered by an earlier call to
	 * btrfs_start_dirty_block_groups() and iterations of the following
	 * loop.
	 * Also we want to do the cache_save_setup first and then run the
	 * delayed refs to make sure we have the best chance at doing this all
	 * in one shot.
	 */
	spin_lock(&cur_trans->dirty_bgs_lock);
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group_cache,
					 dirty_list);

		/*
		 * this can happen if cache_save_setup re-dirties a block
		 * group that is already under IO. Just wait for it to
		 * finish and then do it all again
		 */
		if (!list_empty(&cache->io_list)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			list_del_init(&cache->io_list);
			btrfs_wait_cache_io(trans, cache, path);
			btrfs_put_block_group(cache);
			spin_lock(&cur_trans->dirty_bgs_lock);
		}

		/*
		 * don't remove from the dirty list until after we've waited
		 * on any pending IO
		 */
		list_del_init(&cache->dirty_list);
		spin_unlock(&cur_trans->dirty_bgs_lock);
		should_put = 1;

		cache_save_setup(cache, trans, path);

		if (!ret)
			ret = btrfs_run_delayed_refs(trans, fs_info,
						     (unsigned long) -1);

		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
			cache->io_ctl.inode = NULL;
			ret = btrfs_write_out_cache(fs_info, trans,
						    cache, path);
			if (ret == 0 && cache->io_ctl.inode) {
				num_started++;
				should_put = 0;
				/* the io list now owns our reference */
				list_add_tail(&cache->io_list, io);
			} else {
				/*
				 * if we failed to write the cache, the
				 * generation will be bad and life goes on
				 */
				ret = 0;
			}
		}
		if (!ret) {
			ret = write_one_cache_group(trans, fs_info,
						    path, cache);
			/*
			 * One of the free space endio workers might have
			 * created a new block group while updating a free space
			 * cache's inode (at inode.c:btrfs_finish_ordered_io())
			 * and hasn't released its transaction handle yet, in
			 * which case the new block group is still attached to
			 * its transaction handle and its creation has not
			 * finished yet (no block group item in the extent tree
			 * yet, etc). If this is the case, wait for all free
			 * space endio workers to finish and retry. This is a
			 * a very rare case so no need for a more efficient and
			 * complex approach.
			 */
			if (ret == -ENOENT) {
				wait_event(cur_trans->writer_wait,
				   atomic_read(&cur_trans->num_writers) == 1);
				ret = write_one_cache_group(trans, fs_info,
							    path, cache);
			}
			if (ret)
				btrfs_abort_transaction(trans, ret);
		}

		/* if its not on the io list, we need to put the block group */
		if (should_put)
			btrfs_put_block_group(cache);
		spin_lock(&cur_trans->dirty_bgs_lock);
	}
	spin_unlock(&cur_trans->dirty_bgs_lock);

	/*
	 * Wait for all space cache writes queued above; each entry on the
	 * io list carries the reference we drop here.
	 */
	while (!list_empty(io)) {
		cache = list_first_entry(io, struct btrfs_block_group_cache,
					 io_list);
		list_del_init(&cache->io_list);
		btrfs_wait_cache_io(trans, cache, path);
		btrfs_put_block_group(cache);
	}

	btrfs_free_path(path);
	return ret;
}
/*
 * Report whether the block group containing @bytenr is read-only.
 * An address with no backing block group is also treated as read-only.
 */
int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group_cache *bg;
	int ro;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return 1;

	ro = bg->ro ? 1 : 0;
	btrfs_put_block_group(bg);
	return ro;
}
/*
 * Try to register a nocow writer against the block group containing
 * @bytenr.  Fails (returns false) if there is no such block group or it
 * is read-only.  On success the block group reference taken here is kept
 * and released later by btrfs_dec_nocow_writers().
 */
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;
	bool can_nocow;

	cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!cache)
		return false;

	spin_lock(&cache->lock);
	can_nocow = !cache->ro;
	if (can_nocow)
		atomic_inc(&cache->nocow_writers);
	spin_unlock(&cache->lock);

	/* on success the reference is dropped by btrfs_dec_nocow_writers() */
	if (!can_nocow)
		btrfs_put_block_group(cache);

	return can_nocow;
}
/*
 * Drop a nocow writer registered by btrfs_inc_nocow_writers() and wake
 * any waiter in btrfs_wait_nocow_writers() once the count hits zero.
 */
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(fs_info, bytenr);
	ASSERT(cache);

	if (atomic_dec_and_test(&cache->nocow_writers))
		wake_up_atomic_t(&cache->nocow_writers);

	/*
	 * Two puts: one for the lookup above and one for the reference
	 * held since btrfs_inc_nocow_writers().
	 */
	btrfs_put_block_group(cache);
	btrfs_put_block_group(cache);
}
/*
 * Sleep uninterruptibly until the block group's nocow_writers count
 * drops to zero (last writer wakes us via btrfs_dec_nocow_writers()).
 */
void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
{
	wait_on_atomic_t(&bg->nocow_writers, atomic_t_wait,
			 TASK_UNINTERRUPTIBLE);
}
/*
 * Map an allocation type bitmask to the name used for its sysfs kobject.
 * Unknown combinations warn once and get a placeholder name.
 */
static const char *alloc_name(u64 flags)
{
	if (flags == (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))
		return "mixed";
	if (flags == BTRFS_BLOCK_GROUP_METADATA)
		return "metadata";
	if (flags == BTRFS_BLOCK_GROUP_DATA)
		return "data";
	if (flags == BTRFS_BLOCK_GROUP_SYSTEM)
		return "system";

	WARN_ON(1);
	return "invalid-combination";
}
/*
 * Allocate and register a new space_info for the block group type bits in
 * @flags, publish it on info->space_info and hand it back via @new.
 * Returns 0 or a negative errno; on failure nothing is left registered.
 */
static int create_space_info(struct btrfs_fs_info *info, u64 flags,
			     struct btrfs_space_info **new)
{
	struct btrfs_space_info *sinfo;
	int ret;
	int i;

	sinfo = kzalloc(sizeof(*sinfo), GFP_NOFS);
	if (!sinfo)
		return -ENOMEM;

	ret = percpu_counter_init(&sinfo->total_bytes_pinned, 0, GFP_KERNEL);
	if (ret)
		goto out_free;

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&sinfo->block_groups[i]);
	init_rwsem(&sinfo->groups_sem);
	spin_lock_init(&sinfo->lock);
	sinfo->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	sinfo->force_alloc = CHUNK_ALLOC_NO_FORCE;
	init_waitqueue_head(&sinfo->wait);
	INIT_LIST_HEAD(&sinfo->ro_bgs);
	INIT_LIST_HEAD(&sinfo->tickets);
	INIT_LIST_HEAD(&sinfo->priority_tickets);

	ret = kobject_init_and_add(&sinfo->kobj, &space_info_ktype,
				   info->space_info_kobj, "%s",
				   alloc_name(sinfo->flags));
	if (ret)
		goto out_destroy;

	*new = sinfo;
	list_add_rcu(&sinfo->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = sinfo;

	return 0;

out_destroy:
	percpu_counter_destroy(&sinfo->total_bytes_pinned);
out_free:
	kfree(sinfo);
	return ret;
}
/*
 * Account a block group's space into its (already existing) space_info.
 * Mirrored profiles (dup/raid1/raid10) consume twice the logical bytes on
 * disk, hence the factor.  Returns the space_info via @space_info.
 */
static void update_space_info(struct btrfs_fs_info *info, u64 flags,
			      u64 total_bytes, u64 bytes_used,
			      u64 bytes_readonly,
			      struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *sinfo;
	const int factor = (flags & (BTRFS_BLOCK_GROUP_DUP |
				     BTRFS_BLOCK_GROUP_RAID1 |
				     BTRFS_BLOCK_GROUP_RAID10)) ? 2 : 1;

	sinfo = __find_space_info(info, flags);
	ASSERT(sinfo);

	spin_lock(&sinfo->lock);
	sinfo->total_bytes += total_bytes;
	sinfo->disk_total += total_bytes * factor;
	sinfo->bytes_used += bytes_used;
	sinfo->disk_used += bytes_used * factor;
	sinfo->bytes_readonly += bytes_readonly;
	if (total_bytes > 0)
		sinfo->full = 0;
	space_info_add_new_bytes(info, sinfo, total_bytes -
				 bytes_used - bytes_readonly);
	spin_unlock(&sinfo->lock);

	*space_info = sinfo;
}
/*
 * Record that a chunk of @flags' profile now exists, so future allocations
 * of the matching type(s) may use that profile.  Writers of the avail_*
 * fields are serialized by the profiles seqlock.
 */
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra;

	extra = chunk_to_extended(flags) & BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits |= extra;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits |= extra;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits |= extra;
	write_sequnlock(&fs_info->profiles_lock);
}
/*
 * returns target flags in extended format or 0 if restripe for this
 * chunk_type is not in progress
 *
 * should be called with either volume_mutex or balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	if (!bctl)
		return 0;

	if ((flags & BTRFS_BLOCK_GROUP_DATA) &&
	    (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT))
		return BTRFS_BLOCK_GROUP_DATA | bctl->data.target;

	if ((flags & BTRFS_BLOCK_GROUP_SYSTEM) &&
	    (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT))
		return BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;

	if ((flags & BTRFS_BLOCK_GROUP_METADATA) &&
	    (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT))
		return BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;

	return 0;
}
/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Returns reduced profile in chunk format. If profile changing is in
 * progress (either running or paused) picks the target profile (if it's
 * already available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	/*
	 * Profiles ordered from most to least preferred; the first one that
	 * survives the device-count mask wins (same priority the old
	 * if/else cascade encoded).
	 */
	static const u64 profile_order[] = {
		BTRFS_BLOCK_GROUP_RAID6,
		BTRFS_BLOCK_GROUP_RAID5,
		BTRFS_BLOCK_GROUP_RAID10,
		BTRFS_BLOCK_GROUP_RAID1,
		BTRFS_BLOCK_GROUP_RAID0,
	};
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 candidates = 0;
	u64 target;
	int i;

	/*
	 * If a restripe (balance convert) is running for this chunk type,
	 * prefer its target profile when that profile is already available.
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target && ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK)) {
		spin_unlock(&fs_info->balance_lock);
		return extended_to_chunk(target);
	}
	spin_unlock(&fs_info->balance_lock);

	/* Mask out the RAID levels we lack enough devices for. */
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (num_devices >= btrfs_raid_array[i].devs_min)
			candidates |= btrfs_raid_group[i];
	}
	candidates &= flags;

	/* Keep only the single most-preferred remaining RAID profile. */
	for (i = 0; i < sizeof(profile_order) / sizeof(profile_order[0]); i++) {
		if (candidates & profile_order[i]) {
			candidates = profile_order[i];
			break;
		}
	}

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
	return extended_to_chunk(flags | candidates);
}
/*
 * Expand @orig_flags with the allocation profile bits currently available
 * for its block group type, then reduce to a single chunk-format profile.
 * The seqlock read loop retries until it sees a consistent snapshot of the
 * avail_*_alloc_bits fields without blocking writers.
 */
static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		/* restart from the caller's flags on every seqlock retry */
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}
/*
 * Pick the allocation profile appropriate for @root: data when @data is
 * set, system for the chunk root, metadata otherwise.
 */
static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 flags;

	if (data)
		flags = BTRFS_BLOCK_GROUP_DATA;
	else if (root == fs_info->chunk_root)
		flags = BTRFS_BLOCK_GROUP_SYSTEM;
	else
		flags = BTRFS_BLOCK_GROUP_METADATA;

	return get_alloc_profile(fs_info, flags);
}
/* Current reduced allocation profile for new data block groups. */
u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}
/* Current reduced allocation profile for new metadata block groups. */
u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}
/* Current reduced allocation profile for new system block groups. */
u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}
/*
 * Total bytes accounted as consumed in @s_info: used + reserved + pinned
 * + readonly, optionally including the speculative bytes_may_use.
 */
static u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
				 bool may_use_included)
{
	u64 used;

	ASSERT(s_info);
	used = s_info->bytes_used + s_info->bytes_reserved +
	       s_info->bytes_pinned + s_info->bytes_readonly;
	if (may_use_included)
		used += s_info->bytes_may_use;
	return used;
}
/*
 * Make sure the data space_info can accommodate @bytes (rounded up to
 * sectorsize), allocating a new data chunk and/or committing the current
 * transaction to reclaim pinned space as needed.  On success @bytes is
 * charged to data_sinfo->bytes_may_use.
 *
 * Returns 0 on success, -ENOSPC when no space can be found, or another
 * negative errno from transaction/chunk-allocation failures.
 */
int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
	u64 used;
	int ret = 0;
	int need_commit = 2;	/* commit attempts remaining */
	int have_pinned_space;

	/* make sure bytes are sectorsize aligned */
	bytes = ALIGN(bytes, fs_info->sectorsize);

	/*
	 * Free space inodes are reserved from inside a running transaction
	 * (journal_info is set), so never try to commit for them.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		need_commit = 0;
		ASSERT(current->journal_info);
	}

again:
	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	used = btrfs_space_info_used(data_sinfo, true);

	if (used + bytes > data_sinfo->total_bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * if we don't have enough free bytes in this space then we need
		 * to alloc a new chunk.
		 */
		if (!data_sinfo->full) {
			u64 alloc_target;

			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
			spin_unlock(&data_sinfo->lock);

			alloc_target = btrfs_data_alloc_profile(fs_info);
			/*
			 * It is ugly that we don't call nolock join
			 * transaction for the free space inode case here.
			 * But it is safe because we only do the data space
			 * reservation for the free space cache in the
			 * transaction context, the common join transaction
			 * just increase the counter of the current transaction
			 * handler, doesn't try to acquire the trans_lock of
			 * the fs.
			 */
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = do_chunk_alloc(trans, fs_info, alloc_target,
					     CHUNK_ALLOC_NO_FORCE);
			btrfs_end_transaction(trans);
			if (ret < 0) {
				if (ret != -ENOSPC)
					return ret;
				else {
					/*
					 * Chunk allocation failed for lack of
					 * space; force a commit attempt below.
					 */
					have_pinned_space = 1;
					goto commit_trans;
				}
			}

			goto again;
		}

		/*
		 * If we don't have enough pinned space to deal with this
		 * allocation, and no removed chunk in current transaction,
		 * don't bother committing the transaction.
		 */
		have_pinned_space = percpu_counter_compare(
			&data_sinfo->total_bytes_pinned,
			used + bytes - data_sinfo->total_bytes);
		spin_unlock(&data_sinfo->lock);

		/* commit the current transaction and try again */
commit_trans:
		if (need_commit &&
		    !atomic_read(&fs_info->open_ioctl_trans)) {
			need_commit--;

			if (need_commit > 0) {
				/* first retry: flush delalloc + ordered IO */
				btrfs_start_delalloc_roots(fs_info, 0, -1);
				btrfs_wait_ordered_roots(fs_info, U64_MAX, 0,
							 (u64)-1);
			}

			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			if (have_pinned_space >= 0 ||
			    test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
				     &trans->transaction->flags) ||
			    need_commit > 0) {
				ret = btrfs_commit_transaction(trans);
				if (ret)
					return ret;
				/*
				 * The cleaner kthread might still be doing iput
				 * operations. Wait for it to finish so that
				 * more space is released.
				 */
				mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
				mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
				goto again;
			} else {
				btrfs_end_transaction(trans);
			}
		}

		trace_btrfs_space_reservation(fs_info,
					      "space_info:enospc",
					      data_sinfo->flags, bytes, 1);
		return -ENOSPC;
	}
	data_sinfo->bytes_may_use += bytes;
	trace_btrfs_space_reservation(fs_info, "space_info",
				      data_sinfo->flags, bytes, 1);
	spin_unlock(&data_sinfo->lock);

	return ret;
}
/*
 * Reserve data space for [@start, @start + @len) after rounding the range
 * out to sector boundaries, then take the matching accurate qgroup
 * reservation (tracked via @reserved).  On qgroup failure the space_info
 * reservation is rolled back.  Returns 0 or a negative errno.
 */
int btrfs_check_data_free_space(struct inode *inode,
			struct extent_changeset **reserved, u64 start, u64 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 aligned_start = round_down(start, fs_info->sectorsize);
	u64 aligned_len = round_up(start + len, fs_info->sectorsize) -
			  aligned_start;
	int ret;

	ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), aligned_len);
	if (ret < 0)
		return ret;

	/* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
	ret = btrfs_qgroup_reserve_data(inode, reserved, aligned_start,
					aligned_len);
	if (ret < 0) {
		btrfs_free_reserved_data_space_noquota(inode, aligned_start,
						       aligned_len);
		return ret;
	}
	return 0;
}
/*
 * Called if we need to clear a data reservation for this inode
 * Normally in a error case.
 *
 * This one will *NOT* use accurate qgroup reserved space API, just for case
 * which we can't sleep and is sure it won't affect qgroup reserved space.
 * Like clear_bit_hook().
 */
void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
					    u64 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_space_info *sinfo;
	u64 aligned_start = round_down(start, fs_info->sectorsize);
	u64 aligned_len = round_up(start + len, fs_info->sectorsize) -
			  aligned_start;

	sinfo = fs_info->data_sinfo;
	spin_lock(&sinfo->lock);
	/* underflow means a reservation accounting bug; clamp and warn */
	if (WARN_ON(sinfo->bytes_may_use < aligned_len))
		sinfo->bytes_may_use = 0;
	else
		sinfo->bytes_may_use -= aligned_len;
	trace_btrfs_space_reservation(fs_info, "space_info",
				      sinfo->flags, aligned_len, 0);
	spin_unlock(&sinfo->lock);
}
/*
 * Called if we need to clear a data reservation for this inode
 * Normally in a error case.
 *
 * This one will handle the per-inode data rsv map for accurate reserved
 * space framework.
 */
void btrfs_free_reserved_data_space(struct inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 aligned_start = round_down(start, root->fs_info->sectorsize);
	u64 aligned_len = round_up(start + len, root->fs_info->sectorsize) -
			  aligned_start;

	btrfs_free_reserved_data_space_noquota(inode, aligned_start,
					       aligned_len);
	btrfs_qgroup_free_data(inode, reserved, aligned_start, aligned_len);
}
/*
 * Flag every metadata space_info so the next allocation attempt forces a
 * new chunk.  The space_info list is walked under RCU.
 */
static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct btrfs_space_info *sinfo;

	rcu_read_lock();
	list_for_each_entry_rcu(sinfo, &info->space_info, list) {
		if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
			sinfo->force_alloc = CHUNK_ALLOC_FORCE;
	}
	rcu_read_unlock();
}
/* Space to keep free on behalf of the global reserve: twice its size. */
static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
	return global->size * 2;
}
/*
 * Decide whether a new chunk should be allocated for @sinfo.
 * CHUNK_ALLOC_FORCE always allocates; CHUNK_ALLOC_LIMITED allocates when
 * free space is below ~1% of the filesystem (min 64M); otherwise allocate
 * once usage crosses ~80% of the space_info.
 */
static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
			      struct btrfs_space_info *sinfo, int force)
{
	u64 bytes_used = btrfs_space_info_used(sinfo, false);
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * We need to take into account the global rsv because for all intents
	 * and purposes it's used space.  Don't worry about locking the
	 * global_rsv, it doesn't change except when the transaction commits.
	 */
	if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
		bytes_used +=
			calc_global_rsv_need_space(&fs_info->global_block_rsv);

	/*
	 * in limited mode, we want to have some free space up to
	 * about 1% of the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(fs_info->super_copy);
		thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
		if (sinfo->total_bytes - bytes_used < thresh)
			return 1;
	}

	if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
		return 0;
	return 1;
}
/*
 * Number of devices whose device items must be updated when allocating or
 * removing a chunk of profile @type: all rw devices for striped profiles,
 * 2 for raid1, 1 for dup/single.
 */
static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
{
	const u64 striped = BTRFS_BLOCK_GROUP_RAID10 |
			    BTRFS_BLOCK_GROUP_RAID0 |
			    BTRFS_BLOCK_GROUP_RAID5 |
			    BTRFS_BLOCK_GROUP_RAID6;

	if (type & striped)
		return fs_info->fs_devices->rw_devices;
	if (type & BTRFS_BLOCK_GROUP_RAID1)
		return 2;
	return 1;	/* DUP or single */
}
/*
 * Make sure the SYSTEM space_info has enough room to update @num_devs
 * device items plus one chunk item, as needed when allocating or removing
 * a chunk of profile @type.  Allocates a new system chunk when below the
 * estimated worst case, then reserves the estimate in the chunk block
 * reserve for use by btrfs_finish_chunk_alloc().
 *
 * NOTE(review): the previous comment here documented an @is_allocation
 * parameter that no longer exists in this signature.
 */
void check_system_chunk(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 type)
{
	struct btrfs_space_info *info;
	u64 left;
	u64 thresh;
	int ret = 0;
	u64 num_devs;

	/*
	 * Needed because we can end up allocating a system chunk and for an
	 * atomic and race free space reservation in the chunk block reserve.
	 */
	ASSERT(mutex_is_locked(&fs_info->chunk_mutex));

	info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	spin_lock(&info->lock);
	left = info->total_bytes - btrfs_space_info_used(info, true);
	spin_unlock(&info->lock);

	num_devs = get_profile_num_devs(fs_info, type);

	/* num_devs device items to update and 1 chunk item to add or remove */
	thresh = btrfs_calc_trunc_metadata_size(fs_info, num_devs) +
		btrfs_calc_trans_metadata_size(fs_info, 1);

	if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
			   left, thresh, type);
		dump_space_info(fs_info, info, 0, 0);
	}

	if (left < thresh) {
		u64 flags = btrfs_system_alloc_profile(fs_info);

		/*
		 * Ignore failure to create system chunk. We might end up not
		 * needing it, as we might not need to COW all nodes/leafs from
		 * the paths we visit in the chunk tree (they were already COWed
		 * or created in the current transaction for example).
		 */
		ret = btrfs_alloc_chunk(trans, fs_info, flags);
	}

	if (!ret) {
		ret = btrfs_block_rsv_add(fs_info->chunk_root,
					  &fs_info->chunk_block_rsv,
					  thresh, BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			trans->chunk_bytes_reserved += thresh;
	}
}
/*
 * If force is CHUNK_ALLOC_FORCE:
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 * If force is NOT CHUNK_ALLOC_FORCE:
 *    - return 0 if it doesn't need to allocate a new chunk,
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 *
 * Only one task allocates per space_info at a time (chunk_alloc flag);
 * other callers loop back and re-evaluate after the winner finishes.
 */
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info, u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	int wait_for_alloc = 0;
	int ret = 0;

	/* Don't re-enter if we're already allocating a chunk */
	if (trans->allocating_chunk)
		return -ENOSPC;

	space_info = __find_space_info(fs_info, flags);
	if (!space_info) {
		ret = create_space_info(fs_info, flags, &space_info);
		if (ret)
			return ret;
	}

again:
	spin_lock(&space_info->lock);
	/* honor any stronger force level requested by other tasks */
	if (force < space_info->force_alloc)
		force = space_info->force_alloc;
	if (space_info->full) {
		if (should_alloc_chunk(fs_info, space_info, force))
			ret = -ENOSPC;
		else
			ret = 0;
		spin_unlock(&space_info->lock);
		return ret;
	}

	if (!should_alloc_chunk(fs_info, space_info, force)) {
		spin_unlock(&space_info->lock);
		return 0;
	} else if (space_info->chunk_alloc) {
		/* someone else is allocating; wait and re-check */
		wait_for_alloc = 1;
	} else {
		space_info->chunk_alloc = 1;
	}

	spin_unlock(&space_info->lock);

	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * The chunk_mutex is held throughout the entirety of a chunk
	 * allocation, so once we've acquired the chunk_mutex we know that the
	 * other guy is done and we need to recheck and see if we should
	 * allocate.
	 */
	if (wait_for_alloc) {
		mutex_unlock(&fs_info->chunk_mutex);
		wait_for_alloc = 0;
		goto again;
	}

	trans->allocating_chunk = true;

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	/*
	 * Check if we have enough space in SYSTEM chunk because we may need
	 * to update devices.
	 */
	check_system_chunk(trans, fs_info, flags);

	ret = btrfs_alloc_chunk(trans, fs_info, flags);
	trans->allocating_chunk = false;

	spin_lock(&space_info->lock);
	if (ret < 0 && ret != -ENOSPC)
		goto out;
	if (ret)
		space_info->full = 1;	/* -ENOSPC: no more chunks fit */
	else
		ret = 1;

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
out:
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
	mutex_unlock(&fs_info->chunk_mutex);
	/*
	 * When we allocate a new chunk we reserve space in the chunk block
	 * reserve to make sure we can COW nodes/leafs in the chunk tree or
	 * add new nodes/leafs to it if we end up needing to do it when
	 * inserting the chunk item and updating device items as part of the
	 * second phase of chunk allocation, performed by
	 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
	 * large number of new block groups to create in our transaction
	 * handle's new_bgs list to avoid exhausting the chunk block reserve
	 * in extreme cases - like having a single transaction create many new
	 * block groups when starting to write out the free space caches of all
	 * the block groups that were made dirty during the lifetime of the
	 * transaction.
	 */
	if (trans->can_flush_pending_bgs &&
	    trans->chunk_bytes_reserved >= (u64)SZ_2M) {
		btrfs_create_pending_block_groups(trans, fs_info);
		btrfs_trans_release_chunk_metadata(trans);
	}
	return ret;
}
/*
 * Decide whether a metadata (or system, when @system_chunk) reservation of
 * @bytes may overcommit against unallocated device space.  Returns 1 when
 * the reservation may proceed, 0 otherwise.  Data never overcommits.
 */
static int can_overcommit(struct btrfs_fs_info *fs_info,
			  struct btrfs_space_info *space_info, u64 bytes,
			  enum btrfs_reserve_flush_enum flush,
			  bool system_chunk)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 profile;
	u64 global_need;
	u64 used;
	u64 avail;

	/* Don't overcommit when in mixed mode. */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	profile = system_chunk ? btrfs_system_alloc_profile(fs_info) :
				 btrfs_metadata_alloc_profile(fs_info);

	used = btrfs_space_info_used(space_info, false);

	/*
	 * The global reserve is effectively used space: if there isn't even
	 * room for it, refuse to overcommit at all.
	 */
	spin_lock(&global_rsv->lock);
	global_need = calc_global_rsv_need_space(global_rsv);
	spin_unlock(&global_rsv->lock);
	if (used + global_need >= space_info->total_bytes)
		return 0;

	used += space_info->bytes_may_use;

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * dup/raid1/raid10 mirror everything, so only half of the free raw
	 * space is usable; raid56 parity is already excluded from the
	 * space_info accounting.
	 */
	if (profile & (BTRFS_BLOCK_GROUP_DUP |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_RAID10))
		avail >>= 1;

	/*
	 * Overcommit up to 1/8 of the free space when we can flush our way
	 * out, 1/2 when we cannot.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;

	return used + bytes < space_info->total_bytes + avail;
}
/*
 * Kick writeback of up to @nr_pages of delalloc data.  Falls back to the
 * btrfs delalloc flusher when s_umount cannot be taken.
 */
static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
					 unsigned long nr_pages, int nr_items)
{
	struct super_block *sb = fs_info->sb;

	if (!down_read_trylock(&sb->s_umount)) {
		/*
		 * We needn't worry the filesystem going from r/w to r/o though
		 * we don't acquire ->s_umount mutex, because the filesystem
		 * should guarantee the delalloc inodes list be empty after
		 * the filesystem is readonly(all dirty pages are written to
		 * the disk).
		 */
		btrfs_start_delalloc_roots(fs_info, 0, nr_items);
		if (!current->journal_info)
			btrfs_wait_ordered_roots(fs_info, nr_items, 0,
						 (u64)-1);
		return;
	}

	writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
	up_read(&sb->s_umount);
}
/*
 * How many metadata items we must reclaim to free @to_reclaim bytes,
 * rounded down but never less than one.
 */
static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 item_size = btrfs_calc_trans_metadata_size(fs_info, 1);
	u64 nr = div64_u64(to_reclaim, item_size);

	return nr ? nr : 1;
}
#define EXTENT_SIZE_PER_ITEM SZ_256K
/*
 * shrink metadata reservation for delalloc
 *
 * Flush dirty delalloc pages (up to three rounds) to release the metadata
 * reserved for them, stopping early once no reservation tickets remain on
 * the metadata space_info.
 *
 * NOTE(review): the @orig parameter is currently unused here; kept for
 * interface compatibility with callers.  A dead local `flush` (assigned
 * from the presence of a transaction handle but never read) was removed.
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
			    u64 orig, bool wait_ordered)
{
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 max_reclaim;
	u64 items;
	long time_left;
	unsigned long nr_pages;
	int loops;

	/* Calc the number of the pages we need flush for space reservation */
	items = calc_reclaim_items_nr(fs_info, to_reclaim);
	to_reclaim = items * EXTENT_SIZE_PER_ITEM;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	if (delalloc_bytes == 0) {
		if (trans)
			return;
		if (wait_ordered)
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		return;
	}

	loops = 0;
	while (delalloc_bytes && loops < 3) {
		max_reclaim = min(delalloc_bytes, to_reclaim);
		nr_pages = max_reclaim >> PAGE_SHIFT;
		btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);
		/*
		 * We need to wait for the async pages to actually start before
		 * we do anything.
		 */
		max_reclaim = atomic_read(&fs_info->async_delalloc_pages);
		if (!max_reclaim)
			goto skip_async;

		if (max_reclaim <= nr_pages)
			max_reclaim = 0;
		else
			max_reclaim -= nr_pages;

		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->async_delalloc_pages) <=
			   (int)max_reclaim);
skip_async:
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			/* nobody is waiting for space anymore; we're done */
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}
		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	}
}
/*
 * A pending metadata space reservation.  Reservers queue one of these on
 * space_info->tickets (or priority_tickets) and sleep on @wait; the
 * flushing machinery either satisfies @bytes or fails the ticket by
 * setting @error (see wake_all_tickets()).
 */
struct reserve_ticket {
	u64 bytes;		/* bytes still needed by the waiter */
	int error;		/* set (e.g. -ENOSPC) when the ticket fails */
	struct list_head list;	/* entry on a space_info ticket list */
	wait_queue_head_t wait;	/* reserver sleeps here */
};
/**
 * may_commit_transaction - possibly commit the transaction if its ok to
 * @root - the root we're allocating for
 * @bytes - the number of bytes we want to reserve
 * @force - force the commit
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does. Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket = NULL;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
	struct btrfs_trans_handle *trans;
	u64 bytes;

	/* can't commit from inside a running transaction */
	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	/* size the commit decision by the first (oldest) waiting ticket */
	spin_lock(&space_info->lock);
	if (!list_empty(&space_info->priority_tickets))
		ticket = list_first_entry(&space_info->priority_tickets,
					  struct reserve_ticket, list);
	else if (!list_empty(&space_info->tickets))
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);
	bytes = (ticket) ? ticket->bytes : 0;
	spin_unlock(&space_info->lock);

	if (!bytes)
		return 0;

	/* See if there is enough pinned space to make this reservation */
	if (percpu_counter_compare(&space_info->total_bytes_pinned,
				   bytes) >= 0)
		goto commit;

	/*
	 * See if there is some space in the delayed insertion reservation for
	 * this reservation.
	 */
	if (space_info != delayed_rsv->space_info)
		return -ENOSPC;

	spin_lock(&delayed_rsv->lock);
	if (delayed_rsv->size > bytes)
		bytes = 0;
	else
		bytes -= delayed_rsv->size;
	spin_unlock(&delayed_rsv->lock);

	if (percpu_counter_compare(&space_info->total_bytes_pinned,
				   bytes) < 0) {
		return -ENOSPC;
	}

commit:
	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return -ENOSPC;

	return btrfs_commit_transaction(trans);
}
/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			int state)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	/* both delayed-item states share one handler; NR limits the count */
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;	/* run them all */

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, fs_info, nr);
		btrfs_end_transaction(trans);
		break;
	/* the WAIT variant additionally waits on ordered extents */
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(fs_info, num_bytes * 2, num_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case ALLOC_CHUNK:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = do_chunk_alloc(trans, fs_info,
				     btrfs_metadata_alloc_profile(fs_info),
				     CHUNK_ALLOC_NO_FORCE);
		btrfs_end_transaction(trans);
		/* "allocated" and "nothing fits" both count as best effort */
		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(fs_info, space_info);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret);
	return;
}
/*
 * How many bytes the flushing machinery should try to reclaim for
 * @space_info.  Outstanding tickets take priority; otherwise reclaim
 * enough to get usage back under ~90-95% of the space_info, capped by
 * what is actually reclaimable (may_use + reserved), or 0 when a small
 * overcommit would already succeed.
 */
static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 bool system_chunk)
{
	struct reserve_ticket *ticket;
	u64 to_reclaim = 0;
	u64 expected;
	u64 used;

	/* pending tickets dictate exactly how much is needed */
	list_for_each_entry(ticket, &space_info->tickets, list)
		to_reclaim += ticket->bytes;
	list_for_each_entry(ticket, &space_info->priority_tickets, list)
		to_reclaim += ticket->bytes;
	if (to_reclaim)
		return to_reclaim;

	to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
	if (can_overcommit(fs_info, space_info, to_reclaim,
			   BTRFS_RESERVE_FLUSH_ALL, system_chunk))
		return 0;

	used = btrfs_space_info_used(space_info, true);

	if (can_overcommit(fs_info, space_info, SZ_1M,
			   BTRFS_RESERVE_FLUSH_ALL, system_chunk))
		expected = div_factor_fine(space_info->total_bytes, 95);
	else
		expected = div_factor_fine(space_info->total_bytes, 90);

	to_reclaim = used > expected ? used - expected : 0;
	return min(to_reclaim, space_info->bytes_may_use +
			       space_info->bytes_reserved);
}
/*
 * Should the async reclaim worker be kicked for @space_info?  Only when
 * usage is high (>= ~98%) but not already hard-full, there is something
 * to reclaim, and the fs is neither closing nor remounting.
 */
static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					u64 used, bool system_chunk)
{
	u64 thresh = div_factor_fine(space_info->total_bytes, 98);

	/* If we're just plain full then async reclaim just slows us down. */
	if (space_info->bytes_used + space_info->bytes_reserved >= thresh)
		return 0;

	if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info,
					      system_chunk))
		return 0;

	if (used < thresh)
		return 0;

	return !btrfs_fs_closing(fs_info) &&
	       !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
}
static void wake_all_tickets(struct list_head *head)
{
struct reserve_ticket *ticket;
while (!list_empty(head)) {
ticket = list_first_entry(head, struct reserve_ticket, list);
list_del_init(&ticket->list);
ticket->error = -ENOSPC;
wake_up(&ticket->wait);
}
}
/*
 * This is for normal flushers, we can wait all goddamned day if we want to. We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	int flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
						      false);
	if (!to_reclaim) {
		/* Nothing to do; clear the flag so a new worker can queue. */
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			/* All waiters satisfied; we're done. */
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info,
							      false);
		/*
		 * tickets_id advances whenever a ticket is satisfied, so an
		 * unchanged id means this flush state made no progress:
		 * escalate to the next, more expensive, state.  Progress
		 * restarts from the cheapest state.
		 */
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}
		/*
		 * Ran through every state up to and including COMMIT_TRANS
		 * without emptying the ticket list: retry the whole sequence,
		 * but fail all remaining tickets with -ENOSPC after the
		 * third full cycle.
		 */
		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				wake_all_tickets(&space_info->tickets);
				space_info->flush = 0;
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}
/* Initialize @work to run the async metadata reclaim worker. */
void btrfs_init_async_reclaim_work(struct work_struct *work)
{
	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
}
/*
 * Flush space synchronously for a priority (non-FLUSH_ALL) reservation
 * @ticket.  Walks the flush states in the caller's context — skipping the
 * delalloc states — until the ticket is satisfied (ticket->bytes == 0) or
 * the states before COMMIT_TRANS are exhausted.
 */
static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
					    struct btrfs_space_info *space_info,
					    struct reserve_ticket *ticket)
{
	u64 to_reclaim;
	int flush_state = FLUSH_DELAYED_ITEMS_NR;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
						      false);
	if (!to_reclaim) {
		spin_unlock(&space_info->lock);
		return;
	}
	spin_unlock(&space_info->lock);

	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			/* Someone freed enough space to satisfy us. */
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);

		/*
		 * Priority flushers can't wait on delalloc without
		 * deadlocking.
		 */
		if (flush_state == FLUSH_DELALLOC ||
		    flush_state == FLUSH_DELALLOC_WAIT)
			flush_state = ALLOC_CHUNK;
	} while (flush_state < COMMIT_TRANS);
}
/*
 * Sleep (killably) until @ticket is satisfied or fails.
 *
 * Returns 0 if the full @orig_bytes were granted, -EINTR if the task was
 * killed while waiting, or the ticket's error (e.g. -ENOSPC).  On failure
 * any partially granted bytes are returned to the space info first.
 */
static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
			       struct btrfs_space_info *space_info,
			       struct reserve_ticket *ticket, u64 orig_bytes)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/* Fatal signal pending; stop waiting. */
			ret = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	if (!ret)
		ret = ticket->error;
	if (!list_empty(&ticket->list))
		list_del_init(&ticket->list);
	/*
	 * The ticket may have been partially filled; since the overall
	 * reservation failed, hand back whatever was granted.
	 */
	if (ticket->bytes && ticket->bytes < orig_bytes) {
		u64 num_bytes = orig_bytes - ticket->bytes;
		space_info->bytes_may_use -= num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      space_info->flags, num_bytes, 0);
	}
	spin_unlock(&space_info->lock);

	return ret;
}
/**
 * __reserve_metadata_bytes - try to reserve bytes from a space_info
 * @fs_info - the filesystem we are reserving from
 * @space_info - the space info we want to allocate from
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 * @system_chunk - whether this reservation is on behalf of the chunk root
 *
 * This will reserve orig_bytes number of bytes from the space info.  If there
 * is not enough space it will make an attempt to flush out space to make
 * room. It will do this by flushing delalloc if possible or committing the
 * transaction. If flush is BTRFS_RESERVE_NO_FLUSH then no attempts to regain
 * reservations will be made and this will fail if there is not enough space
 * already.
 */
static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info,
				    u64 orig_bytes,
				    enum btrfs_reserve_flush_enum flush,
				    bool system_chunk)
{
	struct reserve_ticket ticket;
	u64 used;
	int ret = 0;

	ASSERT(orig_bytes);
	/* FLUSH_ALL from inside a running transaction is not allowed. */
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);

	/*
	 * If we have enough space then hooray, make our reservation and carry
	 * on. If not see if we can overcommit, and if we can, hooray carry on.
	 * If not things get more complicated.
	 */
	if (used + orig_bytes <= space_info->total_bytes) {
		space_info->bytes_may_use += orig_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      space_info->flags, orig_bytes, 1);
		ret = 0;
	} else if (can_overcommit(fs_info, space_info, orig_bytes, flush,
				  system_chunk)) {
		space_info->bytes_may_use += orig_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      space_info->flags, orig_bytes, 1);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		init_waitqueue_head(&ticket.wait);
		if (flush == BTRFS_RESERVE_FLUSH_ALL) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq,
					   &fs_info->async_reclaim_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    need_do_async_reclaim(fs_info, space_info,
					  used, system_chunk) &&
		    !work_busy(&fs_info->async_reclaim_work)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->async_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	/* Normal flushers sleep on the ticket until satisfied or failed. */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		return wait_reserve_ticket(fs_info, space_info, &ticket,
					   orig_bytes);

	/* Priority flushers flush synchronously in our own context. */
	ret = 0;
	priority_reclaim_metadata_space(fs_info, space_info, &ticket);
	spin_lock(&space_info->lock);
	if (ticket.bytes) {
		/* Partially filled: give back whatever we did get. */
		if (ticket.bytes < orig_bytes) {
			u64 num_bytes = orig_bytes - ticket.bytes;
			space_info->bytes_may_use -= num_bytes;
			trace_btrfs_space_reservation(fs_info, "space_info",
						      space_info->flags,
						      num_bytes, 0);
		}
		list_del_init(&ticket.list);
		ret = -ENOSPC;
	}
	spin_unlock(&space_info->lock);
	ASSERT(list_empty(&ticket.list));
	return ret;
}
/**
 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int reserve_metadata_bytes(struct btrfs_root *root,
				  struct btrfs_block_rsv *block_rsv,
				  u64 orig_bytes,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;
	bool system_chunk = (root == fs_info->chunk_root);

	ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info,
				       orig_bytes, flush, system_chunk);
	/*
	 * During orphan cleanup fall back to stealing from the global
	 * reserve rather than failing outright.
	 */
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		if (block_rsv != global_rsv &&
		    !block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC)
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);
	return ret;
}
/*
 * Pick the block reserve a tree operation on @root should draw from: the
 * transaction's rsv for reference-counted roots (plus the csum-while-adding
 * and uuid root special cases), else the root's own rsv, else the shared
 * empty rsv.
 */
static struct btrfs_block_rsv *get_block_rsv(
					const struct btrfs_trans_handle *trans,
					const struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
	    (root == fs_info->csum_root && trans->adding_csums) ||
	    root == fs_info->uuid_root) {
		if (trans->block_rsv)
			return trans->block_rsv;
	}

	if (root->block_rsv)
		return root->block_rsv;

	return &fs_info->empty_block_rsv;
}
/*
 * Consume @num_bytes of reserved space from @block_rsv.  Returns 0 on
 * success or -ENOSPC if the rsv doesn't hold enough.
 */
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes)
{
	int ret;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved < num_bytes) {
		ret = -ENOSPC;
	} else {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = 0;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);

	return ret;
}
/*
 * Add @num_bytes of reserved space to @block_rsv.  If @update_size is set
 * the rsv's target size grows by the same amount; otherwise the rsv is
 * marked full once reserved reaches size.
 */
static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
				u64 num_bytes, int update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = 1;
	spin_unlock(&block_rsv->lock);
}
/*
 * Conditionally migrate @num_bytes of reserved space from the global block
 * reserve into @dest: only if both share a space info and the global rsv
 * would still keep at least div_factor(size, @min_factor) bytes afterwards.
 *
 * Returns 0 on success, -ENOSPC otherwise.
 */
int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_rsv *dest, u64 num_bytes,
			     int min_factor)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != dest->space_info)
		return -ENOSPC;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, min_factor);
	if (global_rsv->reserved < min_bytes + num_bytes) {
		spin_unlock(&global_rsv->lock);
		return -ENOSPC;
	}
	global_rsv->reserved -= num_bytes;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	block_rsv_add_bytes(dest, num_bytes, 1);
	return 0;
}
/*
 * This is for space we already have accounted in space_info->bytes_may_use,
 * so basically when we're returning space from block_rsv's.
 *
 * The freed bytes are first used to satisfy waiting reservation tickets
 * (priority tickets before normal ones); whatever tickets don't consume is
 * subtracted from bytes_may_use.
 */
static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     u64 num_bytes)
{
	struct reserve_ticket *ticket;
	struct list_head *head;
	u64 used;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
	bool check_overcommit = false;

	spin_lock(&space_info->lock);
	head = &space_info->priority_tickets;

	/*
	 * If we are over our limit then we need to check and see if we can
	 * overcommit, and if we can't then we just need to free up our space
	 * and not satisfy any requests.
	 */
	used = btrfs_space_info_used(space_info, true);
	if (used - num_bytes >= space_info->total_bytes)
		check_overcommit = true;
again:
	while (!list_empty(head) && num_bytes) {
		ticket = list_first_entry(head, struct reserve_ticket,
					  list);
		/*
		 * We use 0 bytes because this space is already reserved, so
		 * adding the ticket space would be a double count.
		 */
		if (check_overcommit &&
		    !can_overcommit(fs_info, space_info, 0, flush, false))
			break;
		if (num_bytes >= ticket->bytes) {
			/* Ticket fully satisfied: remove and wake it. */
			list_del_init(&ticket->list);
			num_bytes -= ticket->bytes;
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			/* Partial fill; the waiter keeps sleeping. */
			ticket->bytes -= num_bytes;
			num_bytes = 0;
		}
	}

	/* Priority list drained: move on to the normal ticket list. */
	if (num_bytes && head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
	space_info->bytes_may_use -= num_bytes;
	trace_btrfs_space_reservation(fs_info, "space_info",
				      space_info->flags, num_bytes, 0);
	spin_unlock(&space_info->lock);
}
/*
 * This is for newly allocated space that isn't accounted in
 * space_info->bytes_may_use yet. So if we allocate a chunk or unpin an extent
 * we use this helper.
 *
 * Bytes handed to a ticket are added to bytes_may_use, since a granted
 * reservation becomes accounted space.
 *
 * NOTE(review): unlike space_info_add_old_bytes() this takes no lock, so
 * callers presumably hold space_info->lock — confirm.
 */
static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     u64 num_bytes)
{
	struct reserve_ticket *ticket;
	struct list_head *head = &space_info->priority_tickets;

again:
	while (!list_empty(head) && num_bytes) {
		ticket = list_first_entry(head, struct reserve_ticket,
					  list);
		if (num_bytes >= ticket->bytes) {
			/* Ticket fully satisfied: remove and wake it. */
			trace_btrfs_space_reservation(fs_info, "space_info",
						      space_info->flags,
						      ticket->bytes, 1);
			list_del_init(&ticket->list);
			num_bytes -= ticket->bytes;
			space_info->bytes_may_use += ticket->bytes;
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			/* Partial fill; the waiter keeps sleeping. */
			trace_btrfs_space_reservation(fs_info, "space_info",
						      space_info->flags,
						      num_bytes, 1);
			space_info->bytes_may_use += num_bytes;
			ticket->bytes -= num_bytes;
			num_bytes = 0;
		}
	}

	/* Priority list drained: move on to the normal ticket list. */
	if (num_bytes && head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		goto again;
	}
}
/*
 * Shrink @block_rsv's size by @num_bytes ((u64)-1 means "empty it") and
 * release any reserved bytes now above the new size: first into @dest (if
 * given and not already full), the remainder back to the space info via
 * space_info_add_old_bytes().
 *
 * Returns the number of reserved bytes that were released from @block_rsv.
 */
static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *block_rsv,
				   struct btrfs_block_rsv *dest, u64 num_bytes)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 ret;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1)
		num_bytes = block_rsv->size;
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		/* Everything above the new size is excess to release. */
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	} else {
		num_bytes = 0;
	}
	spin_unlock(&block_rsv->lock);

	ret = num_bytes;
	if (num_bytes > 0) {
		if (dest) {
			/* Top up @dest first. */
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = 1;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes)
			space_info_add_old_bytes(fs_info, space_info,
						 num_bytes);
	}
	return ret;
}
/*
 * Move @num_bytes of reserved space from @src to @dst, growing @dst's
 * target size too if @update_size is set.  Returns 0 on success or
 * -ENOSPC if @src can't spare the bytes.
 */
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
			    struct btrfs_block_rsv *dst, u64 num_bytes,
			    int update_size)
{
	int ret = block_rsv_use_bytes(src, num_bytes);

	if (!ret)
		block_rsv_add_bytes(dst, num_bytes, update_size);
	return ret;
}
/* Zero @rsv, initialize its spinlock and record its @type. */
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
	rsv->type = type;
}
/* Initialize @rsv and attach it to the metadata space info. */
void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *rsv,
				   unsigned short type)
{
	btrfs_init_block_rsv(rsv, type);
	rsv->space_info = __find_space_info(fs_info,
					    BTRFS_BLOCK_GROUP_METADATA);
}
/*
 * Allocate and initialize a metadata block reserve of the given @type.
 * Returns NULL on allocation failure.
 */
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
					      unsigned short type)
{
	struct btrfs_block_rsv *rsv = kmalloc(sizeof(*rsv), GFP_NOFS);

	if (rsv)
		btrfs_init_metadata_block_rsv(fs_info, rsv, type);
	return rsv;
}
/*
 * Release everything still held by @rsv and free it.  NULL is accepted
 * and ignored.
 */
void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_rsv *rsv)
{
	if (rsv) {
		btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
		kfree(rsv);
	}
}
/*
 * Free @rsv without releasing any reservation it may still hold
 * (contrast with btrfs_free_block_rsv()).
 */
void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
{
	kfree(rsv);
}
/*
 * Reserve @num_bytes from the space info and add them to @block_rsv,
 * growing its target size.  A zero request succeeds trivially.
 * Returns 0 on success or the reservation error (e.g. -ENOSPC).
 */
int btrfs_block_rsv_add(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush)
{
	int ret = 0;

	if (num_bytes) {
		ret = reserve_metadata_bytes(root, block_rsv, num_bytes,
					     flush);
		if (!ret)
			block_rsv_add_bytes(block_rsv, num_bytes, 1);
	}
	return ret;
}
/*
 * Check whether @block_rsv holds at least div_factor(size, @min_factor)
 * reserved bytes.  Returns 0 when it does (or @block_rsv is NULL),
 * -ENOSPC otherwise.
 */
int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor)
{
	u64 needed;
	int ret;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	needed = div_factor(block_rsv->size, min_factor);
	ret = (block_rsv->reserved >= needed) ? 0 : -ENOSPC;
	spin_unlock(&block_rsv->lock);

	return ret;
}
/*
 * Top @block_rsv up to at least @min_reserved bytes, reserving the
 * shortfall from the space info if necessary.
 *
 * Returns 0 if the rsv already holds enough or the top-up succeeded,
 * otherwise the reservation error (e.g. -ENOSPC).
 */
int btrfs_block_rsv_refill(struct btrfs_root *root,
			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
			   enum btrfs_reserve_flush_enum flush)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = min_reserved;
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	else
		num_bytes -= block_rsv->reserved;	/* the shortfall */
	spin_unlock(&block_rsv->lock);

	if (!ret)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		/* Grow reserved bytes only, not the target size. */
		block_rsv_add_bytes(block_rsv, num_bytes, 0);
		return 0;
	}

	return ret;
}
/**
 * btrfs_inode_rsv_refill - refill the inode block rsv.
 * @inode - the inode we are refilling.
 * @flush - the flushing restriction.
 *
 * Essentially the same as btrfs_block_rsv_refill, except it uses the
 * block_rsv->size as the minimum size. We'll either refill the missing amount
 * or return if we already have enough space. This will also handle the
 * reserve tracepoint for the reserved amount.
 */
static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved < block_rsv->size)
		num_bytes = block_rsv->size - block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (num_bytes == 0)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		/* Don't grow the size — it was set by the rsv-size calc. */
		block_rsv_add_bytes(block_rsv, num_bytes, 0);
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 1);
	}
	return ret;
}
/**
 * btrfs_inode_rsv_release - release any excessive reservation.
 * @inode - the inode we need to release from.
 *
 * This is the same as btrfs_block_rsv_release, except that it handles the
 * tracepoint for the reservation.
 */
static void btrfs_inode_rsv_release(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
	u64 released = 0;

	/*
	 * Since we statically set the block_rsv->size we just want to say we
	 * are releasing 0 bytes, and then we'll just get the reservation over
	 * the size free'd.  Excess tops up the global rsv first.
	 */
	released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0);
	if (released > 0)
		trace_btrfs_space_reservation(fs_info, "delalloc",
					      btrfs_ino(inode), released, 0);
}
/*
 * Release @num_bytes from @block_rsv.  Excess reserved space tops up the
 * global reserve first, unless @block_rsv IS the global reserve or lives
 * in a different space info.
 */
void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes)
{
	struct btrfs_block_rsv *target = &fs_info->global_block_rsv;

	if (block_rsv == target ||
	    block_rsv->space_info != target->space_info)
		target = NULL;

	block_rsv_release_bytes(fs_info, block_rsv, target, num_bytes);
}
/*
 * Recompute the global block reserve's target size from the extent, csum
 * and tree roots (clamped to [16M, 512M]) and bring its reserved bytes in
 * line: grow from unused space in the space info (accounted into
 * bytes_may_use), or shrink by handing the excess back.
 */
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	u64 num_bytes;

	/*
	 * The global block rsv is based on the size of the extent tree, the
	 * checksum tree and the root tree. If the fs is empty we want to set
	 * it to a minimal amount for safety.
	 */
	num_bytes = btrfs_root_used(&fs_info->extent_root->root_item) +
		    btrfs_root_used(&fs_info->csum_root->root_item) +
		    btrfs_root_used(&fs_info->tree_root->root_item);
	num_bytes = max_t(u64, num_bytes, SZ_16M);

	spin_lock(&sinfo->lock);
	spin_lock(&block_rsv->lock);

	block_rsv->size = min_t(u64, num_bytes, SZ_512M);

	if (block_rsv->reserved < block_rsv->size) {
		/* Grow: take whatever free space the space info can spare. */
		num_bytes = btrfs_space_info_used(sinfo, true);
		if (sinfo->total_bytes > num_bytes) {
			num_bytes = sinfo->total_bytes - num_bytes;
			num_bytes = min(num_bytes,
					block_rsv->size - block_rsv->reserved);
			block_rsv->reserved += num_bytes;
			sinfo->bytes_may_use += num_bytes;
			trace_btrfs_space_reservation(fs_info, "space_info",
						      sinfo->flags, num_bytes,
						      1);
		}
	} else if (block_rsv->reserved > block_rsv->size) {
		/* Shrink: hand the excess back to the space info. */
		num_bytes = block_rsv->reserved - block_rsv->size;
		sinfo->bytes_may_use -= num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      sinfo->flags, num_bytes, 0);
		block_rsv->reserved = block_rsv->size;
	}

	if (block_rsv->reserved == block_rsv->size)
		block_rsv->full = 1;
	else
		block_rsv->full = 0;

	spin_unlock(&block_rsv->lock);
	spin_unlock(&sinfo->lock);
}
/*
 * Attach the per-fs block reserves to their space infos (the chunk rsv to
 * the system space info, the rest to metadata) and point the core roots at
 * the global reserve.
 */
static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;

	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
	/* The quota root may not exist (quotas disabled). */
	if (fs_info->quota_root)
		fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

	update_global_block_rsv(fs_info);
}
/*
 * Drain the global block reserve completely and warn if any of the other
 * per-fs reserves still hold size or reserved bytes.
 */
static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
				(u64)-1);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
}
/*
 * Return the metadata space reserved by @trans to its block reserve.
 * No-op when the handle reserved nothing.
 */
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info)
{
	if (!trans->block_rsv) {
		ASSERT(!trans->bytes_reserved);
		return;
	}
	if (!trans->bytes_reserved)
		return;

	/* Transaction reservations always live in the trans block rsv. */
	ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, trans->block_rsv,
				trans->bytes_reserved);
	trans->bytes_reserved = 0;
}
/*
 * To be called after all the new block groups attached to the transaction
 * handle have been created (btrfs_create_pending_block_groups()).
 *
 * Returns the chunk-allocation reservation held by @trans to the chunk
 * block reserve.
 */
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->chunk_bytes_reserved)
		return;

	/* All pending block groups should have been created by now. */
	WARN_ON_ONCE(!list_empty(&trans->new_bgs));

	block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
				trans->chunk_bytes_reserved);
	trans->chunk_bytes_reserved = 0;
}
/*
 * Reserve the space needed to later delete an inode's orphan item by
 * migrating one metadata unit from the transaction's block reserve into
 * the root's orphan block reserve.  Can only return 0 or -ENOSPC.
 */
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	/*
	 * We always use trans->block_rsv here as we will have reserved space
	 * for our orphan when starting the transaction, using get_block_rsv()
	 * here will sometimes make us choose the wrong block rsv as we could
	 * be doing a reloc inode for a non refcounted root.
	 */
	struct btrfs_block_rsv *src_rsv = trans->block_rsv;
	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;

	/*
	 * We need to hold space in order to delete our orphan item once we've
	 * added it, so this takes the reservation so we can release it later
	 * when we are truly done with the orphan item.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode),
				      num_bytes, 1);
	return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
}
/*
 * Release the single metadata unit that was reserved for an inode's
 * orphan item from the root's orphan block reserve.
 */
void btrfs_orphan_release_metadata(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode),
				      num_bytes, 0);
	btrfs_block_rsv_release(fs_info, root->orphan_block_rsv, num_bytes);
}
/*
 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
 * root: the root of the parent directory
 * rsv: block reservation
 * items: the number of items that we need do reservation
 * qgroup_reserved: used to return the reserved size in qgroup
 *
 * This function is used to reserve the space for snapshot/subvolume
 * creation and deletion. Those operations are different with the
 * common file/directory operations, they change two fs/file trees
 * and root tree, the number of items that the qgroup reserves is
 * different with the free space reservation. So we can not use
 * the space reservation mechanism in start_transaction().
 */
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
				     struct btrfs_block_rsv *rsv,
				     int items,
				     u64 *qgroup_reserved,
				     bool use_global_rsv)
{
	u64 num_bytes;
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
		/* One for parent inode, two for dir entries */
		num_bytes = 3 * fs_info->nodesize;
		ret = btrfs_qgroup_reserve_meta(root, num_bytes, true);
		if (ret)
			return ret;
	} else {
		num_bytes = 0;
	}
	*qgroup_reserved = num_bytes;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
	rsv->space_info = __find_space_info(fs_info,
					    BTRFS_BLOCK_GROUP_METADATA);
	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
				  BTRFS_RESERVE_FLUSH_ALL);

	/* As a last resort steal the reservation from the global rsv. */
	if (ret == -ENOSPC && use_global_rsv)
		ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);

	/* Undo the qgroup reservation if the space reservation failed. */
	if (ret && *qgroup_reserved)
		btrfs_qgroup_free_meta(root, *qgroup_reserved);

	return ret;
}
/* Release everything held by a subvolume operation's block reserve. */
void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
				      struct btrfs_block_rsv *rsv)
{
	btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
}
/*
 * Recompute the target size of @inode's block reserve from its current
 * outstanding extent count and csum byte accounting.  Caller must hold
 * inode->lock (asserted).
 */
static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
						 struct btrfs_inode *inode)
{
	struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
	unsigned nr_extents;
	u64 csum_leaves;
	u64 size = 0;

	lockdep_assert_held(&inode->lock);

	nr_extents = inode->outstanding_extents;
	if (nr_extents)
		size = btrfs_calc_trans_metadata_size(fs_info,
						      nr_extents + 1);
	csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes);
	size += btrfs_calc_trans_metadata_size(fs_info, csum_leaves);

	spin_lock(&block_rsv->lock);
	block_rsv->size = size;
	spin_unlock(&block_rsv->lock);
}
/*
 * Reserve metadata space for @num_bytes of delalloc on @inode: bump the
 * outstanding extent count and csum byte accounting, recompute the inode
 * block rsv size, charge qgroups and fill the rsv.  On failure every
 * accounting change is rolled back.
 *
 * Returns 0 on success or a negative error (e.g. -ENOSPC).
 */
int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	unsigned nr_extents;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
	int ret = 0;
	bool delalloc_lock = true;

	/* If we are a free space inode we need to not flush since we will be in
	 * the middle of a transaction commit. We also don't need the delalloc
	 * mutex since we won't race with anybody. We need this mostly to make
	 * lockdep shut its filthy mouth.
	 *
	 * If we have a transaction open (can happen if we call truncate_block
	 * from truncate), then we need FLUSH_LIMIT so we don't deadlock.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		flush = BTRFS_RESERVE_NO_FLUSH;
		delalloc_lock = false;
	} else if (current->journal_info) {
		flush = BTRFS_RESERVE_FLUSH_LIMIT;
	}

	if (flush != BTRFS_RESERVE_NO_FLUSH &&
	    btrfs_transaction_in_commit(fs_info))
		schedule_timeout(1);

	if (delalloc_lock)
		mutex_lock(&inode->delalloc_mutex);

	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);

	/* Add our new extents and calculate the new rsv size. */
	spin_lock(&inode->lock);
	nr_extents = count_max_extents(num_bytes);
	btrfs_mod_outstanding_extents(inode, nr_extents);
	inode->csum_bytes += num_bytes;
	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
	spin_unlock(&inode->lock);

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
		ret = btrfs_qgroup_reserve_meta(root,
				nr_extents * fs_info->nodesize, true);
		if (ret)
			goto out_fail;
	}

	ret = btrfs_inode_rsv_refill(inode, flush);
	if (unlikely(ret)) {
		/* Undo the qgroup charge made just above. */
		btrfs_qgroup_free_meta(root,
				       nr_extents * fs_info->nodesize);
		goto out_fail;
	}

	if (delalloc_lock)
		mutex_unlock(&inode->delalloc_mutex);
	return 0;

out_fail:
	/* Roll back the extent/csum accounting done above. */
	spin_lock(&inode->lock);
	nr_extents = count_max_extents(num_bytes);
	btrfs_mod_outstanding_extents(inode, -nr_extents);
	inode->csum_bytes -= num_bytes;
	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
	spin_unlock(&inode->lock);

	btrfs_inode_rsv_release(inode);
	if (delalloc_lock)
		mutex_unlock(&inode->delalloc_mutex);
	return ret;
}
/**
 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
 * @inode: the inode to release the reservation for.
 * @num_bytes: the number of bytes we are releasing.
 *
 * This will release the metadata reservation for an inode. This can be called
 * once we complete IO for a given set of bytes to release their metadata
 * reservations, or on error for the same reason.
 */
void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);

	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
	spin_lock(&inode->lock);
	inode->csum_bytes -= num_bytes;
	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
	spin_unlock(&inode->lock);

	/* NOTE(review): skipped under sanity tests — presumably no real
	 * space info exists to release into; confirm. */
	if (btrfs_is_testing(fs_info))
		return;

	btrfs_inode_rsv_release(inode);
}
/**
 * btrfs_delalloc_release_extents - release our outstanding_extents
 * @inode: the inode to balance the reservation for.
 * @num_bytes: the number of bytes we originally reserved with
 *
 * When we reserve space we increase outstanding_extents for the extents we may
 * add. Once we've set the range as delalloc or created our ordered extents we
 * have outstanding_extents to track the real usage, so we use this to free our
 * temporarily tracked outstanding_extents. This _must_ be used in conjunction
 * with btrfs_delalloc_reserve_metadata.
 */
void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	unsigned num_extents;

	spin_lock(&inode->lock);
	num_extents = count_max_extents(num_bytes);
	btrfs_mod_outstanding_extents(inode, -num_extents);
	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
	spin_unlock(&inode->lock);

	/* NOTE(review): skipped under sanity tests — presumably no real
	 * space info exists to release into; confirm. */
	if (btrfs_is_testing(fs_info))
		return;

	btrfs_inode_rsv_release(inode);
}
/**
 * btrfs_delalloc_reserve_space - reserve data and metadata space for
 * delalloc
 * @inode: inode we're writing to
 * @reserved: mandatory parameter, record actually reserved qgroup ranges of
 * 	      current reservation.
 * @start: start range we are writing to
 * @len: how long the range we are writing to
 *
 * Reserves data space (including the matching qgroup space) for the range
 * via btrfs_check_data_free_space(), then the corresponding metadata space
 * via btrfs_delalloc_reserve_metadata().  If the metadata reservation
 * fails, the data reservation is rolled back.
 *
 * Return 0 for success
 * Return <0 for error(-ENOSPC or -EQUOT)
 */
int btrfs_delalloc_reserve_space(struct inode *inode,
			struct extent_changeset **reserved, u64 start, u64 len)
{
	int ret = btrfs_check_data_free_space(inode, reserved, start, len);

	if (ret >= 0) {
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len);
		if (ret < 0)
			btrfs_free_reserved_data_space(inode, *reserved,
						       start, len);
	}
	return ret;
}
/**
 * btrfs_delalloc_release_space - release data and metadata space for delalloc
 * @inode: inode we're releasing space for
 * @reserved: the qgroup ranges recorded at reservation time
 * @start: start position of the space already reserved
 * @len: the len of the space already reserved
 *
 * This function will release the metadata space that was not used and will
 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
 * list if there are no delalloc bytes left.
 * Also it will handle the qgroup reserved space.
 */
void btrfs_delalloc_release_space(struct inode *inode,
				  struct extent_changeset *reserved,
				  u64 start, u64 len)
{
	/* Metadata first, then the data (and qgroup) reservation. */
	btrfs_delalloc_release_metadata(BTRFS_I(inode), len);
	btrfs_free_reserved_data_space(inode, reserved, start, len);
}
/*
 * Apply an allocation (@alloc != 0) or a free (@alloc == 0) of @num_bytes at
 * @bytenr to the in-memory block group and space_info accounting, and to the
 * super block's bytes_used.  Freed bytes are moved to "pinned" and recorded
 * in pinned_extents so they only become allocatable again after the
 * transaction commits.  Touched block groups are queued on the
 * transaction's dirty list so their items are written out at commit time.
 *
 * Returns 0 on success or -ENOENT if no block group covers @bytenr.
 */
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *info, u64 bytenr,
			      u64 num_bytes, int alloc)
{
	struct btrfs_block_group_cache *cache = NULL;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;

	/* block accounting for super block */
	spin_lock(&info->delalloc_root_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_root_lock);

	/* The range may span several block groups; walk them one at a time. */
	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -ENOENT;
		/* Two copies on disk for DUP/RAID1/RAID10 profiles. */
		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10))
			factor = 2;
		else
			factor = 1;
		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space. This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && cache->cached == BTRFS_CACHE_NO)
			cache_block_group(cache, 1);

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		/* space_info lock nests outside the block group lock. */
		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		/* The on-disk free space cache for this group is now stale. */
		if (btrfs_test_opt(info, SPACE_CACHE) &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		old_val = btrfs_block_group_used(&cache->item);
		/* Clamp to the part of the range inside this block group. */
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			/* Reserved bytes become used bytes. */
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			/* Used bytes become pinned until commit. */
			cache->pinned += num_bytes;
			cache->space_info->bytes_pinned += num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			trace_btrfs_space_reservation(info, "pinned",
						      cache->space_info->flags,
						      num_bytes, 1);
			percpu_counter_add(&cache->space_info->total_bytes_pinned,
					   num_bytes);
			/* Remember the range so it gets unpinned at commit. */
			set_extent_dirty(info->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
		}

		/* Queue the group so its updated item is written at commit. */
		spin_lock(&trans->transaction->dirty_bgs_lock);
		if (list_empty(&cache->dirty_list)) {
			list_add_tail(&cache->dirty_list,
				      &trans->transaction->dirty_bgs);
			trans->transaction->num_dirty_bgs++;
			btrfs_get_block_group(cache);
		}
		spin_unlock(&trans->transaction->dirty_bgs_lock);

		/*
		 * No longer have used bytes in this block group, queue it for
		 * deletion. We do this after adding the block group to the
		 * dirty list to avoid races between cleaner kthread and space
		 * cache writeout.
		 */
		if (!alloc && old_val == 0) {
			spin_lock(&info->unused_bgs_lock);
			if (list_empty(&cache->bg_list)) {
				btrfs_get_block_group(cache);
				list_add_tail(&cache->bg_list,
					      &info->unused_bgs);
			}
			spin_unlock(&info->unused_bgs_lock);
		}
		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}
/*
 * Return the logical address of the first block group, using the cached
 * fs_info->first_logical_byte when available and falling back to a block
 * group tree lookup at/after @search_start (0 if none exists).
 */
static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
{
	struct btrfs_block_group_cache *first_bg;
	u64 logical;

	/* Fast path: use the cached value if one has been recorded. */
	spin_lock(&fs_info->block_group_cache_lock);
	logical = fs_info->first_logical_byte;
	spin_unlock(&fs_info->block_group_cache_lock);

	if (logical != (u64)-1)
		return logical;

	/* Slow path: find the first block group at or after search_start. */
	first_bg = btrfs_lookup_first_block_group(fs_info, search_start);
	if (!first_bg)
		return 0;

	logical = first_bg->key.objectid;
	btrfs_put_block_group(first_bg);

	return logical;
}
/*
 * Move @num_bytes at @bytenr in @cache to the pinned state.  Pinned bytes
 * stay unavailable for allocation until the transaction commits and the
 * range recorded in pinned_extents is unpinned.  When @reserved is set the
 * bytes are also taken out of the reserved counters.
 *
 * Always returns 0.
 */
static int pin_down_extent(struct btrfs_fs_info *fs_info,
			   struct btrfs_block_group_cache *cache,
			   u64 bytenr, u64 num_bytes, int reserved)
{
	/* space_info lock nests outside the block group lock. */
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	cache->space_info->bytes_pinned += num_bytes;
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	trace_btrfs_space_reservation(fs_info, "pinned",
				      cache->space_info->flags, num_bytes, 1);
	percpu_counter_add(&cache->space_info->total_bytes_pinned, num_bytes);
	/* Record the range so it is unpinned when the transaction commits. */
	set_extent_dirty(fs_info->pinned_extents, bytenr,
			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
	return 0;
}
/*
 * Pin down [bytenr, bytenr + num_bytes).  This function must be called
 * within a transaction.  Always returns 0.
 */
int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_block_group_cache *bg;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	BUG_ON(!bg); /* Logic error */

	pin_down_extent(fs_info, bg, bytenr, num_bytes, reserved);
	btrfs_put_block_group(bg);

	return 0;
}
/*
 * Pin an extent during log replay.  Unlike btrfs_pin_extent(), the block
 * group lookup may fail (returns -EINVAL) and the extent is additionally
 * removed from the free space cache.  This function must be called within
 * a transaction.
 */
int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
				    u64 bytenr, u64 num_bytes)
{
	struct btrfs_block_group_cache *cache;
	int ret;

	cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!cache)
		return -EINVAL;

	/*
	 * pull in the free space cache (if any) so that our pin
	 * removes the free space from the cache. We have load_only set
	 * to one because the slow code to read in the free extents does check
	 * the pinned extents.
	 */
	cache_block_group(cache, 1);

	pin_down_extent(fs_info, cache, bytenr, num_bytes, 0);

	/* remove us from the free space cache (if we're there at all) */
	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
	btrfs_put_block_group(cache);
	return ret;
}
/*
 * Make sure [start, start + num_bytes) is not handed out by the allocator:
 * if the owning block group is already cached, remove the range from its
 * free space; if caching is still in progress, remove whatever part has
 * already been cached and mark the rest as excluded so the caching thread
 * skips it.
 *
 * Returns 0 on success, -EINVAL if no block group covers @start, or an
 * error from the free-space/exclusion helpers.
 */
static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
				   u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_caching_control *caching_ctl;

	block_group = btrfs_lookup_block_group(fs_info, start);
	if (!block_group)
		return -EINVAL;

	cache_block_group(block_group, 0);
	caching_ctl = get_caching_control(block_group);

	if (!caching_ctl) {
		/* Logic error */
		BUG_ON(!block_group_cache_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
	} else {
		/* Caching in flight: split on caching_ctl->progress. */
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			/* Entirely uncached: just exclude it. */
			ret = add_excluded_extent(fs_info, start, num_bytes);
		} else if (start + num_bytes <= caching_ctl->progress) {
			/* Entirely cached: remove from free space. */
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
		} else {
			/* Straddles progress: remove the cached part ... */
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			if (ret)
				goto out_lock;

			/* ... and exclude the uncached remainder. */
			num_bytes = (start + num_bytes) -
				caching_ctl->progress;
			start = caching_ctl->progress;
			ret = add_excluded_extent(fs_info, start, num_bytes);
		}
out_lock:
		mutex_unlock(&caching_ctl->mutex);
		put_caching_control(caching_ctl);
	}
	btrfs_put_block_group(block_group);
	return ret;
}
/*
 * Walk a log tree leaf and exclude the on-disk ranges of its regular file
 * extents from free space accounting.  Only needed on MIXED_GROUPS
 * filesystems; inline extents and holes (disk_bytenr == 0) are skipped.
 *
 * Always returns 0.  NOTE(review): errors from __exclude_logged_extent()
 * are silently ignored here — confirm that is intentional.
 */
int btrfs_exclude_logged_extents(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	struct btrfs_file_extent_item *item;
	struct btrfs_key key;
	int found_type;
	int i;

	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS))
		return 0;

	for (i = 0; i < btrfs_header_nritems(eb); i++) {
		btrfs_item_key_to_cpu(eb, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(eb, item);
		if (found_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* disk_bytenr == 0 marks a hole; nothing on disk to exclude. */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			continue;
		/* Reuse @key to carry the disk bytenr/length pair. */
		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		__exclude_logged_extent(fs_info, key.objectid, key.offset);
	}

	return 0;
}
/* Account one more in-flight reservation against this block group. */
static void
btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
{
	atomic_inc(&bg->reservations);
}
/*
 * Drop one in-flight reservation from the block group containing @start,
 * waking anyone in btrfs_wait_block_group_reservations() when the count
 * reaches zero.
 */
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group_cache *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_atomic_t(&bg->reservations);
	btrfs_put_block_group(bg);
}
/*
 * Wait until all in-flight reservations against a (read-only, data) block
 * group have been dropped.  Only meaningful for data block groups; metadata
 * groups return immediately.
 */
void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have had allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	/* Write-lock/unlock acts as a barrier flushing out read holders. */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_on_atomic_t(&bg->reservations, atomic_t_wait,
			 TASK_UNINTERRUPTIBLE);
}
/**
 * btrfs_add_reserved_bytes - update the block_group and space info counters
 * @cache: The cache we are manipulating
 * @ram_bytes: The number of bytes of file content, and will be same to
 * @num_bytes except for the compress path.
 * @num_bytes: The number of bytes in question
 * @delalloc: The blocks are allocated for the delalloc write
 *
 * This is called by the allocator when it reserves space. If this is a
 * reservation and the block group has become read only we cannot make the
 * reservation and return -EAGAIN, otherwise this function always succeeds.
 */
static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
				    u64 ram_bytes, u64 num_bytes, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	/* space_info lock nests outside the block group lock. */
	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro) {
		/* Read-only groups cannot take new reservations. */
		ret = -EAGAIN;
	} else {
		cache->reserved += num_bytes;
		space_info->bytes_reserved += num_bytes;
		trace_btrfs_space_reservation(cache->fs_info,
					      "space_info", space_info->flags,
					      ram_bytes, 0);
		/* "may use" space is now concretely reserved. */
		space_info->bytes_may_use -= ram_bytes;
		if (delalloc)
			cache->delalloc_bytes += num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}
/**
 * btrfs_free_reserved_bytes - update the block_group and space info counters
 * @cache: The cache we are manipulating
 * @num_bytes: The number of bytes in question
 * @delalloc: The blocks are allocated for the delalloc write
 *
 * This is called by somebody who is freeing space that was never actually
 * used on disk. For example if you reserve some space for a new leaf in
 * transaction A and before transaction A commits you free that leaf, you
 * call this to drop the reservation again.
 *
 * Always returns 0.
 */
static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
				     u64 num_bytes, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	/* Space freed inside a now-read-only group counts as readonly. */
	if (cache->ro)
		space_info->bytes_readonly += num_bytes;
	cache->reserved -= num_bytes;
	space_info->bytes_reserved -= num_bytes;
	if (delalloc)
		cache->delalloc_bytes -= num_bytes;
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}
/*
 * Prepare the pinned-extent state for a transaction commit: fix up
 * last_byte_to_unpin for every block group that is still caching, then
 * swap the active pinned_extents tree so new pins during the commit land
 * in the other freed_extents tree.
 */
void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info)
{
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *cache;

	down_write(&fs_info->commit_root_sem);

	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (block_group_cache_done(cache)) {
			/* Fully cached: everything may be unpinned. */
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			put_caching_control(caching_ctl);
		} else {
			/* Still caching: only unpin what was cached so far. */
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}

	/* Flip the active pinned tree for the next transaction. */
	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];

	up_write(&fs_info->commit_root_sem);

	update_global_block_rsv(fs_info);
}
/*
 * Return the free cluster to use for @space_info (NULL when clustering is
 * not used), and set *@empty_cluster to the cluster size implied by the
 * mount options.  Mixed block groups never use clustering; metadata always
 * does (2M on SSD, 64K otherwise); data only with SSD_SPREAD (2M).
 */
static struct btrfs_free_cluster *
fetch_cluster_info(struct btrfs_fs_info *fs_info,
		   struct btrfs_space_info *space_info, u64 *empty_cluster)
{
	struct btrfs_free_cluster *cluster = NULL;

	*empty_cluster = 0;

	if (btrfs_mixed_space_info(space_info))
		return NULL;

	if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		cluster = &fs_info->meta_alloc_cluster;
		*empty_cluster = btrfs_test_opt(fs_info, SSD) ? SZ_2M : SZ_64K;
	} else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) &&
		   btrfs_test_opt(fs_info, SSD_SPREAD)) {
		cluster = &fs_info->data_alloc_cluster;
		*empty_cluster = SZ_2M;
	}

	return cluster;
}
/*
 * Unpin [start, end], walking it block group by block group.  Updates the
 * pinned counters and, when @return_free_space is true, hands the space
 * back to the free space cache / global block reserve / waiting tickets.
 * Called at transaction commit (return_free_space = true) and from discard
 * paths.  Always returns 0.
 */
static int unpin_extent_range(struct btrfs_fs_info *fs_info,
			      u64 start, u64 end,
			      const bool return_free_space)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	struct btrfs_free_cluster *cluster = NULL;
	u64 len;
	u64 total_unpinned = 0;
	u64 empty_cluster = 0;
	bool readonly;

	while (start <= end) {
		readonly = false;
		/* Advance to the next block group when we run off this one. */
		if (!cache ||
		    start >= cache->key.objectid + cache->key.offset) {
			if (cache)
				btrfs_put_block_group(cache);
			total_unpinned = 0;
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache); /* Logic error */

			cluster = fetch_cluster_info(fs_info,
						     cache->space_info,
						     &empty_cluster);
			empty_cluster <<= 1;
		}

		/* Clamp to this block group and to [start, end]. */
		len = cache->key.objectid + cache->key.offset - start;
		len = min(len, end + 1 - start);

		/* Only ranges below last_byte_to_unpin go back as free space. */
		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			if (return_free_space)
				btrfs_add_free_space(cache, start, len);
		}

		start += len;
		total_unpinned += len;
		space_info = cache->space_info;

		/*
		 * If this space cluster has been marked as fragmented and we've
		 * unpinned enough in this block group to potentially allow a
		 * cluster to be created inside of it go ahead and clear the
		 * fragmented check.
		 */
		if (cluster && cluster->fragmented &&
		    total_unpinned > empty_cluster) {
			spin_lock(&cluster->lock);
			cluster->fragmented = 0;
			spin_unlock(&cluster->lock);
		}

		spin_lock(&space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		space_info->bytes_pinned -= len;

		trace_btrfs_space_reservation(fs_info, "pinned",
					      space_info->flags, len, 0);
		/* New free space: allow larger allocation attempts again. */
		space_info->max_extent_size = 0;
		percpu_counter_add(&space_info->total_bytes_pinned, -len);
		if (cache->ro) {
			space_info->bytes_readonly += len;
			readonly = true;
		}
		spin_unlock(&cache->lock);
		if (!readonly && return_free_space &&
		    global_rsv->space_info == space_info) {
			u64 to_add = len;

			/* Refill the global block reserve first ... */
			spin_lock(&global_rsv->lock);
			if (!global_rsv->full) {
				to_add = min(len, global_rsv->size -
					     global_rsv->reserved);
				global_rsv->reserved += to_add;
				space_info->bytes_may_use += to_add;
				if (global_rsv->reserved >= global_rsv->size)
					global_rsv->full = 1;
				trace_btrfs_space_reservation(fs_info,
							      "space_info",
							      space_info->flags,
							      to_add, 1);
				len -= to_add;
			}
			spin_unlock(&global_rsv->lock);
			/* Add to any tickets we may have */
			if (len)
				space_info_add_new_bytes(fs_info, space_info,
							 len);
		}
		spin_unlock(&space_info->lock);
	}

	if (cache)
		btrfs_put_block_group(cache);
	return 0;
}
/*
 * Final step of a transaction commit for the extent allocator: unpin (and
 * optionally discard) everything recorded in the inactive freed_extents
 * tree, then discard and drop any block groups deleted in this
 * transaction.  Always returns 0; discard failures are logged only.
 */
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *block_group, *tmp;
	struct list_head *deleted_bgs;
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;

	/*
	 * pinned_extents was flipped in btrfs_prepare_extent_commit(); the
	 * tree to unpin is the one that is no longer active.
	 */
	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		unpin = &fs_info->freed_extents[1];
	else
		unpin = &fs_info->freed_extents[0];

	while (!trans->aborted) {
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, NULL);
		if (ret) {
			/* No more dirty ranges: done. */
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			break;
		}

		/* Best-effort discard; result is not propagated. */
		if (btrfs_test_opt(fs_info, DISCARD))
			ret = btrfs_discard_extent(fs_info, start,
						   end + 1 - start, NULL);

		clear_extent_dirty(unpin, start, end);
		unpin_extent_range(fs_info, start, end, true);
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
		cond_resched();
	}

	/*
	 * Transaction is finished. We don't need the lock anymore. We
	 * do need to clean up the block groups in case of a transaction
	 * abort.
	 */
	deleted_bgs = &trans->transaction->deleted_bgs;
	list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
		u64 trimmed = 0;

		ret = -EROFS;
		if (!trans->aborted)
			ret = btrfs_discard_extent(fs_info,
						   block_group->key.objectid,
						   block_group->key.offset,
						   &trimmed);

		list_del_init(&block_group->bg_list);
		btrfs_put_block_group_trimming(block_group);
		btrfs_put_block_group(block_group);

		if (ret) {
			const char *errstr = btrfs_decode_error(ret);
			btrfs_warn(fs_info,
			   "discard failed while removing blockgroup: errno=%d %s",
				   ret, errstr);
		}
	}

	return 0;
}
/*
 * Drop @refs_to_drop references from the extent described by the delayed
 * ref @node.  If the extent's reference count drops to zero, the extent
 * item (and any separate backref item) is deleted, csums are removed for
 * data extents, and the space is returned via the free space tree and
 * block group accounting.  Any failure aborts the transaction.
 */
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *info,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;	/* extent item, plus maybe a backref item */
	u32 item_size;
	u64 refs;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	int last_ref = 0;
	bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->leave_spinning = 1;

	/* Data extents are owned by objectids >= FIRST_FREE_OBJECTID. */
	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

	if (is_data)
		skinny_metadata = false;

	ret = lookup_extent_backref(trans, info, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		/*
		 * Found the backref.  The extent item should sit shortly
		 * before it in the same leaf; scan backwards a bounded
		 * number of slots looking for it.
		 */
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			/* Skinny metadata keys carry the level in offset. */
			if (key.type == BTRFS_METADATA_ITEM_KEY &&
			    key.offset == owner_objectid) {
				found_extent = 1;
				break;
			}
			/* Give up after a few slots; do a real search below. */
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))
			found_extent = 0;
#endif
		if (!found_extent) {
			/*
			 * Backref is a separate item: drop it first, then
			 * search for the extent item itself.
			 */
			BUG_ON(iref);
			ret = remove_extent_backref(trans, info, path, NULL,
						    refs_to_drop,
						    is_data, &last_ref);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			btrfs_release_path(path);
			path->leave_spinning = 1;

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			if (!is_data && skinny_metadata) {
				key.type = BTRFS_METADATA_ITEM_KEY;
				key.offset = owner_objectid;
			}

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret > 0 && skinny_metadata && path->slots[0]) {
				/*
				 * Couldn't find our skinny metadata item,
				 * see if we have ye olde extent item.
				 */
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == num_bytes)
					ret = 0;
			}

			if (ret > 0 && skinny_metadata) {
				/* Retry once with the non-skinny key. */
				skinny_metadata = false;
				key.objectid = bytenr;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				key.offset = num_bytes;
				btrfs_release_path(path);
				ret = btrfs_search_slot(trans, extent_root,
							&key, path, -1, 1);
			}

			if (ret) {
				btrfs_err(info,
					  "umm, got %d back from search, was looking for %llu",
					  ret, bytenr);
				if (ret > 0)
					btrfs_print_leaf(path->nodes[0]);
			}
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			extent_slot = path->slots[0];
		}
	} else if (WARN_ON(ret == -ENOENT)) {
		btrfs_print_leaf(path->nodes[0]);
		btrfs_err(info,
			"unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
			bytenr, parent, root_objectid, owner_objectid,
			owner_offset);
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	/* Convert a v0 extent item in place, then re-find it. */
	if (item_size < sizeof(*ei)) {
		BUG_ON(found_extent || extent_slot != path->slots[0]);
		ret = convert_extent_item_v0(trans, info, path, owner_objectid,
					     0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		btrfs_release_path(path);
		path->leave_spinning = 1;

		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;

		ret = btrfs_search_slot(trans, extent_root, &key, path,
					-1, 1);
		if (ret) {
			btrfs_err(info,
				  "umm, got %d back from search, was looking for %llu",
				  ret, bytenr);
			btrfs_print_leaf(path->nodes[0]);
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		extent_slot = path->slots[0];
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, extent_slot);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
	    key.type == BTRFS_EXTENT_ITEM_KEY) {
		/* Non-skinny metadata: a tree_block_info follows the item. */
		struct btrfs_tree_block_info *bi;
		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}

	refs = btrfs_extent_refs(leaf, ei);
	if (refs < refs_to_drop) {
		btrfs_err(info,
			  "trying to drop %d refs but we only have %Lu for bytenr %Lu",
			  refs_to_drop, refs, bytenr);
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	refs -= refs_to_drop;

	if (refs > 0) {
		/* Still referenced: update the count and drop the backref. */
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
		 */
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, info, path,
						    iref, refs_to_drop,
						    is_data, &last_ref);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
		}
	} else {
		/* Last reference: delete the extent item (and backref). */
		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				/* Separate backref follows the extent item. */
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		last_ref = 1;
		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		btrfs_release_path(path);

		if (is_data) {
			ret = btrfs_del_csums(trans, info, bytenr, num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
		}

		ret = add_to_free_space_tree(trans, info, bytenr, num_bytes);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		ret = update_block_group(trans, info, bytenr, num_bytes, 0);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	btrfs_release_path(path);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * when we free an block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well. This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 *
 * Returns 1 when the head was removed and its reserved space must be
 * handled by the caller (must_insert_reserved was set), 0 otherwise.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (!head)
		goto out_delayed_unlock;

	spin_lock(&head->lock);
	/* Other pending refs for this extent: leave the head alone. */
	if (!RB_EMPTY_ROOT(&head->ref_tree))
		goto out;

	if (head->extent_op) {
		/* A pending extent op we can't discard: bail out. */
		if (!head->must_insert_reserved)
			goto out;
		btrfs_free_delayed_extent_op(head->extent_op);
		head->extent_op = NULL;
	}

	/*
	 * waiting for the lock here would deadlock. If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries. Go
	 * ahead and process it.
	 */
	rb_erase(&head->href_node, &delayed_refs->href_root);
	RB_CLEAR_NODE(&head->href_node);
	atomic_dec(&delayed_refs->num_entries);

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (head->processing == 0)
		delayed_refs->num_heads_ready--;
	head->processing = 0;
	spin_unlock(&head->lock);
	spin_unlock(&delayed_refs->lock);

	BUG_ON(head->extent_op);
	if (head->must_insert_reserved)
		ret = 1;

	mutex_unlock(&head->mutex);
	btrfs_put_delayed_ref_head(head);
	return ret;
out:
	spin_unlock(&head->lock);

out_delayed_unlock:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
/*
 * Free a tree block: queue a delayed ref drop for non-log trees and, when
 * this was the last reference on a block created in the current
 * transaction, reclaim the space immediately (pin it if the block was
 * already written, otherwise return it straight to the free space cache).
 */
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int pin = 1;
	int ret;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		int old_ref_mod, new_ref_mod;

		btrfs_ref_tree_mod(root, buf->start, buf->len, parent,
				   root->root_key.objectid,
				   btrfs_header_level(buf), 0,
				   BTRFS_DROP_DELAYED_REF);
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, buf->start,
						 buf->len, parent,
						 root->root_key.objectid,
						 btrfs_header_level(buf),
						 BTRFS_DROP_DELAYED_REF, NULL,
						 &old_ref_mod, &new_ref_mod);
		BUG_ON(ret); /* -ENOMEM */
		/* Pin only when this drop took the ref count below zero. */
		pin = old_ref_mod >= 0 && new_ref_mod < 0;
	}

	if (last_ref && btrfs_header_generation(buf) == trans->transid) {
		struct btrfs_block_group_cache *cache;

		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			/* ret == 0: delayed refs still pending; leave it. */
			ret = check_ref_cleanup(trans, buf->start);
			if (!ret)
				goto out;
		}

		pin = 0;
		cache = btrfs_lookup_block_group(fs_info, buf->start);

		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			/* Already on disk: must stay pinned until commit. */
			pin_down_extent(fs_info, cache, buf->start,
					buf->len, 1);
			btrfs_put_block_group(cache);
			goto out;
		}

		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));

		/* Never written: the space can be reused right away. */
		btrfs_add_free_space(cache, buf->start, buf->len);
		btrfs_free_reserved_bytes(cache, buf->len, 0);
		btrfs_put_block_group(cache);
		trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
	}
out:
	if (pin)
		add_pinned_bytes(fs_info, buf->len, btrfs_header_level(buf),
				 root->root_key.objectid);

	if (last_ref) {
		/*
		 * Deleting the buffer, clear the corrupt flag since it doesn't
		 * matter anymore.
		 */
		clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
	}
}
/*
 * Drop a reference on an extent: pin immediately for tree log blocks,
 * otherwise queue a delayed tree or data ref drop.  Pinned byte
 * accounting is updated when the drop took the ref count negative.
 *
 * Can return -ENOMEM.
 */
int btrfs_free_extent(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
		      u64 owner, u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int old_ref_mod, new_ref_mod;
	int ret;

	if (btrfs_is_testing(fs_info))
		return 0;

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID)
		btrfs_ref_tree_mod(root, bytenr, num_bytes, parent,
				   root_objectid, owner, offset,
				   BTRFS_DROP_DELAYED_REF);

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
		/* unlocks the pinned mutex */
		btrfs_pin_extent(fs_info, bytenr, num_bytes, 1);
		old_ref_mod = new_ref_mod = 0;
		ret = 0;
	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		/* Metadata: owner is the tree level. */
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
						 num_bytes, parent,
						 root_objectid, (int)owner,
						 BTRFS_DROP_DELAYED_REF, NULL,
						 &old_ref_mod, &new_ref_mod);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
						 num_bytes, parent,
						 root_objectid, owner, offset,
						 0, BTRFS_DROP_DELAYED_REF,
						 &old_ref_mod, &new_ref_mod);
	}

	/* Ref count went negative: these bytes are now pinned. */
	if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
		add_pinned_bytes(fs_info, num_bytes, owner, root_objectid);

	return ret;
}
/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once.  Sleep until either caching
 * finishes or at least num_bytes of free space has shown up in the group.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before
 * using any of the information in this block group.
 */
static noinline void
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
				u64 num_bytes)
{
	struct btrfs_caching_control *ctl = get_caching_control(cache);

	/* No caching control: caching already finished (or never started). */
	if (!ctl)
		return;

	wait_event(ctl->wait, block_group_cache_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	put_caching_control(ctl);
}
/*
 * Block until the group's free space caching has fully completed.
 * Returns 0 on success or -EIO if caching ended in BTRFS_CACHE_ERROR.
 */
static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl = get_caching_control(cache);
	int err = 0;

	/* Already done (or never started): just report the cached state. */
	if (!ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;

	wait_event(ctl->wait, block_group_cache_done(cache));
	if (cache->cached == BTRFS_CACHE_ERROR)
		err = -EIO;
	put_caching_control(ctl);

	return err;
}
/*
 * Map block group profile @flags to the corresponding btrfs_raid_types
 * index, checking profiles in priority order and defaulting to SINGLE.
 */
int __get_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}
/* Map a block group's profile flags to its btrfs_raid_types index. */
int get_block_group_index(struct btrfs_block_group_cache *cache)
{
	return __get_raid_index(cache->flags);
}
/* Human-readable profile names, indexed by enum btrfs_raid_types. */
static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10]	= "raid10",
	[BTRFS_RAID_RAID1]	= "raid1",
	[BTRFS_RAID_DUP]	= "dup",
	[BTRFS_RAID_RAID0]	= "raid0",
	[BTRFS_RAID_SINGLE]	= "single",
	[BTRFS_RAID_RAID5]	= "raid5",
	[BTRFS_RAID_RAID6]	= "raid6",
};
/* Return the printable name for @type, or NULL if out of range. */
static const char *get_raid_name(enum btrfs_raid_types type)
{
	return (type < BTRFS_NR_RAID_TYPES) ? btrfs_raid_type_names[type]
					    : NULL;
}
/*
 * Retry stages of the extent allocator's search loop; values are compared
 * numerically, with higher values permitting progressively more expensive
 * work (presumably: wait on caching, allocate a new chunk, and finally
 * drop the empty-size requirement — confirm against find_free_extent()).
 */
enum btrfs_loop_type {
	LOOP_CACHING_NOWAIT = 0,
	LOOP_CACHING_WAIT = 1,
	LOOP_ALLOC_CHUNK = 2,
	LOOP_NO_EMPTY_SIZE = 3,
};
/* Take the group's data_rwsem (read) when this is a delalloc allocation. */
static inline void
btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
		       int delalloc)
{
	if (delalloc)
		down_read(&cache->data_rwsem);
}
/*
 * Take a reference on the block group and, for delalloc allocations, its
 * data_rwsem (read).  Paired with btrfs_release_block_group().
 */
static inline void
btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
		       int delalloc)
{
	btrfs_get_block_group(cache);
	if (delalloc)
		down_read(&cache->data_rwsem);
}
/*
 * Return the cluster's current block group with cluster->refill_lock held,
 * taking a reference (and, for delalloc, the group's data_rwsem) when it
 * differs from @block_group.  Returns NULL if the cluster has no block
 * group.  The loop re-validates after any lock drop, since the cluster's
 * block group may change while refill_lock is released.
 */
static struct btrfs_block_group_cache *
btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
		   struct btrfs_free_cluster *cluster,
		   int delalloc)
{
	struct btrfs_block_group_cache *used_bg = NULL;

	spin_lock(&cluster->refill_lock);
	while (1) {
		used_bg = cluster->block_group;
		if (!used_bg)
			return NULL;

		/* Caller already holds a ref/lock on its own group. */
		if (used_bg == block_group)
			return used_bg;

		btrfs_get_block_group(used_bg);

		if (!delalloc)
			return used_bg;

		if (down_read_trylock(&used_bg->data_rwsem))
			return used_bg;

		/* Must drop refill_lock to block on the rwsem. */
		spin_unlock(&cluster->refill_lock);

		/* We should only have one-level nested. */
		down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);

		spin_lock(&cluster->refill_lock);
		if (used_bg == cluster->block_group)
			return used_bg;

		/* Cluster moved on while we slept: undo and retry. */
		up_read(&used_bg->data_rwsem);
		btrfs_put_block_group(used_bg);
	}
}
/*
 * Drop the delalloc data_rwsem (if held) and the block group reference.
 * Counterpart of btrfs_grab_block_group().
 */
static inline void
btrfs_release_block_group(struct btrfs_block_group_cache *cache,
			  int delalloc)
{
	if (delalloc)
		up_read(&cache->data_rwsem);
	btrfs_put_block_group(cache);
}
/*
 * walks the btree of allocated extents and find a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == start position
 * ins->type == BTRFS_EXTENT_ITEM_KEY
 * ins->offset == the size of the hole.
 * Any available blocks before search_start are skipped.
 *
 * If there is no suitable free space, we will record the max size of
 * the free space extent currently.
 */
static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
				u64 ram_bytes, u64 num_bytes, u64 empty_size,
				u64 hint_byte, struct btrfs_key *ins,
				u64 flags, int delalloc)
{
	int ret = 0;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_free_cluster *last_ptr = NULL;
	struct btrfs_block_group_cache *block_group = NULL;
	u64 search_start = 0;
	u64 max_extent_size = 0;	/* largest free extent seen, for ENOSPC hinting */
	u64 empty_cluster = 0;
	struct btrfs_space_info *space_info;
	int loop = 0;			/* current LOOP_* stage, see below */
	int index = __get_raid_index(flags);
	bool failed_cluster_refill = false;
	bool failed_alloc = false;
	bool use_cluster = true;
	bool have_caching_bg = false;
	bool orig_have_caching_bg = false;
	bool full_search = false;

	WARN_ON(num_bytes < fs_info->sectorsize);
	ins->type = BTRFS_EXTENT_ITEM_KEY;
	ins->objectid = 0;
	ins->offset = 0;

	trace_find_free_extent(fs_info, num_bytes, empty_size, flags);

	space_info = __find_space_info(fs_info, flags);
	if (!space_info) {
		btrfs_err(fs_info, "No space info for %llu", flags);
		return -ENOSPC;
	}

	/*
	 * If our free space is heavily fragmented we may not be able to make
	 * big contiguous allocations, so instead of doing the expensive search
	 * for free space, simply return ENOSPC with our max_extent_size so we
	 * can go ahead and search for a more manageable chunk.
	 *
	 * If our max_extent_size is large enough for our allocation simply
	 * disable clustering since we will likely not be able to find enough
	 * space to create a cluster and induce latency trying.
	 */
	if (unlikely(space_info->max_extent_size)) {
		spin_lock(&space_info->lock);
		if (space_info->max_extent_size &&
		    num_bytes > space_info->max_extent_size) {
			ins->offset = space_info->max_extent_size;
			spin_unlock(&space_info->lock);
			return -ENOSPC;
		} else if (space_info->max_extent_size) {
			use_cluster = false;
		}
		spin_unlock(&space_info->lock);
	}

	last_ptr = fetch_cluster_info(fs_info, space_info, &empty_cluster);
	if (last_ptr) {
		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			hint_byte = last_ptr->window_start;
		if (last_ptr->fragmented) {
			/*
			 * We still set window_start so we can keep track of the
			 * last place we found an allocation to try and save
			 * some time.
			 */
			hint_byte = last_ptr->window_start;
			use_cluster = false;
		}
		spin_unlock(&last_ptr->lock);
	}

	search_start = max(search_start, first_logical_byte(fs_info, 0));
	search_start = max(search_start, hint_byte);
	if (search_start == hint_byte) {
		/* Try the block group containing the hint first. */
		block_group = btrfs_lookup_block_group(fs_info, search_start);
		/*
		 * we don't want to use the block group if it doesn't match our
		 * allocation bits, or if its not cached.
		 *
		 * However if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is cached.
		 */
		if (block_group && block_group_bits(block_group, flags) &&
		    block_group->cached != BTRFS_CACHE_NO) {
			down_read(&space_info->groups_sem);
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
				/*
				 * someone is removing this block group,
				 * we can't jump into the have_block_group
				 * target because our list pointers are not
				 * valid
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
			} else {
				index = get_block_group_index(block_group);
				btrfs_lock_block_group(block_group, delalloc);
				goto have_block_group;
			}
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}
search:
	/* Scan every block group of the current raid index. */
	have_caching_bg = false;
	if (index == 0 || index == __get_raid_index(flags))
		full_search = true;
	down_read(&space_info->groups_sem);
	list_for_each_entry(block_group, &space_info->block_groups[index],
			    list) {
		u64 offset;
		int cached;

		/* If the block group is read-only, we can skip it entirely. */
		if (unlikely(block_group->ro))
			continue;

		btrfs_grab_block_group(block_group, delalloc);
		search_start = block_group->key.objectid;

		/*
		 * this can happen if we end up cycling through all the
		 * raid types, but we want to make sure we only allocate
		 * for the proper type.
		 */
		if (!block_group_bits(block_group, flags)) {
			u64 extra = BTRFS_BLOCK_GROUP_DUP |
				BTRFS_BLOCK_GROUP_RAID1 |
				BTRFS_BLOCK_GROUP_RAID5 |
				BTRFS_BLOCK_GROUP_RAID6 |
				BTRFS_BLOCK_GROUP_RAID10;

			/*
			 * if they asked for extra copies and this block group
			 * doesn't provide them, bail.  This does allow us to
			 * fill raid0 from raid1.
			 */
			if ((flags & extra) && !(block_group->flags & extra))
				goto loop;
		}

have_block_group:
		cached = block_group_cache_done(block_group);
		if (unlikely(!cached)) {
			/* Kick off (async) caching of this group's free space. */
			have_caching_bg = true;
			ret = cache_block_group(block_group, 0);
			BUG_ON(ret < 0);
			ret = 0;
		}

		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
			goto loop;

		/*
		 * Ok we want to try and use the cluster allocator, so
		 * lets look there
		 */
		if (last_ptr && use_cluster) {
			struct btrfs_block_group_cache *used_block_group;
			unsigned long aligned_cluster;
			/*
			 * the refill lock keeps out other
			 * people trying to start a new cluster
			 */
			used_block_group = btrfs_lock_cluster(block_group,
							      last_ptr,
							      delalloc);
			if (!used_block_group)
				goto refill_cluster;

			if (used_block_group != block_group &&
			    (used_block_group->ro ||
			     !block_group_bits(used_block_group, flags)))
				goto release_cluster;

			offset = btrfs_alloc_from_cluster(used_block_group,
						last_ptr,
						num_bytes,
						used_block_group->key.objectid,
						&max_extent_size);
			if (offset) {
				/* we have a block, we're done */
				spin_unlock(&last_ptr->refill_lock);
				trace_btrfs_reserve_extent_cluster(fs_info,
						used_block_group,
						search_start, num_bytes);
				if (used_block_group != block_group) {
					btrfs_release_block_group(block_group,
								  delalloc);
					block_group = used_block_group;
				}
				goto checks;
			}

			WARN_ON(last_ptr->block_group != used_block_group);
release_cluster:
			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
			 * set up a new clusters, so lets just skip it
			 * and let the allocator find whatever block
			 * it can find. If we reach this point, we
			 * will have tried the cluster allocator
			 * plenty of times and not have found
			 * anything, so we are likely way too
			 * fragmented for the clustering stuff to find
			 * anything.
			 *
			 * However, if the cluster is taken from the
			 * current block group, release the cluster
			 * first, so that we stand a better chance of
			 * succeeding in the unclustered
			 * allocation.  */
			if (loop >= LOOP_NO_EMPTY_SIZE &&
			    used_block_group != block_group) {
				spin_unlock(&last_ptr->refill_lock);
				btrfs_release_block_group(used_block_group,
							  delalloc);
				goto unclustered_alloc;
			}

			/*
			 * this cluster didn't work out, free it and
			 * start over
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);

			if (used_block_group != block_group)
				btrfs_release_block_group(used_block_group,
							  delalloc);
refill_cluster:
			if (loop >= LOOP_NO_EMPTY_SIZE) {
				spin_unlock(&last_ptr->refill_lock);
				goto unclustered_alloc;
			}

			aligned_cluster = max_t(unsigned long,
						empty_cluster + empty_size,
					      block_group->full_stripe_len);

			/* allocate a cluster in this block group */
			ret = btrfs_find_space_cluster(fs_info, block_group,
						       last_ptr, search_start,
						       num_bytes,
						       aligned_cluster);
			if (ret == 0) {
				/*
				 * now pull our allocation out of this
				 * cluster
				 */
				offset = btrfs_alloc_from_cluster(block_group,
							last_ptr,
							num_bytes,
							search_start,
							&max_extent_size);
				if (offset) {
					/* we found one, proceed */
					spin_unlock(&last_ptr->refill_lock);
					trace_btrfs_reserve_extent_cluster(fs_info,
						block_group, search_start,
						num_bytes);
					goto checks;
				}
			} else if (!cached && loop > LOOP_CACHING_NOWAIT
				   && !failed_cluster_refill) {
				/* Wait for caching to make progress, then retry once. */
				spin_unlock(&last_ptr->refill_lock);

				failed_cluster_refill = true;
				wait_block_group_cache_progress(block_group,
				       num_bytes + empty_cluster + empty_size);
				goto have_block_group;
			}

			/*
			 * at this point we either didn't find a cluster
			 * or we weren't able to allocate a block from our
			 * cluster.  Free the cluster we've been trying
			 * to use, and go to the next block group
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);
			spin_unlock(&last_ptr->refill_lock);
			goto loop;
		}

unclustered_alloc:
		/*
		 * We are doing an unclustered alloc, set the fragmented flag so
		 * we don't bother trying to setup a cluster again until we get
		 * more space.
		 */
		if (unlikely(last_ptr)) {
			spin_lock(&last_ptr->lock);
			last_ptr->fragmented = 1;
			spin_unlock(&last_ptr->lock);
		}
		if (cached) {
			/* Cheap pre-check: skip if the group can't possibly fit us. */
			struct btrfs_free_space_ctl *ctl =
				block_group->free_space_ctl;

			spin_lock(&ctl->tree_lock);
			if (ctl->free_space <
			    num_bytes + empty_cluster + empty_size) {
				if (ctl->free_space > max_extent_size)
					max_extent_size = ctl->free_space;
				spin_unlock(&ctl->tree_lock);
				goto loop;
			}
			spin_unlock(&ctl->tree_lock);
		}

		offset = btrfs_find_space_for_alloc(block_group, search_start,
						    num_bytes, empty_size,
						    &max_extent_size);
		/*
		 * If we didn't find a chunk, and we haven't failed on this
		 * block group before, and this block group is in the middle of
		 * caching and we are ok with waiting, then go ahead and wait
		 * for progress to be made, and set failed_alloc to true.
		 *
		 * If failed_alloc is true then we've already waited on this
		 * block group once and should move on to the next block group.
		 */
		if (!offset && !failed_alloc && !cached &&
		    loop > LOOP_CACHING_NOWAIT) {
			wait_block_group_cache_progress(block_group,
						num_bytes + empty_size);
			failed_alloc = true;
			goto have_block_group;
		} else if (!offset) {
			goto loop;
		}
checks:
		/* Found space at @offset; align, trim and reserve it. */
		search_start = ALIGN(offset, fs_info->stripesize);

		/* move on to the next group */
		if (search_start + num_bytes >
		    block_group->key.objectid + block_group->key.offset) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}

		/* Return the alignment slack to the free space tree. */
		if (offset < search_start)
			btrfs_add_free_space(block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);

		ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
				num_bytes, delalloc);
		if (ret == -EAGAIN) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}
		btrfs_inc_block_group_reservations(block_group);

		/* we are all good, lets return */
		ins->objectid = search_start;
		ins->offset = num_bytes;

		trace_btrfs_reserve_extent(fs_info, block_group,
					   search_start, num_bytes);
		btrfs_release_block_group(block_group, delalloc);
		break;
loop:
		/* This group didn't work out; reset per-group state and move on. */
		failed_cluster_refill = false;
		failed_alloc = false;
		BUG_ON(index != get_block_group_index(block_group));
		btrfs_release_block_group(block_group, delalloc);
		cond_resched();
	}
	up_read(&space_info->groups_sem);

	if ((loop == LOOP_CACHING_NOWAIT) && have_caching_bg
		&& !orig_have_caching_bg)
		orig_have_caching_bg = true;

	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
		goto search;

	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
		goto search;

	/*
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
	 *			caching kthreads as we move along
	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
	 *			again
	 */
	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
		index = 0;
		if (loop == LOOP_CACHING_NOWAIT) {
			/*
			 * We want to skip the LOOP_CACHING_WAIT step if we
			 * don't have any uncached bgs and we've already done a
			 * full search through.
			 */
			if (orig_have_caching_bg || !full_search)
				loop = LOOP_CACHING_WAIT;
			else
				loop = LOOP_ALLOC_CHUNK;
		} else {
			loop++;
		}

		if (loop == LOOP_ALLOC_CHUNK) {
			struct btrfs_trans_handle *trans;
			int exist = 0;

			/* Reuse a running transaction if the caller has one. */
			trans = current->journal_info;
			if (trans)
				exist = 1;
			else
				trans = btrfs_join_transaction(root);

			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}

			ret = do_chunk_alloc(trans, fs_info, flags,
					     CHUNK_ALLOC_FORCE);

			/*
			 * If we can't allocate a new chunk we've already looped
			 * through at least once, move on to the NO_EMPTY_SIZE
			 * case.
			 */
			if (ret == -ENOSPC)
				loop = LOOP_NO_EMPTY_SIZE;

			/*
			 * Do not bail out on ENOSPC since we
			 * can do more things.
			 */
			if (ret < 0 && ret != -ENOSPC)
				btrfs_abort_transaction(trans, ret);
			else
				ret = 0;
			if (!exist)
				btrfs_end_transaction(trans);
			if (ret)
				goto out;
		}

		if (loop == LOOP_NO_EMPTY_SIZE) {
			/*
			 * Don't loop again if we already have no empty_size and
			 * no empty_cluster.
			 */
			if (empty_size == 0 &&
			    empty_cluster == 0) {
				ret = -ENOSPC;
				goto out;
			}
			empty_size = 0;
			empty_cluster = 0;
		}

		goto search;
	} else if (!ins->objectid) {
		ret = -ENOSPC;
	} else if (ins->objectid) {
		/* Remember where we ended up so the next search starts nearby. */
		if (!use_cluster && last_ptr) {
			spin_lock(&last_ptr->lock);
			last_ptr->window_start = ins->objectid;
			spin_unlock(&last_ptr->lock);
		}
		ret = 0;
	}
out:
	if (ret == -ENOSPC) {
		/* Publish the largest extent we saw so callers can retry smaller. */
		spin_lock(&space_info->lock);
		space_info->max_extent_size = max_extent_size;
		spin_unlock(&space_info->lock);
		ins->offset = max_extent_size;
	}
	return ret;
}
/*
 * Log a summary of @info to dmesg and, when @dump_block_groups is set,
 * one line plus a free-space dump for every block group in it, across
 * all raid-type lists.
 */
static void dump_space_info(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups)
{
	struct btrfs_block_group_cache *bg;
	int raid_index;

	spin_lock(&info->lock);
	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
		   info->flags,
		   info->total_bytes - btrfs_space_info_used(info, true),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
		info->total_bytes, info->bytes_used, info->bytes_pinned,
		info->bytes_reserved, info->bytes_may_use,
		info->bytes_readonly);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
	for (raid_index = 0; raid_index < BTRFS_NR_RAID_TYPES; raid_index++) {
		list_for_each_entry(bg, &info->block_groups[raid_index],
				    list) {
			spin_lock(&bg->lock);
			btrfs_info(fs_info,
				"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
				bg->key.objectid, bg->key.offset,
				btrfs_block_group_used(&bg->item), bg->pinned,
				bg->reserved, bg->ro ? "[readonly]" : "");
			btrfs_dump_free_space(bg, bytes);
			spin_unlock(&bg->lock);
		}
	}
	up_read(&info->groups_sem);
}
/*
 * Reserve an extent of at least @min_alloc_size and at most @num_bytes
 * bytes, recording the result in @ins.  On -ENOSPC the request is halved
 * (clamped to the reported largest free extent and rounded down to the
 * sector size) and retried until @min_alloc_size is reached.  With the
 * ENOSPC_DEBUG mount option a final failure dumps the space info.
 *
 * Returns 0 on success or a negative errno.
 */
int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 struct btrfs_key *ins, int is_data, int delalloc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	bool final_tried = num_bytes == min_alloc_size;
	u64 flags = get_alloc_profile_by_root(root, is_data);
	int ret;

	for (;;) {
		WARN_ON(num_bytes < fs_info->sectorsize);
		ret = find_free_extent(fs_info, ram_bytes, num_bytes,
				       empty_size, hint_byte, ins, flags,
				       delalloc);
		if (!ret) {
			if (!is_data)
				btrfs_dec_block_group_reservations(fs_info,
							ins->objectid);
			break;
		}
		if (ret != -ENOSPC)
			break;
		if (final_tried || !ins->offset) {
			/* Out of retries; optionally dump state for debugging. */
			if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
				struct btrfs_space_info *sinfo;

				sinfo = __find_space_info(fs_info, flags);
				btrfs_err(fs_info,
					  "allocation failed flags %llu, wanted %llu",
					  flags, num_bytes);
				if (sinfo)
					dump_space_info(fs_info, sinfo,
							num_bytes, 1);
			}
			break;
		}
		/* Shrink the request and try again. */
		num_bytes = min(num_bytes >> 1, ins->offset);
		num_bytes = round_down(num_bytes, fs_info->sectorsize);
		num_bytes = max(num_bytes, min_alloc_size);
		ram_bytes = num_bytes;
		if (num_bytes == min_alloc_size)
			final_tried = true;
	}
	return ret;
}
/*
 * Give back a reserved extent [@start, @start + @len).  When @pin is set
 * the range is pinned down instead of being returned to the free space
 * cache; otherwise it is (optionally) discarded and re-added as free
 * space, releasing the reservation.
 *
 * Returns 0, or -ENOSPC if no block group covers @start, or the discard
 * error code.
 */
static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
					u64 start, u64 len,
					int pin, int delalloc)
{
	struct btrfs_block_group_cache *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(fs_info, start);
	if (!cache) {
		btrfs_err(fs_info, "Unable to find block group for %llu",
			  start);
		return -ENOSPC;
	}

	if (!pin) {
		if (btrfs_test_opt(fs_info, DISCARD))
			ret = btrfs_discard_extent(fs_info, start, len, NULL);
		btrfs_add_free_space(cache, start, len);
		btrfs_free_reserved_bytes(cache, len, delalloc);
		trace_btrfs_reserved_extent_free(fs_info, start, len);
	} else {
		pin_down_extent(fs_info, cache, start, len, 1);
	}

	btrfs_put_block_group(cache);
	return ret;
}
/*
 * Release a reserved extent back to the free space cache (pin == 0),
 * honouring the caller's delalloc accounting.
 */
int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
			       u64 start, u64 len, int delalloc)
{
	return __btrfs_free_reserved_extent(fs_info, start, len, 0, delalloc);
}
/*
 * Release a reserved extent by pinning it down (pin == 1) rather than
 * returning it to the free space cache; no delalloc accounting.
 */
int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info,
				       u64 start, u64 len)
{
	return __btrfs_free_reserved_extent(fs_info, start, len, 1, 0);
}
/*
 * Insert the EXTENT_ITEM for a newly allocated data extent (@ins) into the
 * extent tree with one inline backref — a shared-data ref when @parent is
 * non-zero, a keyed data ref otherwise — then remove the range from the
 * free space tree and account it as used in its block group.
 *
 * Returns 0 on success or a negative errno; a failure of the final block
 * group update is treated as a logic error and BUG()s.
 */
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_fs_info *fs_info,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod)
{
	int ret;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int type;
	u32 size;

	if (parent > 0)
		type = BTRFS_SHARED_DATA_REF_KEY;
	else
		type = BTRFS_EXTENT_DATA_REF_KEY;
	/* item = extent_item + one inline ref of the chosen type */
	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_DATA);

	/* The inline ref follows the extent item directly. */
	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (parent > 0) {
		struct btrfs_shared_data_ref *ref;
		ref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
	} else {
		struct btrfs_extent_data_ref *ref;
		/* keyed ref body starts at the inline ref's offset field */
		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
					  ins->offset);
	if (ret)
		return ret;

	ret = update_block_group(trans, fs_info, ins->objectid, ins->offset, 1);
	if (ret) { /* -ENOENT, logic error */
		btrfs_err(fs_info, "update block group failed for %llu %llu",
			  ins->objectid, ins->offset);
		BUG();
	}
	trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid, ins->offset);
	return ret;
}
/*
 * Insert the extent item for a newly allocated tree block (@ins) into the
 * extent tree.  With the SKINNY_METADATA incompat feature the item is just
 * extent_item + inline ref (and ins->offset holds the level); otherwise a
 * tree_block_info (key + level) precedes the inline ref and ins->offset is
 * the block size.  The inline ref is a shared-block ref when @parent is
 * non-zero (full-backref), a keyed tree-block ref otherwise.  Afterwards
 * the range is removed from the free space tree and accounted as used.
 *
 * On early failure the reserved extent is freed-and-pinned; a failing
 * block group update BUG()s as a logic error.
 */
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins)
{
	int ret;
	struct btrfs_extent_item *extent_item;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u32 size = sizeof(*extent_item) + sizeof(*iref);
	u64 num_bytes = ins->offset;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

	if (!skinny_metadata)
		size += sizeof(*block_info);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_free_and_pin_reserved_extent(fs_info, ins->objectid,
						   fs_info->nodesize);
		return -ENOMEM;
	}

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	if (ret) {
		btrfs_free_path(path);
		btrfs_free_and_pin_reserved_extent(fs_info, ins->objectid,
						   fs_info->nodesize);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, 1);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);

	if (skinny_metadata) {
		/* no tree_block_info; the ref follows the extent item */
		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
		num_bytes = fs_info->nodesize;
	} else {
		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
		btrfs_set_tree_block_key(leaf, block_info, key);
		btrfs_set_tree_block_level(leaf, block_info, level);
		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	}

	if (parent > 0) {
		/* shared refs require the full-backref flag */
		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_SHARED_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_TREE_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}

	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
					  num_bytes);
	if (ret)
		return ret;

	ret = update_block_group(trans, fs_info, ins->objectid,
				 fs_info->nodesize, 1);
	if (ret) { /* -ENOENT, logic error */
		btrfs_err(fs_info, "update block group failed for %llu %llu",
			  ins->objectid, ins->offset);
		BUG();
	}

	trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid,
					  fs_info->nodesize);
	return ret;
}
/*
 * Queue the delayed ref that turns a reserved data extent (@ins) into an
 * allocated one owned by (@root, @owner, @offset).  The actual extent tree
 * insertion happens when the delayed ref is run.  Not for tree-log roots.
 *
 * Returns the result of queueing the delayed data ref.
 */
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, u64 owner,
				     u64 offset, u64 ram_bytes,
				     struct btrfs_key *ins)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

	btrfs_ref_tree_mod(root, ins->objectid, ins->offset, 0,
			   root->root_key.objectid, owner, offset,
			   BTRFS_ADD_DELAYED_EXTENT);

	return btrfs_add_delayed_data_ref(fs_info, trans, ins->objectid,
					  ins->offset, 0,
					  root->root_key.objectid, owner,
					  offset, ram_bytes,
					  BTRFS_ADD_DELAYED_EXTENT, NULL, NULL);
}
/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 *
 * The reservation is accounted directly here (under space_info and block
 * group locks, in that order) and the extent item is inserted immediately
 * via alloc_reserved_file_extent() rather than through a delayed ref.
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;

	/*
	 * Mixed block groups will exclude before processing the log so we only
	 * need to do the exclude dance if this fs isn't mixed.
	 */
	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = __exclude_logged_extent(fs_info, ins->objectid,
					      ins->offset);
		if (ret)
			return ret;
	}

	block_group = btrfs_lookup_block_group(fs_info, ins->objectid);
	if (!block_group)
		return -EINVAL;

	/* Account the bytes as reserved before inserting the extent item. */
	space_info = block_group->space_info;
	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	space_info->bytes_reserved += ins->offset;
	block_group->reserved += ins->offset;
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = alloc_reserved_file_extent(trans, fs_info, 0, root_objectid,
					 0, owner, offset, ins, 1);
	btrfs_put_block_group(block_group);
	return ret;
}
/*
 * Find or create the extent buffer for a freshly allocated tree block at
 * @bytenr, initialize it (generation, lockdep class, cleaned and marked
 * uptodate) and mark it dirty in the appropriate extent io tree: the
 * root's dirty_log_pages for tree-log roots, the transaction's dirty_pages
 * otherwise.
 *
 * Returns the buffer locked for blocking, or an ERR_PTR.
 */
static struct extent_buffer *
btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      u64 bytenr, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
	btrfs_tree_lock(buf);
	clean_tree_block(fs_info, buf);
	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);

	btrfs_set_lock_blocking(buf);
	set_extent_buffer_uptodate(buf);

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		buf->log_index = root->log_transid % 2;
		/*
		 * we allow two log transactions at a time, use different
		 * EXTENT bit to differentiate dirty pages.
		 */
		if (buf->log_index == 0)
			set_extent_dirty(&root->dirty_log_pages, buf->start,
					 buf->start + buf->len - 1, GFP_NOFS);
		else
			set_extent_new(&root->dirty_log_pages, buf->start,
				       buf->start + buf->len - 1);
	} else {
		buf->log_index = -1;
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->dirty = true;
	/* this returns a buffer locked for blocking */
	return buf;
}
/*
 * Take @blocksize bytes out of a block reserve for a new tree block.
 *
 * Fallback order:
 *   1. the root's own block rsv (retried once after refreshing the global
 *      rsv, when the root's rsv IS the global one);
 *   2. a fresh no-flush metadata reservation into that rsv;
 *   3. stealing from the global rsv, if it lives in the same space_info.
 *
 * Returns the rsv the bytes were taken from (so the caller can return
 * them to the right place), or an ERR_PTR on failure.
 */
static struct btrfs_block_rsv *
use_block_rsv(struct btrfs_trans_handle *trans,
	      struct btrfs_root *root, u32 blocksize)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;
	bool global_updated = false;

	block_rsv = get_block_rsv(trans, root);

	if (unlikely(block_rsv->size == 0))
		goto try_reserve;
again:
	ret = block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;

	if (block_rsv->failfast)
		return ERR_PTR(ret);

	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
		/* Refresh the global rsv once, then retry the fast path. */
		global_updated = true;
		update_global_block_rsv(fs_info);
		goto again;
	}

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		static DEFINE_RATELIMIT_STATE(_rs,
				DEFAULT_RATELIMIT_INTERVAL * 10,
				/*DEFAULT_RATELIMIT_BURST*/ 1);
		if (__ratelimit(&_rs))
			WARN(1, KERN_DEBUG
				"BTRFS: block rsv returned %d\n", ret);
	}
try_reserve:
	ret = reserve_metadata_bytes(root, block_rsv, blocksize,
				     BTRFS_RESERVE_NO_FLUSH);
	if (!ret)
		return block_rsv;
	/*
	 * If we couldn't reserve metadata bytes try and use some from
	 * the global reserve if its space type is the same as the global
	 * reservation.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
	    block_rsv->space_info == global_rsv->space_info) {
		ret = block_rsv_use_bytes(global_rsv, blocksize);
		if (!ret)
			return global_rsv;
	}
	return ERR_PTR(ret);
}
/*
 * Undo use_block_rsv(): put @blocksize bytes back into @block_rsv and
 * release any resulting excess.
 */
static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
	block_rsv_add_bytes(block_rsv, blocksize, 0);
	block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
}
/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the tree buffer or an ERR_PTR on error.
 *
 * Sequence: charge a block rsv, reserve a nodesize extent, initialize the
 * new buffer, then (except for tree-log roots) queue a delayed tree ref
 * carrying the extent flags and optional key.  Each error path unwinds
 * exactly the steps already taken (out_free_delayed / out_free_buf /
 * out_free_reserved / out_unuse).
 */
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     u64 parent, u64 root_objectid,
					     const struct btrfs_disk_key *key,
					     int level, u64 hint,
					     u64 empty_size)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key ins;
	struct btrfs_block_rsv *block_rsv;
	struct extent_buffer *buf;
	struct btrfs_delayed_extent_op *extent_op;
	u64 flags = 0;
	int ret;
	u32 blocksize = fs_info->nodesize;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	/* Sanity tests allocate sequentially from a fake root, no rsv. */
	if (btrfs_is_testing(fs_info)) {
		buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
					    level);
		if (!IS_ERR(buf))
			root->alloc_bytenr += blocksize;
		return buf;
	}
#endif

	block_rsv = use_block_rsv(trans, root, blocksize);
	if (IS_ERR(block_rsv))
		return ERR_CAST(block_rsv);

	ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
				   empty_size, hint, &ins, 0, 0);
	if (ret)
		goto out_unuse;

	buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_free_reserved;
	}

	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent == 0)
			parent = ins.objectid;
		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
	} else
		BUG_ON(parent > 0);

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		extent_op = btrfs_alloc_delayed_extent_op();
		if (!extent_op) {
			ret = -ENOMEM;
			goto out_free_buf;
		}
		if (key)
			memcpy(&extent_op->key, key, sizeof(extent_op->key));
		else
			memset(&extent_op->key, 0, sizeof(extent_op->key));
		extent_op->flags_to_set = flags;
		/* skinny metadata items carry no key to update */
		extent_op->update_key = skinny_metadata ? false : true;
		extent_op->update_flags = true;
		extent_op->is_data = false;
		extent_op->level = level;

		btrfs_ref_tree_mod(root, ins.objectid, ins.offset, parent,
				   root_objectid, level, 0,
				   BTRFS_ADD_DELAYED_EXTENT);
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, ins.objectid,
						 ins.offset, parent,
						 root_objectid, level,
						 BTRFS_ADD_DELAYED_EXTENT,
						 extent_op, NULL, NULL);
		if (ret)
			goto out_free_delayed;
	}
	return buf;

out_free_delayed:
	btrfs_free_delayed_extent_op(extent_op);
out_free_buf:
	free_extent_buffer(buf);
out_free_reserved:
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
out_unuse:
	unuse_block_rsv(fs_info, block_rsv, blocksize);
	return ERR_PTR(ret);
}
/*
 * State carried while walking a subtree (see reada_walk_down,
 * walk_down_proc and do_walk_down below).  The walk runs in one of two
 * stages, DROP_REFERENCE or UPDATE_BACKREF.
 */
struct walk_control {
	u64 refs[BTRFS_MAX_LEVEL];	/* extent refcount of the node at each level */
	u64 flags[BTRFS_MAX_LEVEL];	/* extent flags (e.g. FULL_BACKREF) per level */
	struct btrfs_key update_progress; /* key compared against to bound UPDATE_BACKREF work */
	int stage;			/* DROP_REFERENCE or UPDATE_BACKREF */
	int level;			/* tree level currently being processed */
	int shared_level;		/* level below which a shared subtree was found */
	int update_ref;			/* non-zero: may switch to UPDATE_BACKREF on shared blocks */
	int keep_locks;			/* non-zero: don't drop path locks while walking down */
	int reada_slot;			/* slot readahead resumes from */
	int reada_count;		/* adaptive number of blocks to read ahead */
	int for_reloc;
};

/* walk stages */
#define DROP_REFERENCE	1
#define UPDATE_BACKREF	2
/*
 * Issue readahead for child blocks of path->nodes[wc->level], starting at
 * the current slot.  reada_count adapts: it shrinks (x2/3) when the walk
 * has not yet reached the previous readahead window and grows (x3/2,
 * capped at the node fanout) otherwise.  Children that the walk would skip
 * anyway (by generation, FULL_BACKREF flag, refcount, or progress key)
 * are not read ahead, mirroring the checks in do_walk_down.
 */
static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct walk_control *wc,
				     struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 generation;
	u64 refs;
	u64 flags;
	u32 nritems;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int ret;
	int slot;
	int nread = 0;

	if (path->slots[wc->level] < wc->reada_slot) {
		wc->reada_count = wc->reada_count * 2 / 3;
		wc->reada_count = max(wc->reada_count, 2);
	} else {
		wc->reada_count = wc->reada_count * 3 / 2;
		wc->reada_count = min_t(int, wc->reada_count,
					BTRFS_NODEPTRS_PER_BLOCK(fs_info));
	}

	eb = path->nodes[wc->level];
	nritems = btrfs_header_nritems(eb);

	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
		if (nread >= wc->reada_count)
			break;

		cond_resched();
		bytenr = btrfs_node_blockptr(eb, slot);
		generation = btrfs_node_ptr_generation(eb, slot);

		/* The current slot is about to be visited; always read it. */
		if (slot == path->slots[wc->level])
			goto reada;

		if (wc->stage == UPDATE_BACKREF &&
		    generation <= root->root_key.offset)
			continue;

		/* We don't lock the tree block, it's OK to be racy here */
		ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
					       wc->level - 1, 1, &refs,
					       &flags);
		/* We don't care about errors in readahead. */
		if (ret < 0)
			continue;
		BUG_ON(refs == 0);

		if (wc->stage == DROP_REFERENCE) {
			if (refs == 1)
				goto reada;

			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				continue;
			btrfs_node_key_to_cpu(eb, &key, slot);
			ret = btrfs_comp_cpu_keys(&key,
						  &wc->update_progress);
			if (ret < 0)
				continue;
		} else {
			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
		}
reada:
		readahead_tree_block(fs_info, bytenr);
		nread++;
	}
	wc->reada_slot = slot;
}
/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc, int lookup_info)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	/* UPDATE_BACKREF only touches blocks this root owns. */
	if (wc->stage == UPDATE_BACKREF &&
	    btrfs_header_owner(eb) != root->root_key.objectid)
		return 1;

	/*
	 * when reference count of tree block is 1, it won't increase
	 * again. once full backref flag is set, we never clear it.
	 */
	if (lookup_info &&
	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_lookup_extent_info(trans, fs_info,
					       eb->start, level, 1,
					       &wc->refs[level],
					       &wc->flags[level]);
		BUG_ON(ret == -ENOMEM);
		if (ret)
			return ret;
		BUG_ON(wc->refs[level] == 0);
	}

	if (wc->stage == DROP_REFERENCE) {
		/* Shared block: stop walking down, switch handling in caller. */
		if (wc->refs[level] > 1)
			return 1;

		if (path->locks[level] && !wc->keep_locks) {
			btrfs_tree_unlock_rw(eb, path->locks[level]);
			path->locks[level] = 0;
		}
		return 0;
	}

	/* wc->stage == UPDATE_BACKREF */
	if (!(wc->flags[level] & flag)) {
		BUG_ON(!path->locks[level]);
		/* inc full-backref refs, drop the keyed ones, then set the flag */
		ret = btrfs_inc_ref(trans, root, eb, 1);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_dec_ref(trans, root, eb, 0);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_set_disk_extent_flags(trans, fs_info, eb->start,
						  eb->len, flag,
						  btrfs_header_level(eb), 0);
		BUG_ON(ret); /* -ENOMEM */
		wc->flags[level] |= flag;
	}

	/*
	 * the block is shared by multiple trees, so it's not good to
	 * keep the tree lock
	 */
	if (path->locks[level] && level > 0) {
		btrfs_tree_unlock_rw(eb, path->locks[level]);
		path->locks[level] = 0;
	}
	return 0;
}
/*
* helper to process tree block pointer.
*
* when wc->stage == DROP_REFERENCE, this function checks
* reference count of the block pointed to. if the block
* is shared and we need update back refs for the subtree
* rooted at the block, this function changes wc->stage to
* UPDATE_BACKREF. if the block is shared and there is no
* need to update back, this function drops the reference
* to the block.
*
* NOTE: return value 1 means we should stop walking down.
*/
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int *lookup_info)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 generation;
	u64 parent;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *next;
	int level = wc->level;
	int reada = 0;
	int ret = 0;
	bool need_account = false;	/* qgroup-account a shared subtree we skip */
	generation = btrfs_node_ptr_generation(path->nodes[level],
					       path->slots[level]);
	/*
	 * if the lower level block was created before the snapshot
	 * was created, we know there is no need to update back refs
	 * for the subtree
	 */
	if (wc->stage == UPDATE_BACKREF &&
	    generation <= root->root_key.offset) {
		*lookup_info = 1;
		return 1;
	}
	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
	blocksize = fs_info->nodesize;
	/* try the cached extent buffer first, otherwise create a fresh one */
	next = find_extent_buffer(fs_info, bytenr);
	if (!next) {
		next = btrfs_find_create_tree_block(fs_info, bytenr);
		if (IS_ERR(next))
			return PTR_ERR(next);
		btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
					       level - 1);
		reada = 1;	/* block wasn't cached, readahead may help */
	}
	btrfs_tree_lock(next);
	btrfs_set_lock_blocking(next);
	/* refresh refcount and flags of the child before deciding its fate */
	ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
				       &wc->refs[level - 1],
				       &wc->flags[level - 1]);
	if (ret < 0)
		goto out_unlock;
	if (unlikely(wc->refs[level - 1] == 0)) {
		btrfs_err(fs_info, "Missing references.");
		ret = -EIO;
		goto out_unlock;
	}
	*lookup_info = 0;
	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level - 1] > 1) {
			/* child is shared with another tree */
			need_account = true;
			if (level == 1 &&
			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				goto skip;
			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				goto skip;
			/* already past update_progress? then nothing to do */
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
			if (ret < 0)
				goto skip;
			/* shared subtree needs backref updates: switch stage */
			wc->stage = UPDATE_BACKREF;
			wc->shared_level = level - 1;
		}
	} else {
		if (level == 1 &&
		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
			goto skip;
	}
	/* stale buffer: drop it so it gets re-read below */
	if (!btrfs_buffer_uptodate(next, generation, 0)) {
		btrfs_tree_unlock(next);
		free_extent_buffer(next);
		next = NULL;
		*lookup_info = 1;
	}
	if (!next) {
		if (reada && level == 1)
			reada_walk_down(trans, root, wc, path);
		next = read_tree_block(fs_info, bytenr, generation);
		if (IS_ERR(next)) {
			return PTR_ERR(next);
		} else if (!extent_buffer_uptodate(next)) {
			free_extent_buffer(next);
			return -EIO;
		}
		btrfs_tree_lock(next);
		btrfs_set_lock_blocking(next);
	}
	level--;
	ASSERT(level == btrfs_header_level(next));
	if (level != btrfs_header_level(next)) {
		btrfs_err(root->fs_info, "mismatched level");
		ret = -EIO;
		goto out_unlock;
	}
	/* descend: the child becomes the current node of the path */
	path->nodes[level] = next;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
	wc->level = level;
	if (wc->level == 1)
		wc->reada_slot = 0;
	return 0;
skip:
	/* not descending: drop our reference on the child instead */
	wc->refs[level - 1] = 0;
	wc->flags[level - 1] = 0;
	if (wc->stage == DROP_REFERENCE) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			parent = path->nodes[level]->start;
		} else {
			ASSERT(root->root_key.objectid ==
			       btrfs_header_owner(path->nodes[level]));
			if (root->root_key.objectid !=
			    btrfs_header_owner(path->nodes[level])) {
				btrfs_err(root->fs_info,
					  "mismatched block owner");
				ret = -EIO;
				goto out_unlock;
			}
			parent = 0;
		}
		if (need_account) {
			/* best effort: a failure only desyncs qgroup numbers */
			ret = btrfs_qgroup_trace_subtree(trans, root, next,
							 generation, level - 1);
			if (ret) {
				btrfs_err_rl(fs_info,
					     "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
					     ret);
			}
		}
		ret = btrfs_free_extent(trans, root, bytenr, blocksize,
					parent, root->root_key.objectid,
					level - 1, 0);
		if (ret)
			goto out_unlock;
	}
	*lookup_info = 1;
	ret = 1;
out_unlock:
	btrfs_tree_unlock(next);
	free_extent_buffer(next);
	return ret;
}
/*
* helper to process tree block while walking up the tree.
*
* when wc->stage == DROP_REFERENCE, this function drops
* reference count on the block.
*
* when wc->stage == UPDATE_BACKREF, this function changes
* wc->stage back to DROP_REFERENCE if we changed wc->stage
* to UPDATE_BACKREF previously while processing the block.
*
* NOTE: return value 1 means we should stop walking up.
*/
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 parent = 0;
	if (wc->stage == UPDATE_BACKREF) {
		BUG_ON(wc->shared_level < level);
		if (level < wc->shared_level)
			goto out;
		/*
		 * finished updating the subtree rooted at the shared block:
		 * record where the DROP_REFERENCE walk should resume and
		 * switch back to that stage.
		 */
		ret = find_next_key(path, level + 1, &wc->update_progress);
		if (ret > 0)
			wc->update_ref = 0;	/* no keys left to update */
		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
		path->slots[level] = 0;
		/*
		 * check reference count again if the block isn't locked.
		 * we should start walking down the tree again if reference
		 * count is one.
		 */
		if (!path->locks[level]) {
			BUG_ON(level == 0);
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
			ret = btrfs_lookup_extent_info(trans, fs_info,
						       eb->start, level, 1,
						       &wc->refs[level],
						       &wc->flags[level]);
			if (ret < 0) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return ret;
			}
			BUG_ON(wc->refs[level] == 0);
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				/* only we reference it: walk down and free */
				return 1;
			}
		}
	}
	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
	if (wc->refs[level] == 1) {
		if (level == 0) {
			/* drop the leaf's references on its data extents */
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0);
			BUG_ON(ret); /* -ENOMEM */
			/* best effort: failure only desyncs qgroup numbers */
			ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, eb);
			if (ret) {
				btrfs_err_rl(fs_info,
					     "error %d accounting leaf items. Quota is out of sync, rescan required.",
					     ret);
			}
		}
		/* make block locked assertion in clean_tree_block happy */
		if (!path->locks[level] &&
		    btrfs_header_generation(eb) == trans->transid) {
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		}
		clean_tree_block(fs_info, eb);
	}
	/* resolve the parent bytenr used to find this block's backref */
	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(eb));
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level + 1]));
	}
	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return 0;
}
/*
 * Walk down from wc->level towards the leaves, processing each block via
 * walk_down_proc()/do_walk_down() according to wc->stage.
 *
 * Returns 0 when a stopping point is reached (a leaf, an exhausted node, or
 * a block the helpers told us to keep), or a negative errno on failure.
 *
 * Note: previously a negative return from walk_down_proc() (e.g. -EIO from
 * the extent info lookup) fell through the "ret > 0" check and the walk
 * continued with stale wc->refs/wc->flags; it is now propagated to the
 * caller like do_walk_down() errors already were.
 */
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	int lookup_info = 1;
	int ret;
	while (level >= 0) {
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
		if (ret < 0)
			return ret;
		if (ret > 0)
			break;
		if (level == 0)
			break;
		/* current node exhausted, let walk_up_tree take over */
		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;
		ret = do_walk_down(trans, root, path, wc, &lookup_info);
		if (ret > 0) {
			/* child was skipped/dropped, move to the next slot */
			path->slots[level]++;
			continue;
		} else if (ret < 0)
			return ret;
		level = wc->level;
	}
	return 0;
}
/*
 * Walk back up the tree from wc->level, finishing each fully-processed node
 * with walk_up_proc() and popping it off the path.  Returns 0 as soon as a
 * level still has unvisited slots (the down-walk should resume there), and
 * 1 once every level below max_level has been consumed.
 */
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int ret;
	int level = wc->level;
	/* mark the starting node exhausted so walk_up_proc runs on it */
	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	for (; level < max_level && path->nodes[level]; level++) {
		struct extent_buffer *eb = path->nodes[level];
		wc->level = level;
		/* unvisited slots remain: step forward and resume descent */
		if (path->slots[level] + 1 < btrfs_header_nritems(eb)) {
			path->slots[level]++;
			return 0;
		}
		/* node fully processed: finish it, then pop one level up */
		ret = walk_up_proc(trans, root, path, wc);
		if (ret > 0)
			return 0;
		if (path->locks[level]) {
			btrfs_tree_unlock_rw(eb, path->locks[level]);
			path->locks[level] = 0;
		}
		free_extent_buffer(eb);
		path->nodes[level] = NULL;
	}
	return 1;
}
/*
* drop a subvolume tree.
*
* this function traverses the tree freeing any blocks that only
* referenced by the tree.
*
* when a shared tree block is found. this function decreases its
* reference count by one. if update_ref is true, this function
* also make sure backrefs for the shared block and all lower level
* blocks are properly updated.
*
* If called with for_reloc == 0, may exit early with -EAGAIN
*/
int btrfs_drop_snapshot(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, int update_ref,
			int for_reloc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct walk_control *wc;
	struct btrfs_key key;
	int err = 0;
	int ret;
	int level;
	bool root_dropped = false;
	btrfs_debug(fs_info, "Drop subvolume %llu", root->objectid);
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}
	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		err = -ENOMEM;
		goto out;
	}
	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}
	if (block_rsv)
		trans->block_rsv = block_rsv;
	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		/* fresh drop: start walking from the root node */
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		btrfs_set_lock_blocking(path->nodes[level]);
		path->slots[level] = 0;
		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
		/* resume an interrupted drop from the recorded progress key */
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));
		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out_end_trans;
		}
		WARN_ON(ret > 0);
		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		btrfs_unlock_up_safe(path, 0);
		/*
		 * re-lock from the root down to drop_level, refreshing
		 * wc->refs/wc->flags for each level on the way
		 */
		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			btrfs_set_lock_blocking(path->nodes[level]);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
			ret = btrfs_lookup_extent_info(trans, fs_info,
						       path->nodes[level]->start,
						       level, 1, &wc->refs[level],
						       &wc->flags[level]);
			if (ret < 0) {
				err = ret;
				goto out_end_trans;
			}
			BUG_ON(wc->refs[level] == 0);
			if (level == root_item->drop_level)
				break;
			btrfs_tree_unlock(path->nodes[level]);
			path->locks[level] = 0;
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}
	/* initialize the walk control for a full DROP_REFERENCE walk */
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->for_reloc = for_reloc;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			err = ret;
			break;
		}
		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			err = ret;
			break;
		}
		if (ret > 0) {
			/* the whole tree has been walked and freed */
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}
		if (wc->stage == DROP_REFERENCE) {
			/* remember how far we got so the drop can resume */
			level = wc->level;
			btrfs_node_key(path->nodes[level],
				       &root_item->drop_progress,
				       path->slots[level]);
			root_item->drop_level = level;
		}
		BUG_ON(wc->level == 0);
		/* periodically commit progress and restart the transaction */
		if (btrfs_should_end_transaction(trans) ||
		    (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				err = ret;
				goto out_end_trans;
			}
			btrfs_end_transaction_throttle(trans);
			if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
				btrfs_debug(fs_info,
					    "drop snapshot early exit");
				err = -EAGAIN;
				goto out_free;
			}
			trans = btrfs_start_transaction(tree_root, 0);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				goto out_free;
			}
			if (block_rsv)
				trans->block_rsv = block_rsv;
		}
	}
	btrfs_release_path(path);
	if (err)
		goto out_end_trans;
	/* the tree is gone: delete the root item itself */
	ret = btrfs_del_root(trans, fs_info, &root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		err = ret;
		goto out_end_trans;
	}
	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		ret = btrfs_find_root(tree_root, &root->root_key, path,
				      NULL, NULL);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			err = ret;
			goto out_end_trans;
		} else if (ret > 0) {
			/* if we fail to delete the orphan item this time
			 * around, it'll get picked up the next time.
			 *
			 * The most common failure here is just -ENOENT.
			 */
			btrfs_del_orphan_item(trans, tree_root,
					      root->root_key.objectid);
		}
	}
	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
		btrfs_add_dropped_root(trans, root);
	} else {
		/* not tracked in the radix tree: release it directly */
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		btrfs_put_fs_root(root);
	}
	root_dropped = true;
out_end_trans:
	btrfs_end_transaction_throttle(trans);
out_free:
	kfree(wc);
	btrfs_free_path(path);
out:
	/*
	 * So if we need to stop dropping the snapshot for whatever reason we
	 * need to make sure to add it back to the dead root list so that we
	 * keep trying to do the work later. This also cleans up roots if we
	 * don't have it in the radix (like when we recover after a power fail
	 * or unmount) so we don't leak memory.
	 */
	if (!for_reloc && !root_dropped)
		btrfs_add_dead_root(root);
	if (err && err != -EAGAIN)
		btrfs_handle_fs_error(fs_info, err, NULL);
	return err;
}
/*
* drop subtree rooted at tree block 'node'.
*
* NOTE: this function will unlock and release tree block 'node'
* only used by relocation code
*/
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct extent_buffer *node,
			struct extent_buffer *parent)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;
	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		return -ENOMEM;
	}
	btrfs_assert_tree_locked(parent);
	parent_level = btrfs_header_level(parent);
	/* the path takes its own reference on parent */
	extent_buffer_get(parent);
	path->nodes[parent_level] = parent;
	/* mark parent exhausted so the walk never steps through it */
	path->slots[parent_level] = btrfs_header_nritems(parent);
	btrfs_assert_tree_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
	/* parent stays alive: pretend it has one ref with a full backref */
	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->for_reloc = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
	/* alternate down/up walks until the subtree is fully dropped */
	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}
		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}
	kfree(wc);
	btrfs_free_path(path);
	return ret;
}
/*
 * Compute the block group profile @flags should be converted to, honoring
 * a pending restripe target first and otherwise reducing/upgrading the
 * redundancy profile to match the number of writable devices.
 */
static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 raid_mask;
	u64 target;
	/*
	 * if restripe for this chunk_type is on pick target profile and
	 * return, otherwise do the usual balance
	 */
	target = get_restripe_target(fs_info, flags);
	if (target)
		return extended_to_chunk(target);
	raid_mask = BTRFS_BLOCK_GROUP_RAID0 |
		    BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
		    BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
	if (fs_info->fs_devices->rw_devices == 1) {
		/* with one device any raid/dup bits must be dropped */
		u64 reduced = flags & ~(raid_mask | BTRFS_BLOCK_GROUP_DUP);
		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return reduced;
		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return reduced | BTRFS_BLOCK_GROUP_DUP;
	} else {
		/* they already had raid on here, just return */
		if (flags & raid_mask)
			return flags;
		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return (flags & ~(raid_mask | BTRFS_BLOCK_GROUP_DUP)) |
			       BTRFS_BLOCK_GROUP_RAID1;
		/* this is drive concat, leave it alone */
	}
	return flags;
}
/*
 * Try to mark @cache read-only.  Unless @force is set, refuse when doing so
 * would leave the metadata/system space_info without headroom for chunk
 * allocation.  Returns 0 on success, -ENOSPC otherwise.
 */
static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	u64 min_allocable_bytes;
	int ret = -ENOSPC;
	/*
	 * We need some metadata space and system metadata space for
	 * allocating chunks in some corner cases until we force to set
	 * it to be readonly.
	 */
	if ((sinfo->flags &
	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
	    !force)
		min_allocable_bytes = SZ_1M;
	else
		min_allocable_bytes = 0;
	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	if (cache->ro) {
		/* already read-only: just take another RO reference */
		cache->ro++;
		ret = 0;
		goto out;
	}
	/* free bytes of this group that become read-only along with it */
	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);
	/* flip to RO only if the space_info keeps enough allocatable room */
	if (btrfs_space_info_used(sinfo, true) + num_bytes +
	    min_allocable_bytes <= sinfo->total_bytes) {
		sinfo->bytes_readonly += num_bytes;
		cache->ro++;
		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
		ret = 0;
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return ret;
}
/*
 * Mark @cache read-only, allocating a chunk at the converted raid profile
 * first if the profile is being changed, and retrying once after forcing a
 * new chunk when the group can't be made RO without breaking accounting.
 */
int btrfs_inc_block_group_ro(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_trans_handle *trans;
	u64 alloc_flags;
	int ret;
again:
	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	/*
	 * we're not allowed to set block groups readonly after the dirty
	 * block groups cache has started writing. If it already started,
	 * back off and let this transaction commit
	 */
	mutex_lock(&fs_info->ro_block_group_mutex);
	if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
		u64 transid = trans->transid;
		mutex_unlock(&fs_info->ro_block_group_mutex);
		btrfs_end_transaction(trans);
		/* wait for that commit to finish, then retry from scratch */
		ret = btrfs_wait_for_commit(fs_info, transid);
		if (ret)
			return ret;
		goto again;
	}
	/*
	 * if we are changing raid levels, try to allocate a corresponding
	 * block group with the new raid level.
	 */
	alloc_flags = update_block_group_flags(fs_info, cache->flags);
	if (alloc_flags != cache->flags) {
		ret = do_chunk_alloc(trans, fs_info, alloc_flags,
				     CHUNK_ALLOC_FORCE);
		/*
		 * ENOSPC is allowed here, we may have enough space
		 * already allocated at the new raid level to
		 * carry on
		 */
		if (ret == -ENOSPC)
			ret = 0;
		if (ret < 0)
			goto out;
	}
	ret = inc_block_group_ro(cache, 0);
	if (!ret)
		goto out;
	/* no headroom: force-allocate a chunk, then try once more */
	alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags);
	ret = do_chunk_alloc(trans, fs_info, alloc_flags,
			     CHUNK_ALLOC_FORCE);
	if (ret < 0)
		goto out;
	ret = inc_block_group_ro(cache, 0);
out:
	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
		/* make sure the system chunk has room for the conversion */
		alloc_flags = update_block_group_flags(fs_info, cache->flags);
		mutex_lock(&fs_info->chunk_mutex);
		check_system_chunk(trans, fs_info, alloc_flags);
		mutex_unlock(&fs_info->chunk_mutex);
	}
	mutex_unlock(&fs_info->ro_block_group_mutex);
	btrfs_end_transaction(trans);
	return ret;
}
/*
 * Force the allocation of one chunk of @type: a thin wrapper that maps the
 * type onto its allocation profile and calls do_chunk_alloc() with
 * CHUNK_ALLOC_FORCE.
 */
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info, u64 type)
{
	return do_chunk_alloc(trans, fs_info, get_alloc_profile(fs_info, type),
			      CHUNK_ALLOC_FORCE);
}
/*
 * helper to account the unused space of all the readonly block groups in the
 * space_info. takes mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
	struct btrfs_block_group_cache *bg;
	u64 total_free = 0;
	/* It's df, we don't care if it's racy */
	if (list_empty(&sinfo->ro_bgs))
		return 0;
	spin_lock(&sinfo->lock);
	list_for_each_entry(bg, &sinfo->ro_bgs, ro_list) {
		int mirrors;
		spin_lock(&bg->lock);
		if (!bg->ro) {
			spin_unlock(&bg->lock);
			continue;
		}
		/* mirrored/duplicated profiles store every byte twice */
		mirrors = (bg->flags & (BTRFS_BLOCK_GROUP_RAID1 |
					BTRFS_BLOCK_GROUP_RAID10 |
					BTRFS_BLOCK_GROUP_DUP)) ? 2 : 1;
		total_free += (bg->key.offset -
			       btrfs_block_group_used(&bg->item)) * mirrors;
		spin_unlock(&bg->lock);
	}
	spin_unlock(&sinfo->lock);
	return total_free;
}
/*
 * Drop one read-only reference on @cache; once the last one is gone the
 * group's unused bytes stop being accounted as read-only space.
 */
void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	BUG_ON(!cache->ro);
	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	if (!--cache->ro) {
		/* last RO holder: give the unused bytes back to sinfo */
		num_bytes = cache->key.offset - cache->reserved -
			    cache->pinned - cache->bytes_super -
			    btrfs_block_group_used(&cache->item);
		sinfo->bytes_readonly -= num_bytes;
		list_del_init(&cache->ro_list);
	}
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
}
/*
* checks to see if its even possible to relocate this block group.
*
* @return - -1 if it's not a good idea to relocate this block group, 0 if its
* ok to go ahead and try.
*/
int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	struct btrfs_trans_handle *trans;
	u64 min_free;
	u64 dev_min = 1;	/* devices that must have room for a stripe */
	u64 dev_nr = 0;
	u64 target;
	int debug;
	int index;
	int full = 0;
	int ret = 0;
	debug = btrfs_test_opt(fs_info, ENOSPC_DEBUG);
	block_group = btrfs_lookup_block_group(fs_info, bytenr);
	/* odd, couldn't find the block group, leave it alone */
	if (!block_group) {
		if (debug)
			btrfs_warn(fs_info,
				   "can't find block group for bytenr %llu",
				   bytenr);
		return -1;
	}
	min_free = btrfs_block_group_used(&block_group->item);
	/* no bytes used, we're good */
	if (!min_free)
		goto out;
	space_info = block_group->space_info;
	spin_lock(&space_info->lock);
	full = space_info->full;
	/*
	 * if this is the last block group we have in this space, we can't
	 * relocate it unless we're able to allocate a new chunk below.
	 *
	 * Otherwise, we need to make sure we have room in the space to handle
	 * all of the extents from this block group.  If we can, we're good
	 */
	if ((space_info->total_bytes != block_group->key.offset) &&
	    (btrfs_space_info_used(space_info, false) + min_free <
	     space_info->total_bytes)) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);
	/*
	 * ok we don't have enough space, but maybe we have free space on our
	 * devices to allocate new chunks for relocation, so loop through our
	 * alloc devices and guess if we have enough space.  if this block
	 * group is going to be restriped, run checks against the target
	 * profile instead of the current one.
	 */
	ret = -1;
	/*
	 * index:
	 *      0: raid10
	 *      1: raid1
	 *      2: dup
	 *      3: raid0
	 *      4: single
	 */
	target = get_restripe_target(fs_info, block_group->flags);
	if (target) {
		index = __get_raid_index(extended_to_chunk(target));
	} else {
		/*
		 * this is just a balance, so if we were marked as full
		 * we know there is no space for a new chunk
		 */
		if (full) {
			if (debug)
				btrfs_warn(fs_info,
					   "no space to alloc new chunk for block group %llu",
					   block_group->key.objectid);
			goto out;
		}
		index = get_block_group_index(block_group);
	}
	/* scale device count and per-device size by the raid profile */
	if (index == BTRFS_RAID_RAID10) {
		dev_min = 4;
		/* Divide by 2 */
		min_free >>= 1;
	} else if (index == BTRFS_RAID_RAID1) {
		dev_min = 2;
	} else if (index == BTRFS_RAID_DUP) {
		/* Multiply by 2 */
		min_free <<= 1;
	} else if (index == BTRFS_RAID_RAID0) {
		dev_min = fs_devices->rw_devices;
		min_free = div64_u64(min_free, dev_min);
	}
	/* We need to do this so that we can look at pending chunks */
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}
	mutex_lock(&fs_info->chunk_mutex);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 dev_offset;
		/*
		 * check to make sure we can actually find a chunk with enough
		 * space to fit our block group in.
		 */
		if (device->total_bytes > device->bytes_used + min_free &&
		    !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
			ret = find_free_dev_extent(trans, device, min_free,
						   &dev_offset, NULL);
			if (!ret)
				dev_nr++;
			/* success once enough devices have a free extent */
			if (dev_nr >= dev_min)
				break;
			ret = -1;
		}
	}
	if (debug && ret == -1)
		btrfs_warn(fs_info,
			   "no space to allocate a new chunk for block group %llu",
			   block_group->key.objectid);
	mutex_unlock(&fs_info->chunk_mutex);
	btrfs_end_transaction(trans);
out:
	btrfs_put_block_group(block_group);
	return ret;
}
/*
 * Position @path at the first BLOCK_GROUP_ITEM at or after *@key in the
 * extent tree.  Returns 0 with the path positioned on success, a positive
 * value if no further block group items exist, -ENOENT if an item has no
 * matching chunk mapping, or another negative errno on lookup failure.
 */
static int find_first_block_group(struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path,
				  struct btrfs_key *key)
{
	struct btrfs_root *root = fs_info->extent_root;
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;
	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;	/* ret > 0: ran out of leaves */
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			struct extent_map_tree *em_tree;
			struct extent_map *em;
			/* sanity check: the block group must have a chunk */
			em_tree = &root->fs_info->mapping_tree.map_tree;
			read_lock(&em_tree->lock);
			em = lookup_extent_mapping(em_tree, found_key.objectid,
						   found_key.offset);
			read_unlock(&em_tree->lock);
			if (!em) {
				btrfs_err(fs_info,
			"logical %llu len %llu found bg but no related chunk",
					  found_key.objectid, found_key.offset);
				ret = -ENOENT;
			} else {
				ret = 0;
			}
			free_extent_map(em);
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}
/*
 * Release the free-space-cache inode reference (iref) held by every block
 * group.  Used during teardown so the inodes can actually be freed.
 */
void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	u64 last = 0;
	while (1) {
		struct inode *inode;
		/* scan forward for the next group still holding an iref */
		block_group = btrfs_lookup_first_block_group(info, last);
		while (block_group) {
			spin_lock(&block_group->lock);
			if (block_group->iref)
				break;	/* NOTE: exits with lock held */
			spin_unlock(&block_group->lock);
			block_group = next_block_group(info, block_group);
		}
		if (!block_group) {
			if (last == 0)
				break;	/* full pass found nothing: done */
			last = 0;	/* restart from the beginning once */
			continue;
		}
		/* detach the inode under the lock, iput it outside */
		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		ASSERT(block_group->io_ctl.inode == NULL);
		iput(inode);
		last = block_group->key.objectid + block_group->key.offset;
		btrfs_put_block_group(block_group);
	}
}
/*
* Must be called only after stopping all workers, since we could have block
* group caching kthreads running, and therefore they could race with us if we
* freed the block groups before stopping them.
*/
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;
	/* drop any outstanding caching controls first */
	down_write(&info->commit_root_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		put_caching_control(caching_ctl);
	}
	up_write(&info->commit_root_sem);
	/* drop the references held by the unused-block-group list */
	spin_lock(&info->unused_bgs_lock);
	while (!list_empty(&info->unused_bgs)) {
		block_group = list_first_entry(&info->unused_bgs,
					       struct btrfs_block_group_cache,
					       bg_list);
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&info->unused_bgs_lock);
	/* tear down every block group in the rbtree, last first */
	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		RB_CLEAR_NODE(&block_group->cache_node);
		/* drop the lock while working on this group, retake after */
		spin_unlock(&info->block_group_cache_lock);
		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);
		/*
		 * We haven't cached this block group, which means we could
		 * possibly have excluded extents on this block group.
		 */
		if (block_group->cached == BTRFS_CACHE_NO ||
		    block_group->cached == BTRFS_CACHE_ERROR)
			free_excluded_extents(info, block_group);
		btrfs_remove_free_space_cache(block_group);
		ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
		ASSERT(list_empty(&block_group->dirty_list));
		ASSERT(list_empty(&block_group->io_list));
		ASSERT(list_empty(&block_group->bg_list));
		ASSERT(atomic_read(&block_group->count) == 1);
		btrfs_put_block_group(block_group);
		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);
	/* now that all the block groups are freed, go through and
	 * free all the space_info structs.  This is only called during
	 * the final stages of unmount, and so we know nobody is
	 * using them.  We call synchronize_rcu() once before we start,
	 * just to be on the safe side.
	 */
	synchronize_rcu();
	release_global_block_rsv(info);
	while (!list_empty(&info->space_info)) {
		int i;
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);
		/*
		 * Do not hide this behind enospc_debug, this is actually
		 * important and indicates a real bug if this happens.
		 */
		if (WARN_ON(space_info->bytes_pinned > 0 ||
			    space_info->bytes_reserved > 0 ||
			    space_info->bytes_may_use > 0))
			dump_space_info(info, space_info, 0, 0);
		list_del(&space_info->list);
		/* remove the per-raid sysfs kobjects before the parent */
		for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
			struct kobject *kobj;
			kobj = space_info->block_group_kobjs[i];
			space_info->block_group_kobjs[i] = NULL;
			if (kobj) {
				kobject_del(kobj);
				kobject_put(kobj);
			}
		}
		kobject_del(&space_info->kobj);
		kobject_put(&space_info->kobj);
	}
	return 0;
}
/*
 * Add @cache to its space_info's per-raid-profile block group list and, for
 * the first group of that profile, create the matching sysfs raid kobject.
 * A kobject failure is logged and otherwise ignored (sysfs is best effort).
 */
static void link_block_group(struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int index = get_block_group_index(cache);
	bool first = false;
	down_write(&space_info->groups_sem);
	if (list_empty(&space_info->block_groups[index]))
		first = true;
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);
	if (first) {
		struct raid_kobject *rkobj;
		int ret;
		rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
		if (!rkobj)
			goto out_err;
		rkobj->raid_type = index;
		kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
		ret = kobject_add(&rkobj->kobj, &space_info->kobj,
				  "%s", get_raid_name(index));
		if (ret) {
			/* kobject_put also frees rkobj via its release */
			kobject_put(&rkobj->kobj);
			goto out_err;
		}
		space_info->block_group_kobjs[index] = &rkobj->kobj;
	}
	return;
out_err:
	btrfs_warn(cache->fs_info,
		   "failed to add kobject for block cache, ignoring");
}
/*
 * Allocate and initialize an in-memory block group cache descriptor for the
 * group starting at @start with length @size.  Returns NULL on allocation
 * failure.  Note the ordering below: the key must be set before
 * set_free_space_tree_thresholds() and free_space_ctl must be allocated
 * before btrfs_init_free_space_ctl().
 */
static struct btrfs_block_group_cache *
btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info,
			       u64 start, u64 size)
{
	struct btrfs_block_group_cache *cache;
	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return NULL;
	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return NULL;
	}
	cache->key.objectid = start;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->fs_info = fs_info;
	cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
	set_free_space_tree_thresholds(cache);
	atomic_set(&cache->count, 1);	/* the caller owns this reference */
	spin_lock_init(&cache->lock);
	init_rwsem(&cache->data_rwsem);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	INIT_LIST_HEAD(&cache->bg_list);
	INIT_LIST_HEAD(&cache->ro_list);
	INIT_LIST_HEAD(&cache->dirty_list);
	INIT_LIST_HEAD(&cache->io_list);
	btrfs_init_free_space_ctl(cache);
	atomic_set(&cache->trimming, 0);
	mutex_init(&cache->free_space_lock);
	btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
	return cache;
}
/*
 * btrfs_read_block_groups - load every on-disk block group item into memory
 * @info: the filesystem
 *
 * Walks the extent tree for BLOCK_GROUP_ITEM keys, builds an in-memory
 * cache for each block group, links it to its space_info, marks groups on
 * read-only chunks as RO, and queues completely-empty groups on the
 * unused_bgs list for later deletion.
 *
 * Returns 0 on success or a negative errno; on error, caches created so
 * far remain owned by the rbtree/space_info structures they were added to.
 */
int btrfs_read_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int need_clear = 0;
	u64 cache_gen;
	u64 feature;
	int mixed;
	feature = btrfs_super_incompat_flags(info->super_copy);
	mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
	key.objectid = 0;
	key.offset = 0;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;
	/*
	 * A stale or explicitly-cleared free space cache must be rebuilt:
	 * flag that so each group gets BTRFS_DC_CLEAR below.
	 */
	cache_gen = btrfs_super_cache_generation(info->super_copy);
	if (btrfs_test_opt(info, SPACE_CACHE) &&
	    btrfs_super_generation(info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(info, CLEAR_CACHE))
		need_clear = 1;
	while (1) {
		ret = find_first_block_group(info, path, &key);
		if (ret > 0)
			break;	/* no more block group items */
		if (ret != 0)
			goto error;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = btrfs_create_block_group_cache(info, found_key.objectid,
						       found_key.offset);
		if (!cache) {
			ret = -ENOMEM;
			goto error;
		}
		if (need_clear) {
			/*
			 * When we mount with old space cache, we need to
			 * set BTRFS_DC_CLEAR and set dirty flag.
			 *
			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
			 *    truncate the old free space cache inode and
			 *    setup a new one.
			 * b) Setting 'dirty flag' makes sure that we flush
			 *    the new space cache info onto disk.
			 */
			if (btrfs_test_opt(info, SPACE_CACHE))
				cache->disk_cache_state = BTRFS_DC_CLEAR;
		}
		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		cache->flags = btrfs_block_group_flags(&cache->item);
		/* A DATA|METADATA group is only legal on a mixed-bg fs. */
		if (!mixed &&
		    ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
		    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
			btrfs_err(info,
"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
				  cache->key.objectid);
			ret = -EINVAL;
			goto error;
		}
		/* Advance the search key past this group for the next loop. */
		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(path);
		/*
		 * We need to exclude the super stripes now so that the space
		 * info has super bytes accounted for, otherwise we'll think
		 * we have more space than we actually do.
		 */
		ret = exclude_super_stripes(info, cache);
		if (ret) {
			/*
			 * We may have excluded something, so call this just in
			 * case.
			 */
			free_excluded_extents(info, cache);
			btrfs_put_block_group(cache);
			goto error;
		}
		/*
		 * check for two cases, either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it. This saves us _alot_ of
		 * time, particularly in the full case.
		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			free_excluded_extents(info, cache);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, info,
					   found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
			free_excluded_extents(info, cache);
		}
		ret = btrfs_add_block_group_cache(info, cache);
		if (ret) {
			btrfs_remove_free_space_cache(cache);
			btrfs_put_block_group(cache);
			goto error;
		}
		trace_btrfs_add_block_group(info, cache, 0);
		update_space_info(info, cache->flags, found_key.offset,
				  btrfs_block_group_used(&cache->item),
				  cache->bytes_super, &space_info);
		cache->space_info = space_info;
		link_block_group(cache);
		set_avail_alloc_bits(info, cache->flags);
		if (btrfs_chunk_readonly(info, cache->key.objectid)) {
			inc_block_group_ro(cache, 1);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			/* Empty group: queue it for automatic deletion. */
			spin_lock(&info->unused_bgs_lock);
			/* Should always be true but just in case. */
			if (list_empty(&cache->bg_list)) {
				btrfs_get_block_group(cache);
				list_add_tail(&cache->bg_list,
					      &info->unused_bgs);
			}
			spin_unlock(&info->unused_bgs_lock);
		}
	}
	list_for_each_entry_rcu(space_info, &info->space_info, list) {
		if (!(get_alloc_profile(info, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_RAID5 |
		       BTRFS_BLOCK_GROUP_RAID6 |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*
		 * avoid allocating from un-mirrored block group if there are
		 * mirrored block groups.
		 */
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_RAID0],
				list)
			inc_block_group_ro(cache, 1);
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_SINGLE],
				list)
			inc_block_group_ro(cache, 1);
	}
	init_global_block_rsv(info);
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * btrfs_create_pending_block_groups - persist block groups created in @trans
 *
 * For every block group queued on trans->new_bgs, insert its item into the
 * extent tree, finish the chunk allocation, and add its free space info.
 * Any failure aborts the transaction; subsequent entries are still removed
 * from the list (via the "next" label) but no further work is done for them.
 *
 * can_flush_pending_bgs is cleared for the duration so the insertions below
 * do not recursively try to flush this same list.
 */
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
				       struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *block_group, *tmp;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_block_group_item item;
	struct btrfs_key key;
	int ret = 0;
	bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
	trans->can_flush_pending_bgs = false;
	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
		/* After the first failure, only drain the list. */
		if (ret)
			goto next;
		/* Snapshot item/key under the lock; use the copies below. */
		spin_lock(&block_group->lock);
		memcpy(&item, &block_group->item, sizeof(item));
		memcpy(&key, &block_group->key, sizeof(key));
		spin_unlock(&block_group->lock);
		ret = btrfs_insert_item(trans, extent_root, &key, &item,
					sizeof(item));
		if (ret)
			btrfs_abort_transaction(trans, ret);
		ret = btrfs_finish_chunk_alloc(trans, fs_info, key.objectid,
					       key.offset);
		if (ret)
			btrfs_abort_transaction(trans, ret);
		add_block_group_free_space(trans, fs_info, block_group);
		/* already aborted the transaction if it failed. */
next:
		list_del_init(&block_group->bg_list);
	}
	trans->can_flush_pending_bgs = can_flush_pending_bgs;
}
/*
 * btrfs_make_block_group - create a new in-memory block group for a chunk
 * @trans:        running transaction
 * @fs_info:      the filesystem
 * @bytes_used:   bytes already allocated inside the new group
 * @type:         block group flags (DATA/METADATA/SYSTEM | profile)
 * @chunk_offset: logical start of the backing chunk
 * @size:         length of the chunk
 *
 * Builds the cache object, accounts its free space, inserts it into the
 * rbtree and space_info, and queues it on trans->new_bgs so
 * btrfs_create_pending_block_groups() later persists it on disk.
 * Returns 0 or a negative errno.
 */
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_fs_info *fs_info, u64 bytes_used,
			   u64 type, u64 chunk_offset, u64 size)
{
	struct btrfs_block_group_cache *cache;
	int ret;
	/* New block groups force a full commit of any tree log. */
	btrfs_set_log_full_commit(fs_info, trans);
	cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
	if (!cache)
		return -ENOMEM;
	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item,
					     BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_block_group_flags(&cache->item, type);
	cache->flags = type;
	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	cache->needs_free_space = 1;
	ret = exclude_super_stripes(fs_info, cache);
	if (ret) {
		/*
		 * We may have excluded something, so call this just in
		 * case.
		 */
		free_excluded_extents(fs_info, cache);
		btrfs_put_block_group(cache);
		return ret;
	}
	add_new_free_space(cache, fs_info, chunk_offset, chunk_offset + size);
	free_excluded_extents(fs_info, cache);
#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * Debug builds deliberately inflate bytes_used by half the free
	 * space — presumably to exercise fragmented-free-space paths; see
	 * btrfs_should_fragment_free_space() for the exact intent.
	 */
	if (btrfs_should_fragment_free_space(cache)) {
		u64 new_bytes_used = size - bytes_used;
		bytes_used += new_bytes_used >> 1;
		fragment_free_space(cache);
	}
#endif
	/*
	 * Ensure the corresponding space_info object is created and
	 * assigned to our block group. We want our bg to be added to the rbtree
	 * with its ->space_info set.
	 */
	cache->space_info = __find_space_info(fs_info, cache->flags);
	if (!cache->space_info) {
		ret = create_space_info(fs_info, cache->flags,
					&cache->space_info);
		if (ret) {
			btrfs_remove_free_space_cache(cache);
			btrfs_put_block_group(cache);
			return ret;
		}
	}
	ret = btrfs_add_block_group_cache(fs_info, cache);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		btrfs_put_block_group(cache);
		return ret;
	}
	/*
	 * Now that our block group has its ->space_info set and is inserted in
	 * the rbtree, update the space info's counters.
	 */
	trace_btrfs_add_block_group(fs_info, cache, 1);
	update_space_info(fs_info, cache->flags, size, bytes_used,
			  cache->bytes_super, &cache->space_info);
	update_global_block_rsv(fs_info);
	link_block_group(cache);
	list_add_tail(&cache->bg_list, &trans->new_bgs);
	set_avail_alloc_bits(fs_info, type);
	return 0;
}
/*
 * Drop the extended profile bits of @flags from the per-type "available
 * allocation bits" masks.  The three per-type updates are independent, so
 * they may run in any order; the profiles seqlock guards all of them.
 */
static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 profile_bits = chunk_to_extended(flags) &
			   BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~profile_bits;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~profile_bits;
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~profile_bits;
	write_sequnlock(&fs_info->profiles_lock);
}
/*
 * btrfs_remove_block_group - remove an empty, read-only block group
 * @trans:       running transaction (started via
 *               btrfs_start_trans_remove_block_group())
 * @fs_info:     the filesystem
 * @group_start: logical start of the block group
 * @em:          the chunk's extent map (a reference owned by the caller)
 *
 * Tears down the free space cache inode, deletes the free space and block
 * group items, unlinks the group from all in-memory structures, and drops
 * (or pins, while trimming is in progress) the chunk's extent map.
 * Returns 0 or a negative errno.
 */
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 group_start,
			     struct extent_map *em)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_em;
	block_group = btrfs_lookup_block_group(fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);
	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	free_excluded_extents(fs_info, block_group);
	btrfs_free_ref_tree_range(fs_info, block_group->key.objectid,
				  block_group->key.offset);
	memcpy(&key, &block_group->key, sizeof(key));
	index = get_block_group_index(block_group);
	/* Mirrored profiles consume twice their size in raw disk space. */
	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
				  BTRFS_BLOCK_GROUP_RAID1 |
				  BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;
	/* make sure this block group isn't part of an allocation cluster */
	cluster = &fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);
	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(fs_info, block_group, path);
	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);
		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
		/* Drop the lock to wait for cache IO, then retake it. */
		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(trans, block_group, path);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}
	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);
	if (!IS_ERR(inode)) {
		/* Orphan the cache inode so it's deleted on commit/replay. */
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		if (ret) {
			btrfs_add_delayed_iput(inode);
			goto out;
		}
		clear_nlink(inode);
		/* One for the block groups ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		btrfs_add_delayed_iput(inode);
	}
	/* Delete the free space cache item from the tree of tree roots. */
	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;
	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}
	spin_lock(&fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);
	if (fs_info->first_logical_byte == block_group->key.objectid)
		fs_info->first_logical_byte = (u64)-1;
	spin_unlock(&fs_info->block_group_cache_lock);
	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		/* Last group of this raid type: tear down its sysfs kobject. */
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}
	if (block_group->has_caching_ctl)
		caching_ctl = get_caching_control(block_group);
	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_block_group_cache_done(block_group);
	if (block_group->has_caching_ctl) {
		down_write(&fs_info->commit_root_sem);
		if (!caching_ctl) {
			struct btrfs_caching_control *ctl;
			/*
			 * The ctl may not have been found above if caching
			 * finished in between; search the global list.
			 */
			list_for_each_entry(ctl,
				    &fs_info->caching_block_groups, list)
				if (ctl->block_group == block_group) {
					caching_ctl = ctl;
					refcount_inc(&caching_ctl->count);
					break;
				}
		}
		if (caching_ctl)
			list_del_init(&caching_ctl->list);
		up_write(&fs_info->commit_root_sem);
		if (caching_ctl) {
			/* Once for the caching bgs list and once for us. */
			put_caching_control(caching_ctl);
			put_caching_control(caching_ctl);
		}
	}
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->dirty_list)) {
		WARN_ON(1);
	}
	if (!list_empty(&block_group->io_list)) {
		WARN_ON(1);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	btrfs_remove_free_space_cache(block_group);
	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);
	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->key.offset);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->key.offset);
		WARN_ON(block_group->space_info->disk_total
			< block_group->key.offset * factor);
	}
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	block_group->space_info->disk_total -= block_group->key.offset * factor;
	spin_unlock(&block_group->space_info->lock);
	memcpy(&key, &block_group->key, sizeof(key));
	mutex_lock(&fs_info->chunk_mutex);
	if (!list_empty(&em->list)) {
		/* We're in the transaction->pending_chunks list. */
		free_extent_map(em);
	}
	spin_lock(&block_group->lock);
	block_group->removed = 1;
	/*
	 * At this point trimming can't start on this block group, because we
	 * removed the block group from the tree fs_info->block_group_cache_tree
	 * so no one can find it anymore and even if someone already got this
	 * block group before we removed it from the rbtree, they have already
	 * incremented block_group->trimming - if they didn't, they won't find
	 * any free space entries because we already removed them all when we
	 * called btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the extent map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group. This is because our
	 * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
	remove_em = (atomic_read(&block_group->trimming) == 0);
	/*
	 * Make sure a trimmer task always sees the em in the pinned_chunks list
	 * if it sees block_group->removed == 1 (needs to lock block_group->lock
	 * before checking block_group->removed).
	 */
	if (!remove_em) {
		/*
		 * Our em might be in trans->transaction->pending_chunks which
		 * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
		 * and so is the fs_info->pinned_chunks list.
		 *
		 * So at this point we must be holding the chunk_mutex to avoid
		 * any races with chunk allocation (more specifically at
		 * volumes.c:contains_pending_extent()), to ensure it always
		 * sees the em, either in the pending_chunks list or in the
		 * pinned_chunks list.
		 */
		list_move_tail(&em->list, &fs_info->pinned_chunks);
	}
	spin_unlock(&block_group->lock);
	if (remove_em) {
		struct extent_map_tree *em_tree;
		em_tree = &fs_info->mapping_tree.map_tree;
		write_lock(&em_tree->lock);
		/*
		 * The em might be in the pending_chunks list, so make sure the
		 * chunk mutex is locked, since remove_extent_mapping() will
		 * delete us from that list.
		 */
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		/* once for the tree */
		free_extent_map(em);
	}
	mutex_unlock(&fs_info->chunk_mutex);
	ret = remove_block_group_free_space(trans, fs_info, block_group);
	if (ret)
		goto out;
	/* Once for the rbtree's ref, once for our lookup at the top. */
	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);
	/* Finally delete the block group item from the extent tree. */
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;
	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * btrfs_start_trans_remove_block_group - start a transaction sized for
 * deleting the block group whose chunk starts at @chunk_offset.
 *
 * Looks up the chunk's extent map only to learn its stripe count, then
 * starts a transaction reserving 3 + num_stripes metadata units (see the
 * breakdown below).  Returns the handle or an ERR_PTR.
 */
struct btrfs_trans_handle *
btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
				     const u64 chunk_offset)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	unsigned int num_items;
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);
	ASSERT(em && em->start == chunk_offset);
	/*
	 * We need to reserve 3 + N units from the metadata space info in order
	 * to remove a block group (done at btrfs_remove_chunk() and at
	 * btrfs_remove_block_group()), which are used for:
	 *
	 * 1 unit for adding the free space inode's orphan (located in the tree
	 * of tree roots).
	 * 1 unit for deleting the block group item (located in the extent
	 * tree).
	 * 1 unit for deleting the free space item (located in tree of tree
	 * roots).
	 * N units for deleting N device extent items corresponding to each
	 * stripe (located in the device tree).
	 *
	 * In order to remove a block group we also need to reserve units in the
	 * system space info in order to update the chunk tree (update one or
	 * more device items and remove one chunk item), but this is done at
	 * btrfs_remove_chunk() through a call to check_system_chunk().
	 */
	map = em->map_lookup;
	num_items = 3 + map->num_stripes;
	free_extent_map(em);
	return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
							   num_items, 1);
}
/*
* Process the unused_bgs list and remove any that don't have any allocated
* space inside of them.
*/
/*
 * btrfs_delete_unused_bgs - delete block groups with no allocated space
 * @fs_info: the filesystem
 *
 * Drains fs_info->unused_bgs: each candidate is re-checked under the locks,
 * flipped read-only, its pinned extents discarded from both freed_extents
 * trees, and its chunk removed in its own transaction.  Groups that became
 * non-empty (or are mixed) in the meantime are skipped.
 */
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	int ret = 0;
	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
		return;
	spin_lock(&fs_info->unused_bgs_lock);
	while (!list_empty(&fs_info->unused_bgs)) {
		u64 start, end;
		int trimming;
		block_group = list_first_entry(&fs_info->unused_bgs,
					       struct btrfs_block_group_cache,
					       bg_list);
		list_del_init(&block_group->bg_list);
		space_info = block_group->space_info;
		/* After any failure, just drop the remaining queued refs. */
		if (ret || btrfs_mixed_space_info(space_info)) {
			btrfs_put_block_group(block_group);
			continue;
		}
		spin_unlock(&fs_info->unused_bgs_lock);
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		/* Don't want to race with allocators so take the groups_sem */
		down_write(&space_info->groups_sem);
		spin_lock(&block_group->lock);
		if (block_group->reserved ||
		    btrfs_block_group_used(&block_group->item) ||
		    block_group->ro ||
		    list_is_singular(&block_group->list)) {
			/*
			 * We want to bail if we made new allocations or have
			 * outstanding allocations in this block group. We do
			 * the ro check in case balance is currently acting on
			 * this block group.
			 */
			spin_unlock(&block_group->lock);
			up_write(&space_info->groups_sem);
			goto next;
		}
		spin_unlock(&block_group->lock);
		/* We don't want to force the issue, only flip if it's ok. */
		ret = inc_block_group_ro(block_group, 0);
		up_write(&space_info->groups_sem);
		if (ret < 0) {
			ret = 0;
			goto next;
		}
		/*
		 * Want to do this before we do anything else so we can recover
		 * properly if we fail to join the transaction.
		 */
		trans = btrfs_start_trans_remove_block_group(fs_info,
						     block_group->key.objectid);
		if (IS_ERR(trans)) {
			btrfs_dec_block_group_ro(block_group);
			ret = PTR_ERR(trans);
			goto next;
		}
		/*
		 * We could have pending pinned extents for this block group,
		 * just delete them, we don't care about them anymore.
		 */
		start = block_group->key.objectid;
		end = start + block_group->key.offset - 1;
		/*
		 * Hold the unused_bg_unpin_mutex lock to avoid racing with
		 * btrfs_finish_extent_commit(). If we are at transaction N,
		 * another task might be running finish_extent_commit() for the
		 * previous transaction N - 1, and have seen a range belonging
		 * to the block group in freed_extents[] before we were able to
		 * clear the whole block group range from freed_extents[]. This
		 * means that task can lookup for the block group after we
		 * unpinned it from freed_extents[] and removed it, leading to
		 * a BUG_ON() at btrfs_unpin_extent_range().
		 */
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
				  EXTENT_DIRTY);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			btrfs_dec_block_group_ro(block_group);
			goto end_trans;
		}
		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
				  EXTENT_DIRTY);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			btrfs_dec_block_group_ro(block_group);
			goto end_trans;
		}
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
		/* Reset pinned so btrfs_put_block_group doesn't complain */
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);
		space_info->bytes_pinned -= block_group->pinned;
		space_info->bytes_readonly += block_group->pinned;
		percpu_counter_add(&space_info->total_bytes_pinned,
				   -block_group->pinned);
		block_group->pinned = 0;
		spin_unlock(&block_group->lock);
		spin_unlock(&space_info->lock);
		/* DISCARD can flip during remount */
		trimming = btrfs_test_opt(fs_info, DISCARD);
		/* Implicit trim during transaction commit. */
		if (trimming)
			btrfs_get_block_group_trimming(block_group);
		/*
		 * Btrfs_remove_chunk will abort the transaction if things go
		 * horribly wrong.
		 */
		ret = btrfs_remove_chunk(trans, fs_info,
					 block_group->key.objectid);
		if (ret) {
			if (trimming)
				btrfs_put_block_group_trimming(block_group);
			goto end_trans;
		}
		/*
		 * If we're not mounted with -odiscard, we can just forget
		 * about this block group. Otherwise we'll need to wait
		 * until transaction commit to do the actual discard.
		 */
		if (trimming) {
			spin_lock(&fs_info->unused_bgs_lock);
			/*
			 * A concurrent scrub might have added us to the list
			 * fs_info->unused_bgs, so use a list_move operation
			 * to add the block group to the deleted_bgs list.
			 */
			list_move(&block_group->bg_list,
				  &trans->transaction->deleted_bgs);
			spin_unlock(&fs_info->unused_bgs_lock);
			btrfs_get_block_group(block_group);
		}
end_trans:
		btrfs_end_transaction(trans);
next:
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		btrfs_put_block_group(block_group);
		spin_lock(&fs_info->unused_bgs_lock);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
struct btrfs_space_info *space_info;
struct btrfs_super_block *disk_super;
u64 features;
u64 flags;
int mixed = 0;
int ret;
disk_super = fs_info->super_copy;
if (!btrfs_super_root(disk_super))
return -EINVAL;
features = btrfs_super_incompat_flags(disk_super);
if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
mixed = 1;
flags = BTRFS_BLOCK_GROUP_SYSTEM;
ret = create_space_info(fs_info, flags, &space_info);
if (ret)
goto out;
if (mixed) {
flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
ret = create_space_info(fs_info, flags, &space_info);
} else {
flags = BTRFS_BLOCK_GROUP_METADATA;
ret = create_space_info(fs_info, flags, &space_info);
if (ret)
goto out;
flags = BTRFS_BLOCK_GROUP_DATA;
ret = create_space_info(fs_info, flags, &space_info);
}
out:
return ret;
}
/*
 * Error-path wrapper: unpin [start, end] without updating the free space
 * caches (the 'false' argument to unpin_extent_range()).
 */
int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
				   u64 start, u64 end)
{
	return unpin_extent_range(fs_info, start, end, false);
}
/*
* It used to be that old block groups would be left around forever.
* Iterating over them would be enough to trim unused space. Since we
* now automatically remove them, we also need to iterate over unallocated
* space.
*
* We don't want a transaction for this since the discard may take a
* substantial amount of time. We don't require that a transaction be
* running, but we do need to take a running transaction into account
* to ensure that we're not discarding chunks that were released in
* the current transaction.
*
* Holding the chunks lock will prevent other threads from allocating
* or releasing chunks, but it won't prevent a running transaction
* from committing and releasing the memory that the pending chunks
* list head uses. For that, we need to take a reference to the
* transaction.
*/
/*
 * btrfs_trim_free_extents - discard unallocated space on one device
 * @device:  device to trim
 * @minlen:  minimum extent length worth discarding
 * @trimmed: out: total bytes actually discarded
 *
 * Iterates free device extents and issues discards for each.  See the
 * comment above for why a reference to the running transaction (rather
 * than a new transaction) is taken around each lookup.  Returns 0 on
 * success/exhaustion (-ENOSPC from the lookup is mapped to 0) or a
 * negative errno, including -ERESTARTSYS on a fatal signal.
 */
static int btrfs_trim_free_extents(struct btrfs_device *device,
				   u64 minlen, u64 *trimmed)
{
	u64 start = 0, len = 0;
	int ret;
	*trimmed = 0;
	/* Not writeable = nothing to do. */
	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return 0;
	/* No free space = nothing to do. */
	if (device->total_bytes <= device->bytes_used)
		return 0;
	ret = 0;
	while (1) {
		struct btrfs_fs_info *fs_info = device->fs_info;
		struct btrfs_transaction *trans;
		u64 bytes;
		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
		if (ret)
			return ret;
		down_read(&fs_info->commit_root_sem);
		/* Pin the running transaction so its chunk lists stay valid. */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);
		ret = find_free_dev_extent_start(trans, device, minlen, start,
						 &start, &len);
		if (trans)
			btrfs_put_transaction(trans);
		if (ret) {
			up_read(&fs_info->commit_root_sem);
			mutex_unlock(&fs_info->chunk_mutex);
			/* -ENOSPC just means there's nothing left to trim. */
			if (ret == -ENOSPC)
				ret = 0;
			break;
		}
		ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
		up_read(&fs_info->commit_root_sem);
		mutex_unlock(&fs_info->chunk_mutex);
		if (ret)
			break;
		start += len;
		*trimmed += bytes;
		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		cond_resched();
	}
	return ret;
}
/*
 * btrfs_trim_fs - FITRIM implementation: discard free space in @range
 * @fs_info: the filesystem
 * @range:   fstrim parameters; range->len is updated to the bytes trimmed
 *
 * First trims the free space inside every block group intersecting the
 * range, then trims the unallocated space on each device (see the comment
 * above btrfs_trim_free_extents() for why).  Returns 0 or a negative errno.
 */
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_device *device;
	struct list_head *devices;
	u64 group_trimmed;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	int ret = 0;
	/*
	 * try to trim all FS space, our block group may start from non-zero.
	 */
	if (range->len == total_bytes)
		cache = btrfs_lookup_first_block_group(fs_info, range->start);
	else
		cache = btrfs_lookup_block_group(fs_info, range->start);
	while (cache) {
		if (cache->key.objectid >= (range->start + range->len)) {
			btrfs_put_block_group(cache);
			break;
		}
		/* Clamp the trim window to this block group. */
		start = max(range->start, cache->key.objectid);
		end = min(range->start + range->len,
			  cache->key.objectid + cache->key.offset);
		if (end - start >= range->minlen) {
			/* Free space must be fully cached before trimming. */
			if (!block_group_cache_done(cache)) {
				ret = cache_block_group(cache, 0);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
				ret = wait_block_group_cache_done(cache);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);
			trimmed += group_trimmed;
			if (ret) {
				btrfs_put_block_group(cache);
				break;
			}
		}
		cache = next_block_group(fs_info, cache);
	}
	/* Second pass: unallocated device space. */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	devices = &fs_info->fs_devices->alloc_list;
	list_for_each_entry(device, devices, dev_alloc_list) {
		ret = btrfs_trim_free_extents(device, range->minlen,
					      &group_trimmed);
		if (ret)
			break;
		trimmed += group_trimmed;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	range->len = trimmed;
	return ret;
}
/*
* btrfs_{start,end}_write_no_snapshotting() are similar to
* mnt_{want,drop}_write(), they are used to prevent some tasks from writing
* data into the page cache through nocow before the subvolume is snapshoted,
* but flush the data into disk after the snapshot creation, or to prevent
* operations while snapshotting is ongoing and that cause the snapshot to be
* inconsistent (writes followed by expanding truncates for example).
*/
/*
 * Drop the no-snapshotting writer count taken by
 * btrfs_start_write_no_snapshotting() and wake anyone waiting for the
 * counter to drain.
 */
void btrfs_end_write_no_snapshotting(struct btrfs_root *root)
{
	percpu_counter_dec(&root->subv_writers->counter);
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(&root->subv_writers->wait))
		wake_up(&root->subv_writers->wait);
}
/*
 * Try to enter a "no snapshotting" write section on @root.
 *
 * Returns 1 with the subv_writers counter held (release with
 * btrfs_end_write_no_snapshotting()), or 0 if a snapshot is about to be
 * created.  The increment-then-recheck pattern with the barrier pairs
 * with the snapshot side so neither can miss the other.
 */
int btrfs_start_write_no_snapshotting(struct btrfs_root *root)
{
	if (atomic_read(&root->will_be_snapshotted))
		return 0;
	percpu_counter_inc(&root->subv_writers->counter);
	/*
	 * Make sure counter is updated before we check for snapshot creation.
	 */
	smp_mb();
	if (atomic_read(&root->will_be_snapshotted)) {
		btrfs_end_write_no_snapshotting(root);
		return 0;
	}
	return 1;
}
/*
 * Block until a pending snapshot of @root completes, leaving the caller
 * holding the no-snapshotting write section (its non-zero return from
 * btrfs_start_write_no_snapshotting() ends the loop).
 */
void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
{
	for (;;) {
		if (btrfs_start_write_no_snapshotting(root))
			break;
		wait_on_atomic_t(&root->will_be_snapshotted, atomic_t_wait,
				 TASK_UNINTERRUPTIBLE);
	}
}
| zhiyisun/linux | fs/btrfs/extent-tree.c | C | gpl-2.0 | 307,040 |
/* Copyright 2020 Obosob <obosob@riseup.net>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
/* Use the 128x64 OLED panel geometry when the OLED driver is enabled. */
#ifdef OLED_DRIVER_ENABLE
#define OLED_DISPLAY_128X64
#endif
/* RGB underglow: only the rainbow-swirl effect, with 8-step HSV increments. */
#ifdef RGBLIGHT_ENABLE
// #define RGBLIGHT_ANIMATIONS
#define RGBLIGHT_EFFECT_RAINBOW_SWIRL
#define RGBLIGHT_HUE_STEP 8
#define RGBLIGHT_SAT_STEP 8
#define RGBLIGHT_VAL_STEP 8
#endif
/* Dual-role key behavior tweak — see QMK's PERMISSIVE_HOLD documentation. */
#define PERMISSIVE_HOLD
| kmtoki/qmk_firmware | keyboards/arch_36/keymaps/obosob/config.h | C | gpl-2.0 | 994 |
/*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.plugins.setLang( 'justify', 'nb', {
block: 'Blokkjuster',
center: 'Midtstill',
left: 'Venstrejuster',
right: 'Høyrejuster'
});
| rcav/rcav | wp-content/plugins/ml-slider-pro/modules/layer/assets/ckeditor/plugins/justify/lang/nb.js | JavaScript | gpl-2.0 | 289 |
/*
* kernel/power/suspend.c - Suspend to RAM and standby functionality.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
* Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
*
* This file is released under the GPLv2.
*/
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <trace/events/power.h>
#include "power.h"
/*
 * Textual names of the supported sleep states, indexed by suspend_state_t.
 * The "on" entry only exists when the earlysuspend infrastructure is
 * built in (CONFIG_EARLYSUSPEND).
 */
const char *const pm_states[PM_SUSPEND_MAX] = {
#ifdef CONFIG_EARLYSUSPEND
	[PM_SUSPEND_ON] = "on",
#endif
	[PM_SUSPEND_STANDBY] = "standby",
	[PM_SUSPEND_MEM] = "mem",
};
static const struct platform_suspend_ops *suspend_ops;
/**
 * suspend_set_ops - Set the global suspend method table.
 * @ops: Pointer to ops structure.
 *
 * The swap is serialized with pm_mutex.
 */
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
	mutex_lock(&pm_mutex);
	suspend_ops = ops;
	mutex_unlock(&pm_mutex);
}
/*
 * valid_state - check whether the platform supports sleep state @state.
 */
bool valid_state(suspend_state_t state)
{
	/*
	 * All states need lowlevel support and need to be valid to the lowlevel
	 * implementation, no valid callback implies that none are valid.
	 */
	return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
}
/**
 * suspend_valid_only_mem - generic memory-only valid callback
 *
 * Platform drivers that implement mem suspend only and only need
 * to check for that in their .valid callback can use this instead
 * of rolling their own .valid callback.
 */
int suspend_valid_only_mem(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM;
}
/*
 * suspend_test - optional debug pause point in the suspend sequence.
 * @level: TEST_* level this call site represents.
 *
 * When CONFIG_PM_DEBUG is set and pm_test_level matches @level, busy-wait
 * for 5 seconds and return 1 so the caller aborts the rest of the
 * transition at this point.  Returns 0 (continue) otherwise, and always 0
 * when CONFIG_PM_DEBUG is not configured.
 */
static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
if (pm_test_level == level) {
printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
mdelay(5000);
return 1;
}
#endif /* !CONFIG_PM_DEBUG */
return 0;
}
/**
 * suspend_prepare - Do prep work before entering low-power state.
 *
 * This is common code that is called for each state that we're entering.
 * Run suspend notifiers, allocate a console and stop all processes.
 *
 * Returns 0 on success or a negative errno; on failure, everything done
 * up to the failing step is unwound before returning.
 */
static int suspend_prepare(void)
{
int error;
/* Without an ->enter callback there is no way to actually suspend. */
if (!suspend_ops || !suspend_ops->enter)
return -EPERM;
pm_prepare_console();
error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
if (error)
goto Finish;
/* Block usermode helper spawning for the duration of the transition. */
error = usermodehelper_disable();
if (error)
goto Finish;
error = suspend_freeze_processes();
if (!error)
return 0;
/* Freezing failed: thaw again, then fall through to common cleanup. */
suspend_thaw_processes();
usermodehelper_enable();
Finish:
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
return error;
}
/* Default implementation; declared weak so an architecture can override. */
void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
{
local_irq_disable();
}
/* Default implementation; declared weak so an architecture can override. */
void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
{
local_irq_enable();
}
/**
 * suspend_enter - enter the desired system sleep state.
 * @state: state to enter
 *
 * This function should be called after devices have been suspended.
 * Each failure exits through the label that undoes exactly the steps
 * completed so far, so the unwind mirrors the forward sequence.
 */
static int suspend_enter(suspend_state_t state)
{
int error;
/* Optional platform "prepare" hook runs before the noirq device phase. */
if (suspend_ops->prepare) {
error = suspend_ops->prepare();
if (error)
goto Platform_finish;
}
/* "noirq" phase of device suspend. */
error = dpm_suspend_noirq(PMSG_SUSPEND);
if (error) {
printk(KERN_ERR "PM: Some devices failed to power down\n");
goto Platform_finish;
}
if (suspend_ops->prepare_late) {
error = suspend_ops->prepare_late();
if (error)
goto Platform_wake;
}
/* Optional debug stop (see suspend_test()) before touching CPUs. */
if (suspend_test(TEST_PLATFORM))
goto Platform_wake;
error = disable_nonboot_cpus();
if (error || suspend_test(TEST_CPUS))
goto Enable_cpus;
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
error = syscore_suspend();
if (!error) {
/* A wakeup event pending at the last moment aborts the transition. */
if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
error = suspend_ops->enter(state);
events_check_enabled = false;
}
syscore_resume();
}
arch_suspend_enable_irqs();
BUG_ON(irqs_disabled());
Enable_cpus:
enable_nonboot_cpus();
Platform_wake:
if (suspend_ops->wake)
suspend_ops->wake();
dpm_resume_noirq(PMSG_RESUME);
Platform_finish:
if (suspend_ops->finish)
suspend_ops->finish();
return error;
}
/**
 * suspend_devices_and_enter - suspend devices and enter the desired system
 * sleep state.
 * @state: state to enter
 *
 * Returns 0 on success or the first error encountered.  On any failure
 * after dpm_suspend_start() succeeds, devices are resumed and the console
 * restored before returning.
 */
int suspend_devices_and_enter(suspend_state_t state)
{
int error;
if (!suspend_ops)
return -ENOSYS;
trace_machine_suspend(state);
if (suspend_ops->begin) {
error = suspend_ops->begin(state);
if (error)
goto Close;
}
/* Quiesce the console while devices are going down. */
suspend_console();
suspend_test_start();
error = dpm_suspend_start(PMSG_SUSPEND);
if (error) {
printk(KERN_ERR "PM: Some devices failed to suspend\n");
goto Recover_platform;
}
suspend_test_finish("suspend devices");
if (suspend_test(TEST_DEVICES))
goto Recover_platform;
error = suspend_enter(state);
Resume_devices:
suspend_test_start();
dpm_resume_end(PMSG_RESUME);
suspend_test_finish("resume devices");
resume_console();
Close:
if (suspend_ops->end)
suspend_ops->end();
trace_machine_suspend(PWR_EVENT_EXIT);
return error;
Recover_platform:
/* Give the platform a chance to clean up after a failed device suspend. */
if (suspend_ops->recover)
suspend_ops->recover();
goto Resume_devices;
}
/**
 * suspend_finish - Do final work before exiting suspend sequence.
 *
 * Call platform code to clean up, restart processes, and free the
 * console that we've allocated. This is not called for suspend-to-disk.
 * The sequence mirrors the failure path of suspend_prepare().
 */
static void suspend_finish(void)
{
suspend_thaw_processes();
usermodehelper_enable();
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
}
/**
 * enter_state - Do common work of entering low-power state.
 * @state: pm_state structure for state we're entering.
 *
 * Make sure we're the only ones trying to enter a sleep state. Fail
 * if someone has beat us to it, since we don't want anything weird to
 * happen when we wake up.
 * Then, do the setup for suspend, enter the state, and clean up (after
 * we've woken up).
 *
 * Returns 0 on success or a negative errno (-ENODEV for an unsupported
 * state, -EBUSY when another transition already holds pm_mutex).
 */
int enter_state(suspend_state_t state)
{
	int error;

	if (!valid_state(state))
		return -ENODEV;
	if (!mutex_trylock(&pm_mutex))
		return -EBUSY;

	/*
	 * Queue an asynchronous filesystem sync rather than calling
	 * sys_sync() inline, so entry is not serialized behind slow storage.
	 */
	suspend_sys_sync_queue();

	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
	error = suspend_prepare();
	if (error)
		goto Unlock;

	if (suspend_test(TEST_FREEZER))
		goto Finish;

	pr_debug("PM: Entering %s sleep\n", pm_states[state]);
	/* Restrict allocations for the duration of the transition. */
	pm_restrict_gfp_mask();
	error = suspend_devices_and_enter(state);
	pm_restore_gfp_mask();

 Finish:
	pr_debug("PM: Finishing wakeup.\n");
	suspend_finish();
 Unlock:
	mutex_unlock(&pm_mutex);
	return error;
}
/**
 * pm_suspend - Externally visible function for suspending system.
 * @state: Enumerated value of state to enter.
 *
 * Reject out-of-range states with -EINVAL, otherwise hand off to
 * enter_state() and return its result.
 */
int pm_suspend(suspend_state_t state)
{
	if (state <= PM_SUSPEND_ON || state > PM_SUSPEND_MAX)
		return -EINVAL;
	return enter_state(state);
}
EXPORT_SYMBOL(pm_suspend);
| mangusta86/android_kernel_huawei_k3v2oem1 | kernel/power/suspend.c | C | gpl-2.0 | 7,102 |
<?php
/**
 * Magento
 *
 * NOTICE OF LICENSE
 *
 * This source file is subject to the Open Software License (OSL 3.0)
 * that is bundled with this package in the file LICENSE.txt.
 * It is also available through the world-wide-web at this URL:
 * http://opensource.org/licenses/osl-3.0.php
 * If you did not receive a copy of the license and are unable to
 * obtain it through the world-wide-web, please send an email
 * to license@magentocommerce.com so we can send you a copy immediately.
 *
 * DISCLAIMER
 *
 * Do not edit or add to this file if you wish to upgrade Magento to newer
 * versions in the future. If you wish to customize Magento for your
 * needs please refer to http://www.magentocommerce.com for more information.
 *
 * @category    Mage
 * @package     Mage_Customer
 * @copyright   Copyright (c) 2014 Magento Inc. (http://www.magentocommerce.com)
 * @license     http://opensource.org/licenses/osl-3.0.php  Open Software License (OSL 3.0)
 */

/**
 * Registers the EAV form types for the customer account create/edit pages and
 * the customer address edit page, together with their fieldsets and elements.
 * Optional elements (prefix, middlename, suffix, dob, taxvat) are included
 * only when the matching customer/address/<code>_show setting is enabled in
 * any configuration scope.
 */

/* @var $installer Mage_Customer_Model_Entity_Setup */
$installer = $this;
$installer->startSetup();

$setup = $installer->getConnection();

/**
 * True when the given customer/address "show" flag is enabled either in the
 * default helper configuration or in any store-scoped core_config_data row.
 *
 * @param string $code config code, e.g. 'prefix_show'
 * @return bool
 */
$isShown = function ($code) use ($setup, $installer) {
    $select = $setup->select()
        ->from($installer->getTable('core/config_data'), 'COUNT(*)')
        ->where('path=?', 'customer/address/' . $code)
        ->where('value!=?', '0');
    return (bool)Mage::helper('customer/address')->getConfig($code)
        || $setup->fetchOne($select) > 0;
};

/**
 * Inserts a system form type (its code doubles as the label, as before)
 * and binds it to the given entity type.
 *
 * @return string id of the new eav/form_type row
 */
$createFormType = function ($code, $entityTypeId) use ($setup, $installer) {
    $setup->insert($installer->getTable('eav/form_type'), array(
        'code'      => $code,
        'label'     => $code,
        'is_system' => 1,
        'theme'     => '',
        'store_id'  => 0
    ));
    $formTypeId = $setup->lastInsertId();
    $setup->insert($installer->getTable('eav/form_type_entity'), array(
        'type_id'        => $formTypeId,
        'entity_type_id' => $entityTypeId
    ));
    return $formTypeId;
};

/**
 * Inserts a fieldset for a form type plus its default-store label.
 *
 * @return string id of the new eav/form_fieldset row
 */
$createFieldset = function ($formTypeId, $code, $sortOrder, $label) use ($setup, $installer) {
    $setup->insert($installer->getTable('eav/form_fieldset'), array(
        'type_id'    => $formTypeId,
        'code'       => $code,
        'sort_order' => $sortOrder
    ));
    $fieldsetId = $setup->lastInsertId();
    $setup->insert($installer->getTable('eav/form_fieldset_label'), array(
        'fieldset_id' => $fieldsetId,
        'store_id'    => 0,
        'label'       => $label
    ));
    return $fieldsetId;
};

/**
 * Inserts one eav/form_element row per attribute code, numbering
 * sort_order 0..n-1 in list order (matching the old $elementSort++).
 */
$addElements = function ($formTypeId, $fieldsetId, $entityTypeId, array $attributeCodes) use ($setup, $installer) {
    $sortOrder = 0;
    foreach ($attributeCodes as $attributeCode) {
        $setup->insert($installer->getTable('eav/form_element'), array(
            'type_id'      => $formTypeId,
            'fieldset_id'  => $fieldsetId,
            'attribute_id' => $installer->getAttributeId($entityTypeId, $attributeCode),
            'sort_order'   => $sortOrder++
        ));
    }
};

$showPrefix     = $isShown('prefix_show');
$showMiddlename = $isShown('middlename_show');
$showSuffix     = $isShown('suffix_show');
$showDob        = $isShown('dob_show');
$showTaxVat     = $isShown('taxvat_show');

// Name attributes shared by the account forms and the address contact fieldset.
$nameAttributes = array();
if ($showPrefix) {
    $nameAttributes[] = 'prefix';
}
$nameAttributes[] = 'firstname';
if ($showMiddlename) {
    $nameAttributes[] = 'middlename';
}
$nameAttributes[] = 'lastname';
if ($showSuffix) {
    $nameAttributes[] = 'suffix';
}

// Account forms additionally collect email and the optional dob/taxvat fields.
$accountAttributes = $nameAttributes;
$accountAttributes[] = 'email';
if ($showDob) {
    $accountAttributes[] = 'dob';
}
if ($showTaxVat) {
    $accountAttributes[] = 'taxvat';
}

/**
 *****************************************************************************
 * customer/account/create/
 *****************************************************************************
 */
$entityTypeId = $installer->getEntityTypeId('customer');
$formTypeId   = $createFormType('customer_account_create', $entityTypeId);
$fieldsetId   = $createFieldset($formTypeId, 'general', 1, 'Personal Information');
$addElements($formTypeId, $fieldsetId, $entityTypeId, $accountAttributes);

/**
 *****************************************************************************
 * customer/account/edit/
 *****************************************************************************
 */
$formTypeId = $createFormType('customer_account_edit', $entityTypeId);
$fieldsetId = $createFieldset($formTypeId, 'general', 1, 'Account Information');
$addElements($formTypeId, $fieldsetId, $entityTypeId, $accountAttributes);

/**
 *****************************************************************************
 * customer/address/edit
 *****************************************************************************
 */
$entityTypeId = $installer->getEntityTypeId('customer_address');
$formTypeId   = $createFormType('customer_address_edit', $entityTypeId);

$fieldsetId = $createFieldset($formTypeId, 'contact', 1, 'Contact Information');
$addElements($formTypeId, $fieldsetId, $entityTypeId,
    array_merge($nameAttributes, array('company', 'telephone', 'fax')));

$fieldsetId = $createFieldset($formTypeId, 'address', 2, 'Address');
$addElements($formTypeId, $fieldsetId, $entityTypeId,
    array('street', 'city', 'region', 'postcode', 'country_id'));

$installer->endSetup();
| Eristoff47/P2 | src/public/app/code/core/Mage/Customer/sql/customer_setup/mysql4-upgrade-0.8.11-0.8.12.php | PHP | gpl-2.0 | 14,121 |
/*
* (C) Copyright 2003, Psyent Corporation <www.psyent.com>
* Scott McNutt <smcnutt@psyent.com>
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <common.h>
#include <command.h>
#include <asm/byteorder.h>
#include <asm/cache.h>
/* Magic value handed to the kernel in the first argument register; its
   little-endian bytes spell "NIOS" and signal that a command line and
   initrd range follow in the remaining arguments. */
#define NIOS_MAGIC 0x534f494e /* enable command line and initrd passing */
/*
 * do_bootm_linux - hand control from U-Boot to a Nios II Linux kernel.
 * @flag:   bootm state flags; anything other than 0 or BOOTM_STATE_OS_GO
 *          is rejected.
 * @argc/@argv: bootm command-line arguments (unused here).
 * @images: bootm header info providing the entry point and initrd range.
 *
 * Returns 1 only on failure; on success the kernel is entered and
 * control never returns.
 */
int do_bootm_linux(int flag, int argc, char *argv[], bootm_headers_t *images)
{
void (*kernel)(int, int, int, char *) = (void *)images->ep;
char *commandline = getenv("bootargs");
ulong initrd_start = images->rd_start;
ulong initrd_end = images->rd_end;
if ((flag != 0) && (flag != BOOTM_STATE_OS_GO))
return 1;
/* flushes data and instruction caches before calling the kernel */
disable_interrupts();
flush_dcache((ulong)kernel, CONFIG_SYS_DCACHE_SIZE);
flush_icache((ulong)kernel, CONFIG_SYS_ICACHE_SIZE);
debug("bootargs=%s @ 0x%lx\n", commandline, (ulong)&commandline);
debug("initrd=0x%lx-0x%lx\n", (ulong)initrd_start, (ulong)initrd_end);
kernel(NIOS_MAGIC, initrd_start, initrd_end, commandline);
/* does not return */
return 1;
}
| kbridgers/VOLTE4GFAX | uboot/u-boot-2010.06/arch/nios2/lib/bootm.c | C | gpl-2.0 | 1,825 |
/************************************************************************************************/
/* */
/* Copyright 2013 Broadcom Corporation */
/* */
/* Unless you and Broadcom execute a separate written software license agreement governing */
/* use of this software, this software is licensed to you under the terms of the GNU */
/* General Public License version 2 (the GPL), available at */
/* */
/* http://www.broadcom.com/licenses/GPLv2.php */
/* */
/* with the following added to such license: */
/* */
/* As a special exception, the copyright holders of this software give you permission to */
/* link this software with independent modules, and to copy and distribute the resulting */
/* executable under terms of your choice, provided that you also meet, for each linked */
/* independent module, the terms and conditions of the license of that module. */
/* An independent module is a module which is not derived from this software. The special */
/* exception does not apply to any modifications of the software. */
/* */
/* Notwithstanding the above, under no circumstances may you combine this software in any */
/* way with any other Broadcom software provided under a license other than the GPL, */
/* without Broadcom's express prior written consent. */
/* */
/* Date : Generated on 3/4/2013 11:52:5 */
/* RDB file : //JAVA/ */
/************************************************************************************************/
#ifndef __BRCM_RDB_PADCTRLREG_H__
#define __BRCM_RDB_PADCTRLREG_H__
#define PADCTRLREG_ADCSYN_OFFSET 0x00000000
#define PADCTRLREG_ADCSYN_TYPE UInt32
#define PADCTRLREG_ADCSYN_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_ADCSYN_PINSEL_ADCSYN_SHIFT 8
#define PADCTRLREG_ADCSYN_PINSEL_ADCSYN_MASK 0x00000700
#define PADCTRLREG_ADCSYN_HYS_EN_ADCSYN_SHIFT 7
#define PADCTRLREG_ADCSYN_HYS_EN_ADCSYN_MASK 0x00000080
#define PADCTRLREG_ADCSYN_PDN_ADCSYN_SHIFT 6
#define PADCTRLREG_ADCSYN_PDN_ADCSYN_MASK 0x00000040
#define PADCTRLREG_ADCSYN_PUP_ADCSYN_SHIFT 5
#define PADCTRLREG_ADCSYN_PUP_ADCSYN_MASK 0x00000020
#define PADCTRLREG_ADCSYN_SRC_ADCSYN_SHIFT 4
#define PADCTRLREG_ADCSYN_SRC_ADCSYN_MASK 0x00000010
#define PADCTRLREG_ADCSYN_IND_ADCSYN_SHIFT 3
#define PADCTRLREG_ADCSYN_IND_ADCSYN_MASK 0x00000008
#define PADCTRLREG_ADCSYN_SEL_2_ADCSYN_SHIFT 2
#define PADCTRLREG_ADCSYN_SEL_2_ADCSYN_MASK 0x00000004
#define PADCTRLREG_ADCSYN_SEL_1_ADCSYN_SHIFT 1
#define PADCTRLREG_ADCSYN_SEL_1_ADCSYN_MASK 0x00000002
#define PADCTRLREG_ADCSYN_SEL_0_ADCSYN_SHIFT 0
#define PADCTRLREG_ADCSYN_SEL_0_ADCSYN_MASK 0x00000001
#define PADCTRLREG_BATRM_OFFSET 0x00000004
#define PADCTRLREG_BATRM_TYPE UInt32
#define PADCTRLREG_BATRM_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_BATRM_PINSEL_BATRM_SHIFT 8
#define PADCTRLREG_BATRM_PINSEL_BATRM_MASK 0x00000700
#define PADCTRLREG_BATRM_HYS_EN_BATRM_SHIFT 7
#define PADCTRLREG_BATRM_HYS_EN_BATRM_MASK 0x00000080
#define PADCTRLREG_BATRM_PDN_BATRM_SHIFT 6
#define PADCTRLREG_BATRM_PDN_BATRM_MASK 0x00000040
#define PADCTRLREG_BATRM_PUP_BATRM_SHIFT 5
#define PADCTRLREG_BATRM_PUP_BATRM_MASK 0x00000020
#define PADCTRLREG_BATRM_SRC_BATRM_SHIFT 4
#define PADCTRLREG_BATRM_SRC_BATRM_MASK 0x00000010
#define PADCTRLREG_BATRM_IND_BATRM_SHIFT 3
#define PADCTRLREG_BATRM_IND_BATRM_MASK 0x00000008
#define PADCTRLREG_BATRM_SEL_2_BATRM_SHIFT 2
#define PADCTRLREG_BATRM_SEL_2_BATRM_MASK 0x00000004
#define PADCTRLREG_BATRM_SEL_1_BATRM_SHIFT 1
#define PADCTRLREG_BATRM_SEL_1_BATRM_MASK 0x00000002
#define PADCTRLREG_BATRM_SEL_0_BATRM_SHIFT 0
#define PADCTRLREG_BATRM_SEL_0_BATRM_MASK 0x00000001
#define PADCTRLREG_BSC1CLK_OFFSET 0x00000008
#define PADCTRLREG_BSC1CLK_TYPE UInt32
#define PADCTRLREG_BSC1CLK_RESERVED_MASK 0xFFFFF8C7
#define PADCTRLREG_BSC1CLK_PINSEL_BSC1CLK_SHIFT 8
#define PADCTRLREG_BSC1CLK_PINSEL_BSC1CLK_MASK 0x00000700
#define PADCTRLREG_BSC1CLK_PUP_BSC1CLK_SHIFT 5
#define PADCTRLREG_BSC1CLK_PUP_BSC1CLK_MASK 0x00000020
#define PADCTRLREG_BSC1CLK_SRC_BSC1CLK_SHIFT 4
#define PADCTRLREG_BSC1CLK_SRC_BSC1CLK_MASK 0x00000010
#define PADCTRLREG_BSC1CLK_IND_BSC1CLK_SHIFT 3
#define PADCTRLREG_BSC1CLK_IND_BSC1CLK_MASK 0x00000008
#define PADCTRLREG_BSC1DAT_OFFSET 0x0000000C
#define PADCTRLREG_BSC1DAT_TYPE UInt32
#define PADCTRLREG_BSC1DAT_RESERVED_MASK 0xFFFFF8C7
#define PADCTRLREG_BSC1DAT_PINSEL_BSC1DAT_SHIFT 8
#define PADCTRLREG_BSC1DAT_PINSEL_BSC1DAT_MASK 0x00000700
#define PADCTRLREG_BSC1DAT_PUP_BSC1DAT_SHIFT 5
#define PADCTRLREG_BSC1DAT_PUP_BSC1DAT_MASK 0x00000020
#define PADCTRLREG_BSC1DAT_SRC_BSC1DAT_SHIFT 4
#define PADCTRLREG_BSC1DAT_SRC_BSC1DAT_MASK 0x00000010
#define PADCTRLREG_BSC1DAT_IND_BSC1DAT_SHIFT 3
#define PADCTRLREG_BSC1DAT_IND_BSC1DAT_MASK 0x00000008
#define PADCTRLREG_CAMCS0_OFFSET 0x00000010
#define PADCTRLREG_CAMCS0_TYPE UInt32
#define PADCTRLREG_CAMCS0_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_CAMCS0_PINSEL_CAMCS0_SHIFT 8
#define PADCTRLREG_CAMCS0_PINSEL_CAMCS0_MASK 0x00000700
#define PADCTRLREG_CAMCS0_HYS_EN_CAMCS0_SHIFT 7
#define PADCTRLREG_CAMCS0_HYS_EN_CAMCS0_MASK 0x00000080
#define PADCTRLREG_CAMCS0_PDN_CAMCS0_SHIFT 6
#define PADCTRLREG_CAMCS0_PDN_CAMCS0_MASK 0x00000040
#define PADCTRLREG_CAMCS0_PUP_CAMCS0_SHIFT 5
#define PADCTRLREG_CAMCS0_PUP_CAMCS0_MASK 0x00000020
#define PADCTRLREG_CAMCS0_SRC_CAMCS0_SHIFT 4
#define PADCTRLREG_CAMCS0_SRC_CAMCS0_MASK 0x00000010
#define PADCTRLREG_CAMCS0_IND_CAMCS0_SHIFT 3
#define PADCTRLREG_CAMCS0_IND_CAMCS0_MASK 0x00000008
#define PADCTRLREG_CAMCS0_SEL_2_CAMCS0_SHIFT 2
#define PADCTRLREG_CAMCS0_SEL_2_CAMCS0_MASK 0x00000004
#define PADCTRLREG_CAMCS0_SEL_1_CAMCS0_SHIFT 1
#define PADCTRLREG_CAMCS0_SEL_1_CAMCS0_MASK 0x00000002
#define PADCTRLREG_CAMCS0_SEL_0_CAMCS0_SHIFT 0
#define PADCTRLREG_CAMCS0_SEL_0_CAMCS0_MASK 0x00000001
#define PADCTRLREG_CAMCS1_OFFSET 0x00000014
#define PADCTRLREG_CAMCS1_TYPE UInt32
#define PADCTRLREG_CAMCS1_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_CAMCS1_PINSEL_CAMCS1_SHIFT 8
#define PADCTRLREG_CAMCS1_PINSEL_CAMCS1_MASK 0x00000700
#define PADCTRLREG_CAMCS1_HYS_EN_CAMCS1_SHIFT 7
#define PADCTRLREG_CAMCS1_HYS_EN_CAMCS1_MASK 0x00000080
#define PADCTRLREG_CAMCS1_PDN_CAMCS1_SHIFT 6
#define PADCTRLREG_CAMCS1_PDN_CAMCS1_MASK 0x00000040
#define PADCTRLREG_CAMCS1_PUP_CAMCS1_SHIFT 5
#define PADCTRLREG_CAMCS1_PUP_CAMCS1_MASK 0x00000020
#define PADCTRLREG_CAMCS1_SRC_CAMCS1_SHIFT 4
#define PADCTRLREG_CAMCS1_SRC_CAMCS1_MASK 0x00000010
#define PADCTRLREG_CAMCS1_IND_CAMCS1_SHIFT 3
#define PADCTRLREG_CAMCS1_IND_CAMCS1_MASK 0x00000008
#define PADCTRLREG_CAMCS1_SEL_2_CAMCS1_SHIFT 2
#define PADCTRLREG_CAMCS1_SEL_2_CAMCS1_MASK 0x00000004
#define PADCTRLREG_CAMCS1_SEL_1_CAMCS1_SHIFT 1
#define PADCTRLREG_CAMCS1_SEL_1_CAMCS1_MASK 0x00000002
#define PADCTRLREG_CAMCS1_SEL_0_CAMCS1_SHIFT 0
#define PADCTRLREG_CAMCS1_SEL_0_CAMCS1_MASK 0x00000001
#define PADCTRLREG_CLK32K_OFFSET 0x00000018
#define PADCTRLREG_CLK32K_TYPE UInt32
#define PADCTRLREG_CLK32K_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_CLK32K_PINSEL_CLK32K_SHIFT 8
#define PADCTRLREG_CLK32K_PINSEL_CLK32K_MASK 0x00000700
#define PADCTRLREG_CLK32K_HYS_EN_CLK32K_SHIFT 7
#define PADCTRLREG_CLK32K_HYS_EN_CLK32K_MASK 0x00000080
#define PADCTRLREG_CLK32K_PDN_CLK32K_SHIFT 6
#define PADCTRLREG_CLK32K_PDN_CLK32K_MASK 0x00000040
#define PADCTRLREG_CLK32K_PUP_CLK32K_SHIFT 5
#define PADCTRLREG_CLK32K_PUP_CLK32K_MASK 0x00000020
#define PADCTRLREG_CLK32K_SRC_CLK32K_SHIFT 4
#define PADCTRLREG_CLK32K_SRC_CLK32K_MASK 0x00000010
#define PADCTRLREG_CLK32K_IND_CLK32K_SHIFT 3
#define PADCTRLREG_CLK32K_IND_CLK32K_MASK 0x00000008
#define PADCTRLREG_CLK32K_SEL_2_CLK32K_SHIFT 2
#define PADCTRLREG_CLK32K_SEL_2_CLK32K_MASK 0x00000004
#define PADCTRLREG_CLK32K_SEL_1_CLK32K_SHIFT 1
#define PADCTRLREG_CLK32K_SEL_1_CLK32K_MASK 0x00000002
#define PADCTRLREG_CLK32K_SEL_0_CLK32K_SHIFT 0
#define PADCTRLREG_CLK32K_SEL_0_CLK32K_MASK 0x00000001
#define PADCTRLREG_CLK_CX8_OFFSET 0x0000001C
#define PADCTRLREG_CLK_CX8_TYPE UInt32
#define PADCTRLREG_CLK_CX8_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_CLK_CX8_PINSEL_CLK_CX8_SHIFT 8
#define PADCTRLREG_CLK_CX8_PINSEL_CLK_CX8_MASK 0x00000700
#define PADCTRLREG_CLK_CX8_HYS_EN_CLK_CX8_SHIFT 7
#define PADCTRLREG_CLK_CX8_HYS_EN_CLK_CX8_MASK 0x00000080
#define PADCTRLREG_CLK_CX8_PDN_CLK_CX8_SHIFT 6
#define PADCTRLREG_CLK_CX8_PDN_CLK_CX8_MASK 0x00000040
#define PADCTRLREG_CLK_CX8_PUP_CLK_CX8_SHIFT 5
#define PADCTRLREG_CLK_CX8_PUP_CLK_CX8_MASK 0x00000020
#define PADCTRLREG_CLK_CX8_SRC_CLK_CX8_SHIFT 4
#define PADCTRLREG_CLK_CX8_SRC_CLK_CX8_MASK 0x00000010
#define PADCTRLREG_CLK_CX8_IND_CLK_CX8_SHIFT 3
#define PADCTRLREG_CLK_CX8_IND_CLK_CX8_MASK 0x00000008
#define PADCTRLREG_CLK_CX8_SEL_2_CLK_CX8_SHIFT 2
#define PADCTRLREG_CLK_CX8_SEL_2_CLK_CX8_MASK 0x00000004
#define PADCTRLREG_CLK_CX8_SEL_1_CLK_CX8_SHIFT 1
#define PADCTRLREG_CLK_CX8_SEL_1_CLK_CX8_MASK 0x00000002
#define PADCTRLREG_CLK_CX8_SEL_0_CLK_CX8_SHIFT 0
#define PADCTRLREG_CLK_CX8_SEL_0_CLK_CX8_MASK 0x00000001
#define PADCTRLREG_DCLK1_OFFSET 0x00000020
#define PADCTRLREG_DCLK1_TYPE UInt32
#define PADCTRLREG_DCLK1_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_DCLK1_PINSEL_DCLK1_SHIFT 8
#define PADCTRLREG_DCLK1_PINSEL_DCLK1_MASK 0x00000700
#define PADCTRLREG_DCLK1_HYS_EN_DCLK1_SHIFT 7
#define PADCTRLREG_DCLK1_HYS_EN_DCLK1_MASK 0x00000080
#define PADCTRLREG_DCLK1_PDN_DCLK1_SHIFT 6
#define PADCTRLREG_DCLK1_PDN_DCLK1_MASK 0x00000040
#define PADCTRLREG_DCLK1_PUP_DCLK1_SHIFT 5
#define PADCTRLREG_DCLK1_PUP_DCLK1_MASK 0x00000020
#define PADCTRLREG_DCLK1_SRC_DCLK1_SHIFT 4
#define PADCTRLREG_DCLK1_SRC_DCLK1_MASK 0x00000010
#define PADCTRLREG_DCLK1_IND_DCLK1_SHIFT 3
#define PADCTRLREG_DCLK1_IND_DCLK1_MASK 0x00000008
#define PADCTRLREG_DCLK1_SEL_2_DCLK1_SHIFT 2
#define PADCTRLREG_DCLK1_SEL_2_DCLK1_MASK 0x00000004
#define PADCTRLREG_DCLK1_SEL_1_DCLK1_SHIFT 1
#define PADCTRLREG_DCLK1_SEL_1_DCLK1_MASK 0x00000002
#define PADCTRLREG_DCLK1_SEL_0_DCLK1_SHIFT 0
#define PADCTRLREG_DCLK1_SEL_0_DCLK1_MASK 0x00000001
#define PADCTRLREG_DCLK4_OFFSET 0x00000024
#define PADCTRLREG_DCLK4_TYPE UInt32
#define PADCTRLREG_DCLK4_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_DCLK4_PINSEL_DCLK4_SHIFT 8
#define PADCTRLREG_DCLK4_PINSEL_DCLK4_MASK 0x00000700
#define PADCTRLREG_DCLK4_HYS_EN_DCLK4_SHIFT 7
#define PADCTRLREG_DCLK4_HYS_EN_DCLK4_MASK 0x00000080
#define PADCTRLREG_DCLK4_PDN_DCLK4_SHIFT 6
#define PADCTRLREG_DCLK4_PDN_DCLK4_MASK 0x00000040
#define PADCTRLREG_DCLK4_PUP_DCLK4_SHIFT 5
#define PADCTRLREG_DCLK4_PUP_DCLK4_MASK 0x00000020
#define PADCTRLREG_DCLK4_SRC_DCLK4_SHIFT 4
#define PADCTRLREG_DCLK4_SRC_DCLK4_MASK 0x00000010
#define PADCTRLREG_DCLK4_IND_DCLK4_SHIFT 3
#define PADCTRLREG_DCLK4_IND_DCLK4_MASK 0x00000008
#define PADCTRLREG_DCLK4_SEL_2_DCLK4_SHIFT 2
#define PADCTRLREG_DCLK4_SEL_2_DCLK4_MASK 0x00000004
#define PADCTRLREG_DCLK4_SEL_1_DCLK4_SHIFT 1
#define PADCTRLREG_DCLK4_SEL_1_DCLK4_MASK 0x00000002
#define PADCTRLREG_DCLK4_SEL_0_DCLK4_SHIFT 0
#define PADCTRLREG_DCLK4_SEL_0_DCLK4_MASK 0x00000001
#define PADCTRLREG_DCLKREQ1_OFFSET 0x00000028
#define PADCTRLREG_DCLKREQ1_TYPE UInt32
#define PADCTRLREG_DCLKREQ1_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_DCLKREQ1_PINSEL_DCLKREQ1_SHIFT 8
#define PADCTRLREG_DCLKREQ1_PINSEL_DCLKREQ1_MASK 0x00000700
#define PADCTRLREG_DCLKREQ1_HYS_EN_DCLKREQ1_SHIFT 7
#define PADCTRLREG_DCLKREQ1_HYS_EN_DCLKREQ1_MASK 0x00000080
#define PADCTRLREG_DCLKREQ1_PDN_DCLKREQ1_SHIFT 6
#define PADCTRLREG_DCLKREQ1_PDN_DCLKREQ1_MASK 0x00000040
#define PADCTRLREG_DCLKREQ1_PUP_DCLKREQ1_SHIFT 5
#define PADCTRLREG_DCLKREQ1_PUP_DCLKREQ1_MASK 0x00000020
#define PADCTRLREG_DCLKREQ1_SRC_DCLKREQ1_SHIFT 4
#define PADCTRLREG_DCLKREQ1_SRC_DCLKREQ1_MASK 0x00000010
#define PADCTRLREG_DCLKREQ1_IND_DCLKREQ1_SHIFT 3
#define PADCTRLREG_DCLKREQ1_IND_DCLKREQ1_MASK 0x00000008
#define PADCTRLREG_DCLKREQ1_SEL_2_DCLKREQ1_SHIFT 2
#define PADCTRLREG_DCLKREQ1_SEL_2_DCLKREQ1_MASK 0x00000004
#define PADCTRLREG_DCLKREQ1_SEL_1_DCLKREQ1_SHIFT 1
#define PADCTRLREG_DCLKREQ1_SEL_1_DCLKREQ1_MASK 0x00000002
#define PADCTRLREG_DCLKREQ1_SEL_0_DCLKREQ1_SHIFT 0
#define PADCTRLREG_DCLKREQ1_SEL_0_DCLKREQ1_MASK 0x00000001
#define PADCTRLREG_DCLKREQ4_OFFSET 0x0000002C
#define PADCTRLREG_DCLKREQ4_TYPE UInt32
#define PADCTRLREG_DCLKREQ4_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_DCLKREQ4_PINSEL_DCLKREQ4_SHIFT 8
#define PADCTRLREG_DCLKREQ4_PINSEL_DCLKREQ4_MASK 0x00000700
#define PADCTRLREG_DCLKREQ4_HYS_EN_DCLKREQ4_SHIFT 7
#define PADCTRLREG_DCLKREQ4_HYS_EN_DCLKREQ4_MASK 0x00000080
#define PADCTRLREG_DCLKREQ4_PDN_DCLKREQ4_SHIFT 6
#define PADCTRLREG_DCLKREQ4_PDN_DCLKREQ4_MASK 0x00000040
#define PADCTRLREG_DCLKREQ4_PUP_DCLKREQ4_SHIFT 5
#define PADCTRLREG_DCLKREQ4_PUP_DCLKREQ4_MASK 0x00000020
#define PADCTRLREG_DCLKREQ4_SRC_DCLKREQ4_SHIFT 4
#define PADCTRLREG_DCLKREQ4_SRC_DCLKREQ4_MASK 0x00000010
#define PADCTRLREG_DCLKREQ4_IND_DCLKREQ4_SHIFT 3
#define PADCTRLREG_DCLKREQ4_IND_DCLKREQ4_MASK 0x00000008
#define PADCTRLREG_DCLKREQ4_SEL_2_DCLKREQ4_SHIFT 2
#define PADCTRLREG_DCLKREQ4_SEL_2_DCLKREQ4_MASK 0x00000004
#define PADCTRLREG_DCLKREQ4_SEL_1_DCLKREQ4_SHIFT 1
#define PADCTRLREG_DCLKREQ4_SEL_1_DCLKREQ4_MASK 0x00000002
#define PADCTRLREG_DCLKREQ4_SEL_0_DCLKREQ4_SHIFT 0
#define PADCTRLREG_DCLKREQ4_SEL_0_DCLKREQ4_MASK 0x00000001
/*
 * Pad-control registers for the digital-microphone pads DMIC0CLK and
 * DMIC0DQ (one 32-bit register per pad). Field layout per the
 * SHIFT/MASK pairs: bits 31:11 reserved, 10:8 PINSEL, 7 HYS_EN,
 * 6 PDN, 5 PUP, 4 SRC, 3 IND, 2:0 SEL_2..SEL_0.
 * NOTE(review): field semantics inferred from names -- confirm in the TRM.
 */
/* DMIC0CLK pad control @ +0x30 */
#define PADCTRLREG_DMIC0CLK_OFFSET 0x00000030
#define PADCTRLREG_DMIC0CLK_TYPE UInt32
#define PADCTRLREG_DMIC0CLK_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_DMIC0CLK_PINSEL_DMIC0CLK_SHIFT 8
#define PADCTRLREG_DMIC0CLK_PINSEL_DMIC0CLK_MASK 0x00000700
#define PADCTRLREG_DMIC0CLK_HYS_EN_DMIC0CLK_SHIFT 7
#define PADCTRLREG_DMIC0CLK_HYS_EN_DMIC0CLK_MASK 0x00000080
#define PADCTRLREG_DMIC0CLK_PDN_DMIC0CLK_SHIFT 6
#define PADCTRLREG_DMIC0CLK_PDN_DMIC0CLK_MASK 0x00000040
#define PADCTRLREG_DMIC0CLK_PUP_DMIC0CLK_SHIFT 5
#define PADCTRLREG_DMIC0CLK_PUP_DMIC0CLK_MASK 0x00000020
#define PADCTRLREG_DMIC0CLK_SRC_DMIC0CLK_SHIFT 4
#define PADCTRLREG_DMIC0CLK_SRC_DMIC0CLK_MASK 0x00000010
#define PADCTRLREG_DMIC0CLK_IND_DMIC0CLK_SHIFT 3
#define PADCTRLREG_DMIC0CLK_IND_DMIC0CLK_MASK 0x00000008
#define PADCTRLREG_DMIC0CLK_SEL_2_DMIC0CLK_SHIFT 2
#define PADCTRLREG_DMIC0CLK_SEL_2_DMIC0CLK_MASK 0x00000004
#define PADCTRLREG_DMIC0CLK_SEL_1_DMIC0CLK_SHIFT 1
#define PADCTRLREG_DMIC0CLK_SEL_1_DMIC0CLK_MASK 0x00000002
#define PADCTRLREG_DMIC0CLK_SEL_0_DMIC0CLK_SHIFT 0
#define PADCTRLREG_DMIC0CLK_SEL_0_DMIC0CLK_MASK 0x00000001
/* DMIC0DQ pad control @ +0x34 (same layout) */
#define PADCTRLREG_DMIC0DQ_OFFSET 0x00000034
#define PADCTRLREG_DMIC0DQ_TYPE UInt32
#define PADCTRLREG_DMIC0DQ_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_DMIC0DQ_PINSEL_DMIC0DQ_SHIFT 8
#define PADCTRLREG_DMIC0DQ_PINSEL_DMIC0DQ_MASK 0x00000700
#define PADCTRLREG_DMIC0DQ_HYS_EN_DMIC0DQ_SHIFT 7
#define PADCTRLREG_DMIC0DQ_HYS_EN_DMIC0DQ_MASK 0x00000080
#define PADCTRLREG_DMIC0DQ_PDN_DMIC0DQ_SHIFT 6
#define PADCTRLREG_DMIC0DQ_PDN_DMIC0DQ_MASK 0x00000040
#define PADCTRLREG_DMIC0DQ_PUP_DMIC0DQ_SHIFT 5
#define PADCTRLREG_DMIC0DQ_PUP_DMIC0DQ_MASK 0x00000020
#define PADCTRLREG_DMIC0DQ_SRC_DMIC0DQ_SHIFT 4
#define PADCTRLREG_DMIC0DQ_SRC_DMIC0DQ_MASK 0x00000010
#define PADCTRLREG_DMIC0DQ_IND_DMIC0DQ_SHIFT 3
#define PADCTRLREG_DMIC0DQ_IND_DMIC0DQ_MASK 0x00000008
#define PADCTRLREG_DMIC0DQ_SEL_2_DMIC0DQ_SHIFT 2
#define PADCTRLREG_DMIC0DQ_SEL_2_DMIC0DQ_MASK 0x00000004
#define PADCTRLREG_DMIC0DQ_SEL_1_DMIC0DQ_SHIFT 1
#define PADCTRLREG_DMIC0DQ_SEL_1_DMIC0DQ_MASK 0x00000002
#define PADCTRLREG_DMIC0DQ_SEL_0_DMIC0DQ_SHIFT 0
#define PADCTRLREG_DMIC0DQ_SEL_0_DMIC0DQ_MASK 0x00000001
/*
 * Pad-control register for the DSI0TE pad (32-bit, @ +0x38).
 * Field layout per the SHIFT/MASK pairs: bits 31:11 reserved,
 * 10:8 PINSEL, 7 HYS_EN, 6 PDN, 5 PUP, 4 SRC, 3 IND, 2:0 SEL_2..SEL_0.
 * NOTE(review): field semantics inferred from names -- confirm in the TRM.
 */
#define PADCTRLREG_DSI0TE_OFFSET 0x00000038
#define PADCTRLREG_DSI0TE_TYPE UInt32
#define PADCTRLREG_DSI0TE_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_DSI0TE_PINSEL_DSI0TE_SHIFT 8
#define PADCTRLREG_DSI0TE_PINSEL_DSI0TE_MASK 0x00000700
#define PADCTRLREG_DSI0TE_HYS_EN_DSI0TE_SHIFT 7
#define PADCTRLREG_DSI0TE_HYS_EN_DSI0TE_MASK 0x00000080
#define PADCTRLREG_DSI0TE_PDN_DSI0TE_SHIFT 6
#define PADCTRLREG_DSI0TE_PDN_DSI0TE_MASK 0x00000040
#define PADCTRLREG_DSI0TE_PUP_DSI0TE_SHIFT 5
#define PADCTRLREG_DSI0TE_PUP_DSI0TE_MASK 0x00000020
#define PADCTRLREG_DSI0TE_SRC_DSI0TE_SHIFT 4
#define PADCTRLREG_DSI0TE_SRC_DSI0TE_MASK 0x00000010
#define PADCTRLREG_DSI0TE_IND_DSI0TE_SHIFT 3
#define PADCTRLREG_DSI0TE_IND_DSI0TE_MASK 0x00000008
#define PADCTRLREG_DSI0TE_SEL_2_DSI0TE_SHIFT 2
#define PADCTRLREG_DSI0TE_SEL_2_DSI0TE_MASK 0x00000004
#define PADCTRLREG_DSI0TE_SEL_1_DSI0TE_SHIFT 1
#define PADCTRLREG_DSI0TE_SEL_1_DSI0TE_MASK 0x00000002
#define PADCTRLREG_DSI0TE_SEL_0_DSI0TE_SHIFT 0
#define PADCTRLREG_DSI0TE_SEL_0_DSI0TE_MASK 0x00000001
/*
 * Pad-control registers for GPIO00..GPIO15 (one 32-bit register per
 * pad, 4-byte stride from +0x3C to +0x78). All sixteen share the same
 * field layout, as encoded by the SHIFT/MASK pairs:
 *   bits 31:11 reserved, 10:8 PINSEL, 7 HYS_EN, 6 PDN, 5 PUP,
 *   4 SRC, 3 IND, 2:0 SEL_2..SEL_0.
 * NOTE(review): field semantics (pin-function select, hysteresis,
 * pull-down/pull-up, slew, input disable, drive select) inferred from
 * names -- confirm against the SoC reference manual.
 */
/* GPIO00 pad control @ +0x3C */
#define PADCTRLREG_GPIO00_OFFSET 0x0000003C
#define PADCTRLREG_GPIO00_TYPE UInt32
#define PADCTRLREG_GPIO00_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO00_PINSEL_GPIO00_SHIFT 8
#define PADCTRLREG_GPIO00_PINSEL_GPIO00_MASK 0x00000700
#define PADCTRLREG_GPIO00_HYS_EN_GPIO00_SHIFT 7
#define PADCTRLREG_GPIO00_HYS_EN_GPIO00_MASK 0x00000080
#define PADCTRLREG_GPIO00_PDN_GPIO00_SHIFT 6
#define PADCTRLREG_GPIO00_PDN_GPIO00_MASK 0x00000040
#define PADCTRLREG_GPIO00_PUP_GPIO00_SHIFT 5
#define PADCTRLREG_GPIO00_PUP_GPIO00_MASK 0x00000020
#define PADCTRLREG_GPIO00_SRC_GPIO00_SHIFT 4
#define PADCTRLREG_GPIO00_SRC_GPIO00_MASK 0x00000010
#define PADCTRLREG_GPIO00_IND_GPIO00_SHIFT 3
#define PADCTRLREG_GPIO00_IND_GPIO00_MASK 0x00000008
#define PADCTRLREG_GPIO00_SEL_2_GPIO00_SHIFT 2
#define PADCTRLREG_GPIO00_SEL_2_GPIO00_MASK 0x00000004
#define PADCTRLREG_GPIO00_SEL_1_GPIO00_SHIFT 1
#define PADCTRLREG_GPIO00_SEL_1_GPIO00_MASK 0x00000002
#define PADCTRLREG_GPIO00_SEL_0_GPIO00_SHIFT 0
#define PADCTRLREG_GPIO00_SEL_0_GPIO00_MASK 0x00000001
/* GPIO01 pad control @ +0x40 */
#define PADCTRLREG_GPIO01_OFFSET 0x00000040
#define PADCTRLREG_GPIO01_TYPE UInt32
#define PADCTRLREG_GPIO01_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO01_PINSEL_GPIO01_SHIFT 8
#define PADCTRLREG_GPIO01_PINSEL_GPIO01_MASK 0x00000700
#define PADCTRLREG_GPIO01_HYS_EN_GPIO01_SHIFT 7
#define PADCTRLREG_GPIO01_HYS_EN_GPIO01_MASK 0x00000080
#define PADCTRLREG_GPIO01_PDN_GPIO01_SHIFT 6
#define PADCTRLREG_GPIO01_PDN_GPIO01_MASK 0x00000040
#define PADCTRLREG_GPIO01_PUP_GPIO01_SHIFT 5
#define PADCTRLREG_GPIO01_PUP_GPIO01_MASK 0x00000020
#define PADCTRLREG_GPIO01_SRC_GPIO01_SHIFT 4
#define PADCTRLREG_GPIO01_SRC_GPIO01_MASK 0x00000010
#define PADCTRLREG_GPIO01_IND_GPIO01_SHIFT 3
#define PADCTRLREG_GPIO01_IND_GPIO01_MASK 0x00000008
#define PADCTRLREG_GPIO01_SEL_2_GPIO01_SHIFT 2
#define PADCTRLREG_GPIO01_SEL_2_GPIO01_MASK 0x00000004
#define PADCTRLREG_GPIO01_SEL_1_GPIO01_SHIFT 1
#define PADCTRLREG_GPIO01_SEL_1_GPIO01_MASK 0x00000002
#define PADCTRLREG_GPIO01_SEL_0_GPIO01_SHIFT 0
#define PADCTRLREG_GPIO01_SEL_0_GPIO01_MASK 0x00000001
/* GPIO02 pad control @ +0x44 */
#define PADCTRLREG_GPIO02_OFFSET 0x00000044
#define PADCTRLREG_GPIO02_TYPE UInt32
#define PADCTRLREG_GPIO02_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO02_PINSEL_GPIO02_SHIFT 8
#define PADCTRLREG_GPIO02_PINSEL_GPIO02_MASK 0x00000700
#define PADCTRLREG_GPIO02_HYS_EN_GPIO02_SHIFT 7
#define PADCTRLREG_GPIO02_HYS_EN_GPIO02_MASK 0x00000080
#define PADCTRLREG_GPIO02_PDN_GPIO02_SHIFT 6
#define PADCTRLREG_GPIO02_PDN_GPIO02_MASK 0x00000040
#define PADCTRLREG_GPIO02_PUP_GPIO02_SHIFT 5
#define PADCTRLREG_GPIO02_PUP_GPIO02_MASK 0x00000020
#define PADCTRLREG_GPIO02_SRC_GPIO02_SHIFT 4
#define PADCTRLREG_GPIO02_SRC_GPIO02_MASK 0x00000010
#define PADCTRLREG_GPIO02_IND_GPIO02_SHIFT 3
#define PADCTRLREG_GPIO02_IND_GPIO02_MASK 0x00000008
#define PADCTRLREG_GPIO02_SEL_2_GPIO02_SHIFT 2
#define PADCTRLREG_GPIO02_SEL_2_GPIO02_MASK 0x00000004
#define PADCTRLREG_GPIO02_SEL_1_GPIO02_SHIFT 1
#define PADCTRLREG_GPIO02_SEL_1_GPIO02_MASK 0x00000002
#define PADCTRLREG_GPIO02_SEL_0_GPIO02_SHIFT 0
#define PADCTRLREG_GPIO02_SEL_0_GPIO02_MASK 0x00000001
/* GPIO03 pad control @ +0x48 */
#define PADCTRLREG_GPIO03_OFFSET 0x00000048
#define PADCTRLREG_GPIO03_TYPE UInt32
#define PADCTRLREG_GPIO03_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO03_PINSEL_GPIO03_SHIFT 8
#define PADCTRLREG_GPIO03_PINSEL_GPIO03_MASK 0x00000700
#define PADCTRLREG_GPIO03_HYS_EN_GPIO03_SHIFT 7
#define PADCTRLREG_GPIO03_HYS_EN_GPIO03_MASK 0x00000080
#define PADCTRLREG_GPIO03_PDN_GPIO03_SHIFT 6
#define PADCTRLREG_GPIO03_PDN_GPIO03_MASK 0x00000040
#define PADCTRLREG_GPIO03_PUP_GPIO03_SHIFT 5
#define PADCTRLREG_GPIO03_PUP_GPIO03_MASK 0x00000020
#define PADCTRLREG_GPIO03_SRC_GPIO03_SHIFT 4
#define PADCTRLREG_GPIO03_SRC_GPIO03_MASK 0x00000010
#define PADCTRLREG_GPIO03_IND_GPIO03_SHIFT 3
#define PADCTRLREG_GPIO03_IND_GPIO03_MASK 0x00000008
#define PADCTRLREG_GPIO03_SEL_2_GPIO03_SHIFT 2
#define PADCTRLREG_GPIO03_SEL_2_GPIO03_MASK 0x00000004
#define PADCTRLREG_GPIO03_SEL_1_GPIO03_SHIFT 1
#define PADCTRLREG_GPIO03_SEL_1_GPIO03_MASK 0x00000002
#define PADCTRLREG_GPIO03_SEL_0_GPIO03_SHIFT 0
#define PADCTRLREG_GPIO03_SEL_0_GPIO03_MASK 0x00000001
/* GPIO04 pad control @ +0x4C */
#define PADCTRLREG_GPIO04_OFFSET 0x0000004C
#define PADCTRLREG_GPIO04_TYPE UInt32
#define PADCTRLREG_GPIO04_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO04_PINSEL_GPIO04_SHIFT 8
#define PADCTRLREG_GPIO04_PINSEL_GPIO04_MASK 0x00000700
#define PADCTRLREG_GPIO04_HYS_EN_GPIO04_SHIFT 7
#define PADCTRLREG_GPIO04_HYS_EN_GPIO04_MASK 0x00000080
#define PADCTRLREG_GPIO04_PDN_GPIO04_SHIFT 6
#define PADCTRLREG_GPIO04_PDN_GPIO04_MASK 0x00000040
#define PADCTRLREG_GPIO04_PUP_GPIO04_SHIFT 5
#define PADCTRLREG_GPIO04_PUP_GPIO04_MASK 0x00000020
#define PADCTRLREG_GPIO04_SRC_GPIO04_SHIFT 4
#define PADCTRLREG_GPIO04_SRC_GPIO04_MASK 0x00000010
#define PADCTRLREG_GPIO04_IND_GPIO04_SHIFT 3
#define PADCTRLREG_GPIO04_IND_GPIO04_MASK 0x00000008
#define PADCTRLREG_GPIO04_SEL_2_GPIO04_SHIFT 2
#define PADCTRLREG_GPIO04_SEL_2_GPIO04_MASK 0x00000004
#define PADCTRLREG_GPIO04_SEL_1_GPIO04_SHIFT 1
#define PADCTRLREG_GPIO04_SEL_1_GPIO04_MASK 0x00000002
#define PADCTRLREG_GPIO04_SEL_0_GPIO04_SHIFT 0
#define PADCTRLREG_GPIO04_SEL_0_GPIO04_MASK 0x00000001
/* GPIO05 pad control @ +0x50 */
#define PADCTRLREG_GPIO05_OFFSET 0x00000050
#define PADCTRLREG_GPIO05_TYPE UInt32
#define PADCTRLREG_GPIO05_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO05_PINSEL_GPIO05_SHIFT 8
#define PADCTRLREG_GPIO05_PINSEL_GPIO05_MASK 0x00000700
#define PADCTRLREG_GPIO05_HYS_EN_GPIO05_SHIFT 7
#define PADCTRLREG_GPIO05_HYS_EN_GPIO05_MASK 0x00000080
#define PADCTRLREG_GPIO05_PDN_GPIO05_SHIFT 6
#define PADCTRLREG_GPIO05_PDN_GPIO05_MASK 0x00000040
#define PADCTRLREG_GPIO05_PUP_GPIO05_SHIFT 5
#define PADCTRLREG_GPIO05_PUP_GPIO05_MASK 0x00000020
#define PADCTRLREG_GPIO05_SRC_GPIO05_SHIFT 4
#define PADCTRLREG_GPIO05_SRC_GPIO05_MASK 0x00000010
#define PADCTRLREG_GPIO05_IND_GPIO05_SHIFT 3
#define PADCTRLREG_GPIO05_IND_GPIO05_MASK 0x00000008
#define PADCTRLREG_GPIO05_SEL_2_GPIO05_SHIFT 2
#define PADCTRLREG_GPIO05_SEL_2_GPIO05_MASK 0x00000004
#define PADCTRLREG_GPIO05_SEL_1_GPIO05_SHIFT 1
#define PADCTRLREG_GPIO05_SEL_1_GPIO05_MASK 0x00000002
#define PADCTRLREG_GPIO05_SEL_0_GPIO05_SHIFT 0
#define PADCTRLREG_GPIO05_SEL_0_GPIO05_MASK 0x00000001
/* GPIO06 pad control @ +0x54 */
#define PADCTRLREG_GPIO06_OFFSET 0x00000054
#define PADCTRLREG_GPIO06_TYPE UInt32
#define PADCTRLREG_GPIO06_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO06_PINSEL_GPIO06_SHIFT 8
#define PADCTRLREG_GPIO06_PINSEL_GPIO06_MASK 0x00000700
#define PADCTRLREG_GPIO06_HYS_EN_GPIO06_SHIFT 7
#define PADCTRLREG_GPIO06_HYS_EN_GPIO06_MASK 0x00000080
#define PADCTRLREG_GPIO06_PDN_GPIO06_SHIFT 6
#define PADCTRLREG_GPIO06_PDN_GPIO06_MASK 0x00000040
#define PADCTRLREG_GPIO06_PUP_GPIO06_SHIFT 5
#define PADCTRLREG_GPIO06_PUP_GPIO06_MASK 0x00000020
#define PADCTRLREG_GPIO06_SRC_GPIO06_SHIFT 4
#define PADCTRLREG_GPIO06_SRC_GPIO06_MASK 0x00000010
#define PADCTRLREG_GPIO06_IND_GPIO06_SHIFT 3
#define PADCTRLREG_GPIO06_IND_GPIO06_MASK 0x00000008
#define PADCTRLREG_GPIO06_SEL_2_GPIO06_SHIFT 2
#define PADCTRLREG_GPIO06_SEL_2_GPIO06_MASK 0x00000004
#define PADCTRLREG_GPIO06_SEL_1_GPIO06_SHIFT 1
#define PADCTRLREG_GPIO06_SEL_1_GPIO06_MASK 0x00000002
#define PADCTRLREG_GPIO06_SEL_0_GPIO06_SHIFT 0
#define PADCTRLREG_GPIO06_SEL_0_GPIO06_MASK 0x00000001
/* GPIO07 pad control @ +0x58 */
#define PADCTRLREG_GPIO07_OFFSET 0x00000058
#define PADCTRLREG_GPIO07_TYPE UInt32
#define PADCTRLREG_GPIO07_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO07_PINSEL_GPIO07_SHIFT 8
#define PADCTRLREG_GPIO07_PINSEL_GPIO07_MASK 0x00000700
#define PADCTRLREG_GPIO07_HYS_EN_GPIO07_SHIFT 7
#define PADCTRLREG_GPIO07_HYS_EN_GPIO07_MASK 0x00000080
#define PADCTRLREG_GPIO07_PDN_GPIO07_SHIFT 6
#define PADCTRLREG_GPIO07_PDN_GPIO07_MASK 0x00000040
#define PADCTRLREG_GPIO07_PUP_GPIO07_SHIFT 5
#define PADCTRLREG_GPIO07_PUP_GPIO07_MASK 0x00000020
#define PADCTRLREG_GPIO07_SRC_GPIO07_SHIFT 4
#define PADCTRLREG_GPIO07_SRC_GPIO07_MASK 0x00000010
#define PADCTRLREG_GPIO07_IND_GPIO07_SHIFT 3
#define PADCTRLREG_GPIO07_IND_GPIO07_MASK 0x00000008
#define PADCTRLREG_GPIO07_SEL_2_GPIO07_SHIFT 2
#define PADCTRLREG_GPIO07_SEL_2_GPIO07_MASK 0x00000004
#define PADCTRLREG_GPIO07_SEL_1_GPIO07_SHIFT 1
#define PADCTRLREG_GPIO07_SEL_1_GPIO07_MASK 0x00000002
#define PADCTRLREG_GPIO07_SEL_0_GPIO07_SHIFT 0
#define PADCTRLREG_GPIO07_SEL_0_GPIO07_MASK 0x00000001
/* GPIO08 pad control @ +0x5C */
#define PADCTRLREG_GPIO08_OFFSET 0x0000005C
#define PADCTRLREG_GPIO08_TYPE UInt32
#define PADCTRLREG_GPIO08_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO08_PINSEL_GPIO08_SHIFT 8
#define PADCTRLREG_GPIO08_PINSEL_GPIO08_MASK 0x00000700
#define PADCTRLREG_GPIO08_HYS_EN_GPIO08_SHIFT 7
#define PADCTRLREG_GPIO08_HYS_EN_GPIO08_MASK 0x00000080
#define PADCTRLREG_GPIO08_PDN_GPIO08_SHIFT 6
#define PADCTRLREG_GPIO08_PDN_GPIO08_MASK 0x00000040
#define PADCTRLREG_GPIO08_PUP_GPIO08_SHIFT 5
#define PADCTRLREG_GPIO08_PUP_GPIO08_MASK 0x00000020
#define PADCTRLREG_GPIO08_SRC_GPIO08_SHIFT 4
#define PADCTRLREG_GPIO08_SRC_GPIO08_MASK 0x00000010
#define PADCTRLREG_GPIO08_IND_GPIO08_SHIFT 3
#define PADCTRLREG_GPIO08_IND_GPIO08_MASK 0x00000008
#define PADCTRLREG_GPIO08_SEL_2_GPIO08_SHIFT 2
#define PADCTRLREG_GPIO08_SEL_2_GPIO08_MASK 0x00000004
#define PADCTRLREG_GPIO08_SEL_1_GPIO08_SHIFT 1
#define PADCTRLREG_GPIO08_SEL_1_GPIO08_MASK 0x00000002
#define PADCTRLREG_GPIO08_SEL_0_GPIO08_SHIFT 0
#define PADCTRLREG_GPIO08_SEL_0_GPIO08_MASK 0x00000001
/* GPIO09 pad control @ +0x60 */
#define PADCTRLREG_GPIO09_OFFSET 0x00000060
#define PADCTRLREG_GPIO09_TYPE UInt32
#define PADCTRLREG_GPIO09_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO09_PINSEL_GPIO09_SHIFT 8
#define PADCTRLREG_GPIO09_PINSEL_GPIO09_MASK 0x00000700
#define PADCTRLREG_GPIO09_HYS_EN_GPIO09_SHIFT 7
#define PADCTRLREG_GPIO09_HYS_EN_GPIO09_MASK 0x00000080
#define PADCTRLREG_GPIO09_PDN_GPIO09_SHIFT 6
#define PADCTRLREG_GPIO09_PDN_GPIO09_MASK 0x00000040
#define PADCTRLREG_GPIO09_PUP_GPIO09_SHIFT 5
#define PADCTRLREG_GPIO09_PUP_GPIO09_MASK 0x00000020
#define PADCTRLREG_GPIO09_SRC_GPIO09_SHIFT 4
#define PADCTRLREG_GPIO09_SRC_GPIO09_MASK 0x00000010
#define PADCTRLREG_GPIO09_IND_GPIO09_SHIFT 3
#define PADCTRLREG_GPIO09_IND_GPIO09_MASK 0x00000008
#define PADCTRLREG_GPIO09_SEL_2_GPIO09_SHIFT 2
#define PADCTRLREG_GPIO09_SEL_2_GPIO09_MASK 0x00000004
#define PADCTRLREG_GPIO09_SEL_1_GPIO09_SHIFT 1
#define PADCTRLREG_GPIO09_SEL_1_GPIO09_MASK 0x00000002
#define PADCTRLREG_GPIO09_SEL_0_GPIO09_SHIFT 0
#define PADCTRLREG_GPIO09_SEL_0_GPIO09_MASK 0x00000001
/* GPIO10 pad control @ +0x64 */
#define PADCTRLREG_GPIO10_OFFSET 0x00000064
#define PADCTRLREG_GPIO10_TYPE UInt32
#define PADCTRLREG_GPIO10_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO10_PINSEL_GPIO10_SHIFT 8
#define PADCTRLREG_GPIO10_PINSEL_GPIO10_MASK 0x00000700
#define PADCTRLREG_GPIO10_HYS_EN_GPIO10_SHIFT 7
#define PADCTRLREG_GPIO10_HYS_EN_GPIO10_MASK 0x00000080
#define PADCTRLREG_GPIO10_PDN_GPIO10_SHIFT 6
#define PADCTRLREG_GPIO10_PDN_GPIO10_MASK 0x00000040
#define PADCTRLREG_GPIO10_PUP_GPIO10_SHIFT 5
#define PADCTRLREG_GPIO10_PUP_GPIO10_MASK 0x00000020
#define PADCTRLREG_GPIO10_SRC_GPIO10_SHIFT 4
#define PADCTRLREG_GPIO10_SRC_GPIO10_MASK 0x00000010
#define PADCTRLREG_GPIO10_IND_GPIO10_SHIFT 3
#define PADCTRLREG_GPIO10_IND_GPIO10_MASK 0x00000008
#define PADCTRLREG_GPIO10_SEL_2_GPIO10_SHIFT 2
#define PADCTRLREG_GPIO10_SEL_2_GPIO10_MASK 0x00000004
#define PADCTRLREG_GPIO10_SEL_1_GPIO10_SHIFT 1
#define PADCTRLREG_GPIO10_SEL_1_GPIO10_MASK 0x00000002
#define PADCTRLREG_GPIO10_SEL_0_GPIO10_SHIFT 0
#define PADCTRLREG_GPIO10_SEL_0_GPIO10_MASK 0x00000001
/* GPIO11 pad control @ +0x68 */
#define PADCTRLREG_GPIO11_OFFSET 0x00000068
#define PADCTRLREG_GPIO11_TYPE UInt32
#define PADCTRLREG_GPIO11_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO11_PINSEL_GPIO11_SHIFT 8
#define PADCTRLREG_GPIO11_PINSEL_GPIO11_MASK 0x00000700
#define PADCTRLREG_GPIO11_HYS_EN_GPIO11_SHIFT 7
#define PADCTRLREG_GPIO11_HYS_EN_GPIO11_MASK 0x00000080
#define PADCTRLREG_GPIO11_PDN_GPIO11_SHIFT 6
#define PADCTRLREG_GPIO11_PDN_GPIO11_MASK 0x00000040
#define PADCTRLREG_GPIO11_PUP_GPIO11_SHIFT 5
#define PADCTRLREG_GPIO11_PUP_GPIO11_MASK 0x00000020
#define PADCTRLREG_GPIO11_SRC_GPIO11_SHIFT 4
#define PADCTRLREG_GPIO11_SRC_GPIO11_MASK 0x00000010
#define PADCTRLREG_GPIO11_IND_GPIO11_SHIFT 3
#define PADCTRLREG_GPIO11_IND_GPIO11_MASK 0x00000008
#define PADCTRLREG_GPIO11_SEL_2_GPIO11_SHIFT 2
#define PADCTRLREG_GPIO11_SEL_2_GPIO11_MASK 0x00000004
#define PADCTRLREG_GPIO11_SEL_1_GPIO11_SHIFT 1
#define PADCTRLREG_GPIO11_SEL_1_GPIO11_MASK 0x00000002
#define PADCTRLREG_GPIO11_SEL_0_GPIO11_SHIFT 0
#define PADCTRLREG_GPIO11_SEL_0_GPIO11_MASK 0x00000001
/* GPIO12 pad control @ +0x6C */
#define PADCTRLREG_GPIO12_OFFSET 0x0000006C
#define PADCTRLREG_GPIO12_TYPE UInt32
#define PADCTRLREG_GPIO12_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO12_PINSEL_GPIO12_SHIFT 8
#define PADCTRLREG_GPIO12_PINSEL_GPIO12_MASK 0x00000700
#define PADCTRLREG_GPIO12_HYS_EN_GPIO12_SHIFT 7
#define PADCTRLREG_GPIO12_HYS_EN_GPIO12_MASK 0x00000080
#define PADCTRLREG_GPIO12_PDN_GPIO12_SHIFT 6
#define PADCTRLREG_GPIO12_PDN_GPIO12_MASK 0x00000040
#define PADCTRLREG_GPIO12_PUP_GPIO12_SHIFT 5
#define PADCTRLREG_GPIO12_PUP_GPIO12_MASK 0x00000020
#define PADCTRLREG_GPIO12_SRC_GPIO12_SHIFT 4
#define PADCTRLREG_GPIO12_SRC_GPIO12_MASK 0x00000010
#define PADCTRLREG_GPIO12_IND_GPIO12_SHIFT 3
#define PADCTRLREG_GPIO12_IND_GPIO12_MASK 0x00000008
#define PADCTRLREG_GPIO12_SEL_2_GPIO12_SHIFT 2
#define PADCTRLREG_GPIO12_SEL_2_GPIO12_MASK 0x00000004
#define PADCTRLREG_GPIO12_SEL_1_GPIO12_SHIFT 1
#define PADCTRLREG_GPIO12_SEL_1_GPIO12_MASK 0x00000002
#define PADCTRLREG_GPIO12_SEL_0_GPIO12_SHIFT 0
#define PADCTRLREG_GPIO12_SEL_0_GPIO12_MASK 0x00000001
/* GPIO13 pad control @ +0x70 */
#define PADCTRLREG_GPIO13_OFFSET 0x00000070
#define PADCTRLREG_GPIO13_TYPE UInt32
#define PADCTRLREG_GPIO13_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO13_PINSEL_GPIO13_SHIFT 8
#define PADCTRLREG_GPIO13_PINSEL_GPIO13_MASK 0x00000700
#define PADCTRLREG_GPIO13_HYS_EN_GPIO13_SHIFT 7
#define PADCTRLREG_GPIO13_HYS_EN_GPIO13_MASK 0x00000080
#define PADCTRLREG_GPIO13_PDN_GPIO13_SHIFT 6
#define PADCTRLREG_GPIO13_PDN_GPIO13_MASK 0x00000040
#define PADCTRLREG_GPIO13_PUP_GPIO13_SHIFT 5
#define PADCTRLREG_GPIO13_PUP_GPIO13_MASK 0x00000020
#define PADCTRLREG_GPIO13_SRC_GPIO13_SHIFT 4
#define PADCTRLREG_GPIO13_SRC_GPIO13_MASK 0x00000010
#define PADCTRLREG_GPIO13_IND_GPIO13_SHIFT 3
#define PADCTRLREG_GPIO13_IND_GPIO13_MASK 0x00000008
#define PADCTRLREG_GPIO13_SEL_2_GPIO13_SHIFT 2
#define PADCTRLREG_GPIO13_SEL_2_GPIO13_MASK 0x00000004
#define PADCTRLREG_GPIO13_SEL_1_GPIO13_SHIFT 1
#define PADCTRLREG_GPIO13_SEL_1_GPIO13_MASK 0x00000002
#define PADCTRLREG_GPIO13_SEL_0_GPIO13_SHIFT 0
#define PADCTRLREG_GPIO13_SEL_0_GPIO13_MASK 0x00000001
/* GPIO14 pad control @ +0x74 */
#define PADCTRLREG_GPIO14_OFFSET 0x00000074
#define PADCTRLREG_GPIO14_TYPE UInt32
#define PADCTRLREG_GPIO14_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO14_PINSEL_GPIO14_SHIFT 8
#define PADCTRLREG_GPIO14_PINSEL_GPIO14_MASK 0x00000700
#define PADCTRLREG_GPIO14_HYS_EN_GPIO14_SHIFT 7
#define PADCTRLREG_GPIO14_HYS_EN_GPIO14_MASK 0x00000080
#define PADCTRLREG_GPIO14_PDN_GPIO14_SHIFT 6
#define PADCTRLREG_GPIO14_PDN_GPIO14_MASK 0x00000040
#define PADCTRLREG_GPIO14_PUP_GPIO14_SHIFT 5
#define PADCTRLREG_GPIO14_PUP_GPIO14_MASK 0x00000020
#define PADCTRLREG_GPIO14_SRC_GPIO14_SHIFT 4
#define PADCTRLREG_GPIO14_SRC_GPIO14_MASK 0x00000010
#define PADCTRLREG_GPIO14_IND_GPIO14_SHIFT 3
#define PADCTRLREG_GPIO14_IND_GPIO14_MASK 0x00000008
#define PADCTRLREG_GPIO14_SEL_2_GPIO14_SHIFT 2
#define PADCTRLREG_GPIO14_SEL_2_GPIO14_MASK 0x00000004
#define PADCTRLREG_GPIO14_SEL_1_GPIO14_SHIFT 1
#define PADCTRLREG_GPIO14_SEL_1_GPIO14_MASK 0x00000002
#define PADCTRLREG_GPIO14_SEL_0_GPIO14_SHIFT 0
#define PADCTRLREG_GPIO14_SEL_0_GPIO14_MASK 0x00000001
/* GPIO15 pad control @ +0x78 */
#define PADCTRLREG_GPIO15_OFFSET 0x00000078
#define PADCTRLREG_GPIO15_TYPE UInt32
#define PADCTRLREG_GPIO15_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO15_PINSEL_GPIO15_SHIFT 8
#define PADCTRLREG_GPIO15_PINSEL_GPIO15_MASK 0x00000700
#define PADCTRLREG_GPIO15_HYS_EN_GPIO15_SHIFT 7
#define PADCTRLREG_GPIO15_HYS_EN_GPIO15_MASK 0x00000080
#define PADCTRLREG_GPIO15_PDN_GPIO15_SHIFT 6
#define PADCTRLREG_GPIO15_PDN_GPIO15_MASK 0x00000040
#define PADCTRLREG_GPIO15_PUP_GPIO15_SHIFT 5
#define PADCTRLREG_GPIO15_PUP_GPIO15_MASK 0x00000020
#define PADCTRLREG_GPIO15_SRC_GPIO15_SHIFT 4
#define PADCTRLREG_GPIO15_SRC_GPIO15_MASK 0x00000010
#define PADCTRLREG_GPIO15_IND_GPIO15_SHIFT 3
#define PADCTRLREG_GPIO15_IND_GPIO15_MASK 0x00000008
#define PADCTRLREG_GPIO15_SEL_2_GPIO15_SHIFT 2
#define PADCTRLREG_GPIO15_SEL_2_GPIO15_MASK 0x00000004
#define PADCTRLREG_GPIO15_SEL_1_GPIO15_SHIFT 1
#define PADCTRLREG_GPIO15_SEL_1_GPIO15_MASK 0x00000002
#define PADCTRLREG_GPIO15_SEL_0_GPIO15_SHIFT 0
#define PADCTRLREG_GPIO15_SEL_0_GPIO15_MASK 0x00000001
/*
 * Pad-control registers for GPIO16 (@ +0x7C) and GPIO17 (@ +0x80).
 * Unlike the neighbouring GPIO pads, these two expose a reduced field
 * set -- only PINSEL (bits 10:8), PUP (5), SRC (4) and IND (3); the
 * HYS_EN, PDN and SEL_2..0 bit positions are reserved here, hence the
 * wider reserved mask 0xFFFFF8C7 (bits 31:11, 7:6, 2:0).
 * NOTE(review): field semantics inferred from names -- confirm in the TRM.
 */
#define PADCTRLREG_GPIO16_OFFSET 0x0000007C
#define PADCTRLREG_GPIO16_TYPE UInt32
#define PADCTRLREG_GPIO16_RESERVED_MASK 0xFFFFF8C7
#define PADCTRLREG_GPIO16_PINSEL_GPIO16_SHIFT 8
#define PADCTRLREG_GPIO16_PINSEL_GPIO16_MASK 0x00000700
#define PADCTRLREG_GPIO16_PUP_GPIO16_SHIFT 5
#define PADCTRLREG_GPIO16_PUP_GPIO16_MASK 0x00000020
#define PADCTRLREG_GPIO16_SRC_GPIO16_SHIFT 4
#define PADCTRLREG_GPIO16_SRC_GPIO16_MASK 0x00000010
#define PADCTRLREG_GPIO16_IND_GPIO16_SHIFT 3
#define PADCTRLREG_GPIO16_IND_GPIO16_MASK 0x00000008
#define PADCTRLREG_GPIO17_OFFSET 0x00000080
#define PADCTRLREG_GPIO17_TYPE UInt32
#define PADCTRLREG_GPIO17_RESERVED_MASK 0xFFFFF8C7
#define PADCTRLREG_GPIO17_PINSEL_GPIO17_SHIFT 8
#define PADCTRLREG_GPIO17_PINSEL_GPIO17_MASK 0x00000700
#define PADCTRLREG_GPIO17_PUP_GPIO17_SHIFT 5
#define PADCTRLREG_GPIO17_PUP_GPIO17_MASK 0x00000020
#define PADCTRLREG_GPIO17_SRC_GPIO17_SHIFT 4
#define PADCTRLREG_GPIO17_SRC_GPIO17_MASK 0x00000010
#define PADCTRLREG_GPIO17_IND_GPIO17_SHIFT 3
#define PADCTRLREG_GPIO17_IND_GPIO17_MASK 0x00000008
/*
 * Pad-control registers for GPIO18..GPIO28 (one 32-bit register per
 * pad, 4-byte stride from +0x84 to +0xAC). All eleven share the same
 * field layout, as encoded by the SHIFT/MASK pairs:
 *   bits 31:11 reserved, 10:8 PINSEL, 7 HYS_EN, 6 PDN, 5 PUP,
 *   4 SRC, 3 IND, 2:0 SEL_2..SEL_0.
 * NOTE(review): field semantics (pin-function select, hysteresis,
 * pull-down/pull-up, slew, input disable, drive select) inferred from
 * names -- confirm against the SoC reference manual.
 */
/* GPIO18 pad control @ +0x84 */
#define PADCTRLREG_GPIO18_OFFSET 0x00000084
#define PADCTRLREG_GPIO18_TYPE UInt32
#define PADCTRLREG_GPIO18_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO18_PINSEL_GPIO18_SHIFT 8
#define PADCTRLREG_GPIO18_PINSEL_GPIO18_MASK 0x00000700
#define PADCTRLREG_GPIO18_HYS_EN_GPIO18_SHIFT 7
#define PADCTRLREG_GPIO18_HYS_EN_GPIO18_MASK 0x00000080
#define PADCTRLREG_GPIO18_PDN_GPIO18_SHIFT 6
#define PADCTRLREG_GPIO18_PDN_GPIO18_MASK 0x00000040
#define PADCTRLREG_GPIO18_PUP_GPIO18_SHIFT 5
#define PADCTRLREG_GPIO18_PUP_GPIO18_MASK 0x00000020
#define PADCTRLREG_GPIO18_SRC_GPIO18_SHIFT 4
#define PADCTRLREG_GPIO18_SRC_GPIO18_MASK 0x00000010
#define PADCTRLREG_GPIO18_IND_GPIO18_SHIFT 3
#define PADCTRLREG_GPIO18_IND_GPIO18_MASK 0x00000008
#define PADCTRLREG_GPIO18_SEL_2_GPIO18_SHIFT 2
#define PADCTRLREG_GPIO18_SEL_2_GPIO18_MASK 0x00000004
#define PADCTRLREG_GPIO18_SEL_1_GPIO18_SHIFT 1
#define PADCTRLREG_GPIO18_SEL_1_GPIO18_MASK 0x00000002
#define PADCTRLREG_GPIO18_SEL_0_GPIO18_SHIFT 0
#define PADCTRLREG_GPIO18_SEL_0_GPIO18_MASK 0x00000001
/* GPIO19 pad control @ +0x88 */
#define PADCTRLREG_GPIO19_OFFSET 0x00000088
#define PADCTRLREG_GPIO19_TYPE UInt32
#define PADCTRLREG_GPIO19_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO19_PINSEL_GPIO19_SHIFT 8
#define PADCTRLREG_GPIO19_PINSEL_GPIO19_MASK 0x00000700
#define PADCTRLREG_GPIO19_HYS_EN_GPIO19_SHIFT 7
#define PADCTRLREG_GPIO19_HYS_EN_GPIO19_MASK 0x00000080
#define PADCTRLREG_GPIO19_PDN_GPIO19_SHIFT 6
#define PADCTRLREG_GPIO19_PDN_GPIO19_MASK 0x00000040
#define PADCTRLREG_GPIO19_PUP_GPIO19_SHIFT 5
#define PADCTRLREG_GPIO19_PUP_GPIO19_MASK 0x00000020
#define PADCTRLREG_GPIO19_SRC_GPIO19_SHIFT 4
#define PADCTRLREG_GPIO19_SRC_GPIO19_MASK 0x00000010
#define PADCTRLREG_GPIO19_IND_GPIO19_SHIFT 3
#define PADCTRLREG_GPIO19_IND_GPIO19_MASK 0x00000008
#define PADCTRLREG_GPIO19_SEL_2_GPIO19_SHIFT 2
#define PADCTRLREG_GPIO19_SEL_2_GPIO19_MASK 0x00000004
#define PADCTRLREG_GPIO19_SEL_1_GPIO19_SHIFT 1
#define PADCTRLREG_GPIO19_SEL_1_GPIO19_MASK 0x00000002
#define PADCTRLREG_GPIO19_SEL_0_GPIO19_SHIFT 0
#define PADCTRLREG_GPIO19_SEL_0_GPIO19_MASK 0x00000001
/* GPIO20 pad control @ +0x8C */
#define PADCTRLREG_GPIO20_OFFSET 0x0000008C
#define PADCTRLREG_GPIO20_TYPE UInt32
#define PADCTRLREG_GPIO20_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO20_PINSEL_GPIO20_SHIFT 8
#define PADCTRLREG_GPIO20_PINSEL_GPIO20_MASK 0x00000700
#define PADCTRLREG_GPIO20_HYS_EN_GPIO20_SHIFT 7
#define PADCTRLREG_GPIO20_HYS_EN_GPIO20_MASK 0x00000080
#define PADCTRLREG_GPIO20_PDN_GPIO20_SHIFT 6
#define PADCTRLREG_GPIO20_PDN_GPIO20_MASK 0x00000040
#define PADCTRLREG_GPIO20_PUP_GPIO20_SHIFT 5
#define PADCTRLREG_GPIO20_PUP_GPIO20_MASK 0x00000020
#define PADCTRLREG_GPIO20_SRC_GPIO20_SHIFT 4
#define PADCTRLREG_GPIO20_SRC_GPIO20_MASK 0x00000010
#define PADCTRLREG_GPIO20_IND_GPIO20_SHIFT 3
#define PADCTRLREG_GPIO20_IND_GPIO20_MASK 0x00000008
#define PADCTRLREG_GPIO20_SEL_2_GPIO20_SHIFT 2
#define PADCTRLREG_GPIO20_SEL_2_GPIO20_MASK 0x00000004
#define PADCTRLREG_GPIO20_SEL_1_GPIO20_SHIFT 1
#define PADCTRLREG_GPIO20_SEL_1_GPIO20_MASK 0x00000002
#define PADCTRLREG_GPIO20_SEL_0_GPIO20_SHIFT 0
#define PADCTRLREG_GPIO20_SEL_0_GPIO20_MASK 0x00000001
/* GPIO21 pad control @ +0x90 */
#define PADCTRLREG_GPIO21_OFFSET 0x00000090
#define PADCTRLREG_GPIO21_TYPE UInt32
#define PADCTRLREG_GPIO21_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO21_PINSEL_GPIO21_SHIFT 8
#define PADCTRLREG_GPIO21_PINSEL_GPIO21_MASK 0x00000700
#define PADCTRLREG_GPIO21_HYS_EN_GPIO21_SHIFT 7
#define PADCTRLREG_GPIO21_HYS_EN_GPIO21_MASK 0x00000080
#define PADCTRLREG_GPIO21_PDN_GPIO21_SHIFT 6
#define PADCTRLREG_GPIO21_PDN_GPIO21_MASK 0x00000040
#define PADCTRLREG_GPIO21_PUP_GPIO21_SHIFT 5
#define PADCTRLREG_GPIO21_PUP_GPIO21_MASK 0x00000020
#define PADCTRLREG_GPIO21_SRC_GPIO21_SHIFT 4
#define PADCTRLREG_GPIO21_SRC_GPIO21_MASK 0x00000010
#define PADCTRLREG_GPIO21_IND_GPIO21_SHIFT 3
#define PADCTRLREG_GPIO21_IND_GPIO21_MASK 0x00000008
#define PADCTRLREG_GPIO21_SEL_2_GPIO21_SHIFT 2
#define PADCTRLREG_GPIO21_SEL_2_GPIO21_MASK 0x00000004
#define PADCTRLREG_GPIO21_SEL_1_GPIO21_SHIFT 1
#define PADCTRLREG_GPIO21_SEL_1_GPIO21_MASK 0x00000002
#define PADCTRLREG_GPIO21_SEL_0_GPIO21_SHIFT 0
#define PADCTRLREG_GPIO21_SEL_0_GPIO21_MASK 0x00000001
/* GPIO22 pad control @ +0x94 */
#define PADCTRLREG_GPIO22_OFFSET 0x00000094
#define PADCTRLREG_GPIO22_TYPE UInt32
#define PADCTRLREG_GPIO22_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO22_PINSEL_GPIO22_SHIFT 8
#define PADCTRLREG_GPIO22_PINSEL_GPIO22_MASK 0x00000700
#define PADCTRLREG_GPIO22_HYS_EN_GPIO22_SHIFT 7
#define PADCTRLREG_GPIO22_HYS_EN_GPIO22_MASK 0x00000080
#define PADCTRLREG_GPIO22_PDN_GPIO22_SHIFT 6
#define PADCTRLREG_GPIO22_PDN_GPIO22_MASK 0x00000040
#define PADCTRLREG_GPIO22_PUP_GPIO22_SHIFT 5
#define PADCTRLREG_GPIO22_PUP_GPIO22_MASK 0x00000020
#define PADCTRLREG_GPIO22_SRC_GPIO22_SHIFT 4
#define PADCTRLREG_GPIO22_SRC_GPIO22_MASK 0x00000010
#define PADCTRLREG_GPIO22_IND_GPIO22_SHIFT 3
#define PADCTRLREG_GPIO22_IND_GPIO22_MASK 0x00000008
#define PADCTRLREG_GPIO22_SEL_2_GPIO22_SHIFT 2
#define PADCTRLREG_GPIO22_SEL_2_GPIO22_MASK 0x00000004
#define PADCTRLREG_GPIO22_SEL_1_GPIO22_SHIFT 1
#define PADCTRLREG_GPIO22_SEL_1_GPIO22_MASK 0x00000002
#define PADCTRLREG_GPIO22_SEL_0_GPIO22_SHIFT 0
#define PADCTRLREG_GPIO22_SEL_0_GPIO22_MASK 0x00000001
/* GPIO23 pad control @ +0x98 */
#define PADCTRLREG_GPIO23_OFFSET 0x00000098
#define PADCTRLREG_GPIO23_TYPE UInt32
#define PADCTRLREG_GPIO23_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO23_PINSEL_GPIO23_SHIFT 8
#define PADCTRLREG_GPIO23_PINSEL_GPIO23_MASK 0x00000700
#define PADCTRLREG_GPIO23_HYS_EN_GPIO23_SHIFT 7
#define PADCTRLREG_GPIO23_HYS_EN_GPIO23_MASK 0x00000080
#define PADCTRLREG_GPIO23_PDN_GPIO23_SHIFT 6
#define PADCTRLREG_GPIO23_PDN_GPIO23_MASK 0x00000040
#define PADCTRLREG_GPIO23_PUP_GPIO23_SHIFT 5
#define PADCTRLREG_GPIO23_PUP_GPIO23_MASK 0x00000020
#define PADCTRLREG_GPIO23_SRC_GPIO23_SHIFT 4
#define PADCTRLREG_GPIO23_SRC_GPIO23_MASK 0x00000010
#define PADCTRLREG_GPIO23_IND_GPIO23_SHIFT 3
#define PADCTRLREG_GPIO23_IND_GPIO23_MASK 0x00000008
#define PADCTRLREG_GPIO23_SEL_2_GPIO23_SHIFT 2
#define PADCTRLREG_GPIO23_SEL_2_GPIO23_MASK 0x00000004
#define PADCTRLREG_GPIO23_SEL_1_GPIO23_SHIFT 1
#define PADCTRLREG_GPIO23_SEL_1_GPIO23_MASK 0x00000002
#define PADCTRLREG_GPIO23_SEL_0_GPIO23_SHIFT 0
#define PADCTRLREG_GPIO23_SEL_0_GPIO23_MASK 0x00000001
/* GPIO24 pad control @ +0x9C */
#define PADCTRLREG_GPIO24_OFFSET 0x0000009C
#define PADCTRLREG_GPIO24_TYPE UInt32
#define PADCTRLREG_GPIO24_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO24_PINSEL_GPIO24_SHIFT 8
#define PADCTRLREG_GPIO24_PINSEL_GPIO24_MASK 0x00000700
#define PADCTRLREG_GPIO24_HYS_EN_GPIO24_SHIFT 7
#define PADCTRLREG_GPIO24_HYS_EN_GPIO24_MASK 0x00000080
#define PADCTRLREG_GPIO24_PDN_GPIO24_SHIFT 6
#define PADCTRLREG_GPIO24_PDN_GPIO24_MASK 0x00000040
#define PADCTRLREG_GPIO24_PUP_GPIO24_SHIFT 5
#define PADCTRLREG_GPIO24_PUP_GPIO24_MASK 0x00000020
#define PADCTRLREG_GPIO24_SRC_GPIO24_SHIFT 4
#define PADCTRLREG_GPIO24_SRC_GPIO24_MASK 0x00000010
#define PADCTRLREG_GPIO24_IND_GPIO24_SHIFT 3
#define PADCTRLREG_GPIO24_IND_GPIO24_MASK 0x00000008
#define PADCTRLREG_GPIO24_SEL_2_GPIO24_SHIFT 2
#define PADCTRLREG_GPIO24_SEL_2_GPIO24_MASK 0x00000004
#define PADCTRLREG_GPIO24_SEL_1_GPIO24_SHIFT 1
#define PADCTRLREG_GPIO24_SEL_1_GPIO24_MASK 0x00000002
#define PADCTRLREG_GPIO24_SEL_0_GPIO24_SHIFT 0
#define PADCTRLREG_GPIO24_SEL_0_GPIO24_MASK 0x00000001
/* GPIO25 pad control @ +0xA0 */
#define PADCTRLREG_GPIO25_OFFSET 0x000000A0
#define PADCTRLREG_GPIO25_TYPE UInt32
#define PADCTRLREG_GPIO25_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO25_PINSEL_GPIO25_SHIFT 8
#define PADCTRLREG_GPIO25_PINSEL_GPIO25_MASK 0x00000700
#define PADCTRLREG_GPIO25_HYS_EN_GPIO25_SHIFT 7
#define PADCTRLREG_GPIO25_HYS_EN_GPIO25_MASK 0x00000080
#define PADCTRLREG_GPIO25_PDN_GPIO25_SHIFT 6
#define PADCTRLREG_GPIO25_PDN_GPIO25_MASK 0x00000040
#define PADCTRLREG_GPIO25_PUP_GPIO25_SHIFT 5
#define PADCTRLREG_GPIO25_PUP_GPIO25_MASK 0x00000020
#define PADCTRLREG_GPIO25_SRC_GPIO25_SHIFT 4
#define PADCTRLREG_GPIO25_SRC_GPIO25_MASK 0x00000010
#define PADCTRLREG_GPIO25_IND_GPIO25_SHIFT 3
#define PADCTRLREG_GPIO25_IND_GPIO25_MASK 0x00000008
#define PADCTRLREG_GPIO25_SEL_2_GPIO25_SHIFT 2
#define PADCTRLREG_GPIO25_SEL_2_GPIO25_MASK 0x00000004
#define PADCTRLREG_GPIO25_SEL_1_GPIO25_SHIFT 1
#define PADCTRLREG_GPIO25_SEL_1_GPIO25_MASK 0x00000002
#define PADCTRLREG_GPIO25_SEL_0_GPIO25_SHIFT 0
#define PADCTRLREG_GPIO25_SEL_0_GPIO25_MASK 0x00000001
/* GPIO26 pad control @ +0xA4 */
#define PADCTRLREG_GPIO26_OFFSET 0x000000A4
#define PADCTRLREG_GPIO26_TYPE UInt32
#define PADCTRLREG_GPIO26_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO26_PINSEL_GPIO26_SHIFT 8
#define PADCTRLREG_GPIO26_PINSEL_GPIO26_MASK 0x00000700
#define PADCTRLREG_GPIO26_HYS_EN_GPIO26_SHIFT 7
#define PADCTRLREG_GPIO26_HYS_EN_GPIO26_MASK 0x00000080
#define PADCTRLREG_GPIO26_PDN_GPIO26_SHIFT 6
#define PADCTRLREG_GPIO26_PDN_GPIO26_MASK 0x00000040
#define PADCTRLREG_GPIO26_PUP_GPIO26_SHIFT 5
#define PADCTRLREG_GPIO26_PUP_GPIO26_MASK 0x00000020
#define PADCTRLREG_GPIO26_SRC_GPIO26_SHIFT 4
#define PADCTRLREG_GPIO26_SRC_GPIO26_MASK 0x00000010
#define PADCTRLREG_GPIO26_IND_GPIO26_SHIFT 3
#define PADCTRLREG_GPIO26_IND_GPIO26_MASK 0x00000008
#define PADCTRLREG_GPIO26_SEL_2_GPIO26_SHIFT 2
#define PADCTRLREG_GPIO26_SEL_2_GPIO26_MASK 0x00000004
#define PADCTRLREG_GPIO26_SEL_1_GPIO26_SHIFT 1
#define PADCTRLREG_GPIO26_SEL_1_GPIO26_MASK 0x00000002
#define PADCTRLREG_GPIO26_SEL_0_GPIO26_SHIFT 0
#define PADCTRLREG_GPIO26_SEL_0_GPIO26_MASK 0x00000001
/* GPIO27 pad control @ +0xA8 */
#define PADCTRLREG_GPIO27_OFFSET 0x000000A8
#define PADCTRLREG_GPIO27_TYPE UInt32
#define PADCTRLREG_GPIO27_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO27_PINSEL_GPIO27_SHIFT 8
#define PADCTRLREG_GPIO27_PINSEL_GPIO27_MASK 0x00000700
#define PADCTRLREG_GPIO27_HYS_EN_GPIO27_SHIFT 7
#define PADCTRLREG_GPIO27_HYS_EN_GPIO27_MASK 0x00000080
#define PADCTRLREG_GPIO27_PDN_GPIO27_SHIFT 6
#define PADCTRLREG_GPIO27_PDN_GPIO27_MASK 0x00000040
#define PADCTRLREG_GPIO27_PUP_GPIO27_SHIFT 5
#define PADCTRLREG_GPIO27_PUP_GPIO27_MASK 0x00000020
#define PADCTRLREG_GPIO27_SRC_GPIO27_SHIFT 4
#define PADCTRLREG_GPIO27_SRC_GPIO27_MASK 0x00000010
#define PADCTRLREG_GPIO27_IND_GPIO27_SHIFT 3
#define PADCTRLREG_GPIO27_IND_GPIO27_MASK 0x00000008
#define PADCTRLREG_GPIO27_SEL_2_GPIO27_SHIFT 2
#define PADCTRLREG_GPIO27_SEL_2_GPIO27_MASK 0x00000004
#define PADCTRLREG_GPIO27_SEL_1_GPIO27_SHIFT 1
#define PADCTRLREG_GPIO27_SEL_1_GPIO27_MASK 0x00000002
#define PADCTRLREG_GPIO27_SEL_0_GPIO27_SHIFT 0
#define PADCTRLREG_GPIO27_SEL_0_GPIO27_MASK 0x00000001
/* GPIO28 pad control @ +0xAC */
#define PADCTRLREG_GPIO28_OFFSET 0x000000AC
#define PADCTRLREG_GPIO28_TYPE UInt32
#define PADCTRLREG_GPIO28_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO28_PINSEL_GPIO28_SHIFT 8
#define PADCTRLREG_GPIO28_PINSEL_GPIO28_MASK 0x00000700
#define PADCTRLREG_GPIO28_HYS_EN_GPIO28_SHIFT 7
#define PADCTRLREG_GPIO28_HYS_EN_GPIO28_MASK 0x00000080
#define PADCTRLREG_GPIO28_PDN_GPIO28_SHIFT 6
#define PADCTRLREG_GPIO28_PDN_GPIO28_MASK 0x00000040
#define PADCTRLREG_GPIO28_PUP_GPIO28_SHIFT 5
#define PADCTRLREG_GPIO28_PUP_GPIO28_MASK 0x00000020
#define PADCTRLREG_GPIO28_SRC_GPIO28_SHIFT 4
#define PADCTRLREG_GPIO28_SRC_GPIO28_MASK 0x00000010
#define PADCTRLREG_GPIO28_IND_GPIO28_SHIFT 3
#define PADCTRLREG_GPIO28_IND_GPIO28_MASK 0x00000008
#define PADCTRLREG_GPIO28_SEL_2_GPIO28_SHIFT 2
#define PADCTRLREG_GPIO28_SEL_2_GPIO28_MASK 0x00000004
#define PADCTRLREG_GPIO28_SEL_1_GPIO28_SHIFT 1
#define PADCTRLREG_GPIO28_SEL_1_GPIO28_MASK 0x00000002
#define PADCTRLREG_GPIO28_SEL_0_GPIO28_SHIFT 0
#define PADCTRLREG_GPIO28_SEL_0_GPIO28_MASK 0x00000001
#define PADCTRLREG_GPIO32_OFFSET 0x000000B0
#define PADCTRLREG_GPIO32_TYPE UInt32
#define PADCTRLREG_GPIO32_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO32_PINSEL_GPIO32_SHIFT 8
#define PADCTRLREG_GPIO32_PINSEL_GPIO32_MASK 0x00000700
#define PADCTRLREG_GPIO32_HYS_EN_GPIO32_SHIFT 7
#define PADCTRLREG_GPIO32_HYS_EN_GPIO32_MASK 0x00000080
#define PADCTRLREG_GPIO32_PDN_GPIO32_SHIFT 6
#define PADCTRLREG_GPIO32_PDN_GPIO32_MASK 0x00000040
#define PADCTRLREG_GPIO32_PUP_GPIO32_SHIFT 5
#define PADCTRLREG_GPIO32_PUP_GPIO32_MASK 0x00000020
#define PADCTRLREG_GPIO32_SRC_GPIO32_SHIFT 4
#define PADCTRLREG_GPIO32_SRC_GPIO32_MASK 0x00000010
#define PADCTRLREG_GPIO32_IND_GPIO32_SHIFT 3
#define PADCTRLREG_GPIO32_IND_GPIO32_MASK 0x00000008
#define PADCTRLREG_GPIO32_SEL_2_GPIO32_SHIFT 2
#define PADCTRLREG_GPIO32_SEL_2_GPIO32_MASK 0x00000004
#define PADCTRLREG_GPIO32_SEL_1_GPIO32_SHIFT 1
#define PADCTRLREG_GPIO32_SEL_1_GPIO32_MASK 0x00000002
#define PADCTRLREG_GPIO32_SEL_0_GPIO32_SHIFT 0
#define PADCTRLREG_GPIO32_SEL_0_GPIO32_MASK 0x00000001
#define PADCTRLREG_GPIO33_OFFSET 0x000000B4
#define PADCTRLREG_GPIO33_TYPE UInt32
#define PADCTRLREG_GPIO33_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO33_PINSEL_GPIO33_SHIFT 8
#define PADCTRLREG_GPIO33_PINSEL_GPIO33_MASK 0x00000700
#define PADCTRLREG_GPIO33_HYS_EN_GPIO33_SHIFT 7
#define PADCTRLREG_GPIO33_HYS_EN_GPIO33_MASK 0x00000080
#define PADCTRLREG_GPIO33_PDN_GPIO33_SHIFT 6
#define PADCTRLREG_GPIO33_PDN_GPIO33_MASK 0x00000040
#define PADCTRLREG_GPIO33_PUP_GPIO33_SHIFT 5
#define PADCTRLREG_GPIO33_PUP_GPIO33_MASK 0x00000020
#define PADCTRLREG_GPIO33_SRC_GPIO33_SHIFT 4
#define PADCTRLREG_GPIO33_SRC_GPIO33_MASK 0x00000010
#define PADCTRLREG_GPIO33_IND_GPIO33_SHIFT 3
#define PADCTRLREG_GPIO33_IND_GPIO33_MASK 0x00000008
#define PADCTRLREG_GPIO33_SEL_2_GPIO33_SHIFT 2
#define PADCTRLREG_GPIO33_SEL_2_GPIO33_MASK 0x00000004
#define PADCTRLREG_GPIO33_SEL_1_GPIO33_SHIFT 1
#define PADCTRLREG_GPIO33_SEL_1_GPIO33_MASK 0x00000002
#define PADCTRLREG_GPIO33_SEL_0_GPIO33_SHIFT 0
#define PADCTRLREG_GPIO33_SEL_0_GPIO33_MASK 0x00000001
#define PADCTRLREG_GPIO34_OFFSET 0x000000B8
#define PADCTRLREG_GPIO34_TYPE UInt32
#define PADCTRLREG_GPIO34_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO34_PINSEL_GPIO34_SHIFT 8
#define PADCTRLREG_GPIO34_PINSEL_GPIO34_MASK 0x00000700
#define PADCTRLREG_GPIO34_HYS_EN_GPIO34_SHIFT 7
#define PADCTRLREG_GPIO34_HYS_EN_GPIO34_MASK 0x00000080
#define PADCTRLREG_GPIO34_PDN_GPIO34_SHIFT 6
#define PADCTRLREG_GPIO34_PDN_GPIO34_MASK 0x00000040
#define PADCTRLREG_GPIO34_PUP_GPIO34_SHIFT 5
#define PADCTRLREG_GPIO34_PUP_GPIO34_MASK 0x00000020
#define PADCTRLREG_GPIO34_SRC_GPIO34_SHIFT 4
#define PADCTRLREG_GPIO34_SRC_GPIO34_MASK 0x00000010
#define PADCTRLREG_GPIO34_IND_GPIO34_SHIFT 3
#define PADCTRLREG_GPIO34_IND_GPIO34_MASK 0x00000008
#define PADCTRLREG_GPIO34_SEL_2_GPIO34_SHIFT 2
#define PADCTRLREG_GPIO34_SEL_2_GPIO34_MASK 0x00000004
#define PADCTRLREG_GPIO34_SEL_1_GPIO34_SHIFT 1
#define PADCTRLREG_GPIO34_SEL_1_GPIO34_MASK 0x00000002
#define PADCTRLREG_GPIO34_SEL_0_GPIO34_SHIFT 0
#define PADCTRLREG_GPIO34_SEL_0_GPIO34_MASK 0x00000001
#define PADCTRLREG_GPS_CALREQ_OFFSET 0x000000BC
#define PADCTRLREG_GPS_CALREQ_TYPE UInt32
#define PADCTRLREG_GPS_CALREQ_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPS_CALREQ_PINSEL_GPS_CALREQ_SHIFT 8
#define PADCTRLREG_GPS_CALREQ_PINSEL_GPS_CALREQ_MASK 0x00000700
#define PADCTRLREG_GPS_CALREQ_HYS_EN_GPS_CALREQ_SHIFT 7
#define PADCTRLREG_GPS_CALREQ_HYS_EN_GPS_CALREQ_MASK 0x00000080
#define PADCTRLREG_GPS_CALREQ_PDN_GPS_CALREQ_SHIFT 6
#define PADCTRLREG_GPS_CALREQ_PDN_GPS_CALREQ_MASK 0x00000040
#define PADCTRLREG_GPS_CALREQ_PUP_GPS_CALREQ_SHIFT 5
#define PADCTRLREG_GPS_CALREQ_PUP_GPS_CALREQ_MASK 0x00000020
#define PADCTRLREG_GPS_CALREQ_SRC_GPS_CALREQ_SHIFT 4
#define PADCTRLREG_GPS_CALREQ_SRC_GPS_CALREQ_MASK 0x00000010
#define PADCTRLREG_GPS_CALREQ_IND_GPS_CALREQ_SHIFT 3
#define PADCTRLREG_GPS_CALREQ_IND_GPS_CALREQ_MASK 0x00000008
#define PADCTRLREG_GPS_CALREQ_SEL_2_GPS_CALREQ_SHIFT 2
#define PADCTRLREG_GPS_CALREQ_SEL_2_GPS_CALREQ_MASK 0x00000004
#define PADCTRLREG_GPS_CALREQ_SEL_1_GPS_CALREQ_SHIFT 1
#define PADCTRLREG_GPS_CALREQ_SEL_1_GPS_CALREQ_MASK 0x00000002
#define PADCTRLREG_GPS_CALREQ_SEL_0_GPS_CALREQ_SHIFT 0
#define PADCTRLREG_GPS_CALREQ_SEL_0_GPS_CALREQ_MASK 0x00000001
#define PADCTRLREG_GPS_HOSTREQ_OFFSET 0x000000C0
#define PADCTRLREG_GPS_HOSTREQ_TYPE UInt32
#define PADCTRLREG_GPS_HOSTREQ_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPS_HOSTREQ_PINSEL_GPS_HOSTREQ_SHIFT 8
#define PADCTRLREG_GPS_HOSTREQ_PINSEL_GPS_HOSTREQ_MASK 0x00000700
#define PADCTRLREG_GPS_HOSTREQ_HYS_EN_GPS_HOSTREQ_SHIFT 7
#define PADCTRLREG_GPS_HOSTREQ_HYS_EN_GPS_HOSTREQ_MASK 0x00000080
#define PADCTRLREG_GPS_HOSTREQ_PDN_GPS_HOSTREQ_SHIFT 6
#define PADCTRLREG_GPS_HOSTREQ_PDN_GPS_HOSTREQ_MASK 0x00000040
#define PADCTRLREG_GPS_HOSTREQ_PUP_GPS_HOSTREQ_SHIFT 5
#define PADCTRLREG_GPS_HOSTREQ_PUP_GPS_HOSTREQ_MASK 0x00000020
#define PADCTRLREG_GPS_HOSTREQ_SRC_GPS_HOSTREQ_SHIFT 4
#define PADCTRLREG_GPS_HOSTREQ_SRC_GPS_HOSTREQ_MASK 0x00000010
#define PADCTRLREG_GPS_HOSTREQ_IND_GPS_HOSTREQ_SHIFT 3
#define PADCTRLREG_GPS_HOSTREQ_IND_GPS_HOSTREQ_MASK 0x00000008
#define PADCTRLREG_GPS_HOSTREQ_SEL_2_GPS_HOSTREQ_SHIFT 2
#define PADCTRLREG_GPS_HOSTREQ_SEL_2_GPS_HOSTREQ_MASK 0x00000004
#define PADCTRLREG_GPS_HOSTREQ_SEL_1_GPS_HOSTREQ_SHIFT 1
#define PADCTRLREG_GPS_HOSTREQ_SEL_1_GPS_HOSTREQ_MASK 0x00000002
#define PADCTRLREG_GPS_HOSTREQ_SEL_0_GPS_HOSTREQ_SHIFT 0
#define PADCTRLREG_GPS_HOSTREQ_SEL_0_GPS_HOSTREQ_MASK 0x00000001
#define PADCTRLREG_GPS_PABLANK_OFFSET 0x000000C4
#define PADCTRLREG_GPS_PABLANK_TYPE UInt32
#define PADCTRLREG_GPS_PABLANK_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPS_PABLANK_PINSEL_GPS_PABLANK_SHIFT 8
#define PADCTRLREG_GPS_PABLANK_PINSEL_GPS_PABLANK_MASK 0x00000700
#define PADCTRLREG_GPS_PABLANK_HYS_EN_GPS_PABLANK_SHIFT 7
#define PADCTRLREG_GPS_PABLANK_HYS_EN_GPS_PABLANK_MASK 0x00000080
#define PADCTRLREG_GPS_PABLANK_PDN_GPS_PABLANK_SHIFT 6
#define PADCTRLREG_GPS_PABLANK_PDN_GPS_PABLANK_MASK 0x00000040
#define PADCTRLREG_GPS_PABLANK_PUP_GPS_PABLANK_SHIFT 5
#define PADCTRLREG_GPS_PABLANK_PUP_GPS_PABLANK_MASK 0x00000020
#define PADCTRLREG_GPS_PABLANK_SRC_GPS_PABLANK_SHIFT 4
#define PADCTRLREG_GPS_PABLANK_SRC_GPS_PABLANK_MASK 0x00000010
#define PADCTRLREG_GPS_PABLANK_IND_GPS_PABLANK_SHIFT 3
#define PADCTRLREG_GPS_PABLANK_IND_GPS_PABLANK_MASK 0x00000008
#define PADCTRLREG_GPS_PABLANK_SEL_2_GPS_PABLANK_SHIFT 2
#define PADCTRLREG_GPS_PABLANK_SEL_2_GPS_PABLANK_MASK 0x00000004
#define PADCTRLREG_GPS_PABLANK_SEL_1_GPS_PABLANK_SHIFT 1
#define PADCTRLREG_GPS_PABLANK_SEL_1_GPS_PABLANK_MASK 0x00000002
#define PADCTRLREG_GPS_PABLANK_SEL_0_GPS_PABLANK_SHIFT 0
#define PADCTRLREG_GPS_PABLANK_SEL_0_GPS_PABLANK_MASK 0x00000001
#define PADCTRLREG_GPS_TMARK_OFFSET 0x000000C8
#define PADCTRLREG_GPS_TMARK_TYPE UInt32
#define PADCTRLREG_GPS_TMARK_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPS_TMARK_PINSEL_GPS_TMARK_SHIFT 8
#define PADCTRLREG_GPS_TMARK_PINSEL_GPS_TMARK_MASK 0x00000700
#define PADCTRLREG_GPS_TMARK_HYS_EN_GPS_TMARK_SHIFT 7
#define PADCTRLREG_GPS_TMARK_HYS_EN_GPS_TMARK_MASK 0x00000080
#define PADCTRLREG_GPS_TMARK_PDN_GPS_TMARK_SHIFT 6
#define PADCTRLREG_GPS_TMARK_PDN_GPS_TMARK_MASK 0x00000040
#define PADCTRLREG_GPS_TMARK_PUP_GPS_TMARK_SHIFT 5
#define PADCTRLREG_GPS_TMARK_PUP_GPS_TMARK_MASK 0x00000020
#define PADCTRLREG_GPS_TMARK_SRC_GPS_TMARK_SHIFT 4
#define PADCTRLREG_GPS_TMARK_SRC_GPS_TMARK_MASK 0x00000010
#define PADCTRLREG_GPS_TMARK_IND_GPS_TMARK_SHIFT 3
#define PADCTRLREG_GPS_TMARK_IND_GPS_TMARK_MASK 0x00000008
#define PADCTRLREG_GPS_TMARK_SEL_2_GPS_TMARK_SHIFT 2
#define PADCTRLREG_GPS_TMARK_SEL_2_GPS_TMARK_MASK 0x00000004
#define PADCTRLREG_GPS_TMARK_SEL_1_GPS_TMARK_SHIFT 1
#define PADCTRLREG_GPS_TMARK_SEL_1_GPS_TMARK_MASK 0x00000002
#define PADCTRLREG_GPS_TMARK_SEL_0_GPS_TMARK_SHIFT 0
#define PADCTRLREG_GPS_TMARK_SEL_0_GPS_TMARK_MASK 0x00000001
#define PADCTRLREG_ICUSBDM_OFFSET 0x000000CC
#define PADCTRLREG_ICUSBDM_TYPE UInt32
#define PADCTRLREG_ICUSBDM_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_ICUSBDM_PINSEL_ICUSBDM_SHIFT 8
#define PADCTRLREG_ICUSBDM_PINSEL_ICUSBDM_MASK 0x00000700
#define PADCTRLREG_ICUSBDM_HYS_EN_ICUSBDM_SHIFT 7
#define PADCTRLREG_ICUSBDM_HYS_EN_ICUSBDM_MASK 0x00000080
#define PADCTRLREG_ICUSBDM_PDN_ICUSBDM_SHIFT 6
#define PADCTRLREG_ICUSBDM_PDN_ICUSBDM_MASK 0x00000040
#define PADCTRLREG_ICUSBDM_PUP_ICUSBDM_SHIFT 5
#define PADCTRLREG_ICUSBDM_PUP_ICUSBDM_MASK 0x00000020
#define PADCTRLREG_ICUSBDM_SRC_ICUSBDM_SHIFT 4
#define PADCTRLREG_ICUSBDM_SRC_ICUSBDM_MASK 0x00000010
#define PADCTRLREG_ICUSBDM_IND_ICUSBDM_SHIFT 3
#define PADCTRLREG_ICUSBDM_IND_ICUSBDM_MASK 0x00000008
#define PADCTRLREG_ICUSBDM_SEL_2_ICUSBDM_SHIFT 2
#define PADCTRLREG_ICUSBDM_SEL_2_ICUSBDM_MASK 0x00000004
#define PADCTRLREG_ICUSBDM_SEL_1_ICUSBDM_SHIFT 1
#define PADCTRLREG_ICUSBDM_SEL_1_ICUSBDM_MASK 0x00000002
#define PADCTRLREG_ICUSBDM_SEL_0_ICUSBDM_SHIFT 0
#define PADCTRLREG_ICUSBDM_SEL_0_ICUSBDM_MASK 0x00000001
#define PADCTRLREG_ICUSBDP_OFFSET 0x000000D0
#define PADCTRLREG_ICUSBDP_TYPE UInt32
#define PADCTRLREG_ICUSBDP_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_ICUSBDP_PINSEL_ICUSBDP_SHIFT 8
#define PADCTRLREG_ICUSBDP_PINSEL_ICUSBDP_MASK 0x00000700
#define PADCTRLREG_ICUSBDP_HYS_EN_ICUSBDP_SHIFT 7
#define PADCTRLREG_ICUSBDP_HYS_EN_ICUSBDP_MASK 0x00000080
#define PADCTRLREG_ICUSBDP_PDN_ICUSBDP_SHIFT 6
#define PADCTRLREG_ICUSBDP_PDN_ICUSBDP_MASK 0x00000040
#define PADCTRLREG_ICUSBDP_PUP_ICUSBDP_SHIFT 5
#define PADCTRLREG_ICUSBDP_PUP_ICUSBDP_MASK 0x00000020
#define PADCTRLREG_ICUSBDP_SRC_ICUSBDP_SHIFT 4
#define PADCTRLREG_ICUSBDP_SRC_ICUSBDP_MASK 0x00000010
#define PADCTRLREG_ICUSBDP_IND_ICUSBDP_SHIFT 3
#define PADCTRLREG_ICUSBDP_IND_ICUSBDP_MASK 0x00000008
#define PADCTRLREG_ICUSBDP_SEL_2_ICUSBDP_SHIFT 2
#define PADCTRLREG_ICUSBDP_SEL_2_ICUSBDP_MASK 0x00000004
#define PADCTRLREG_ICUSBDP_SEL_1_ICUSBDP_SHIFT 1
#define PADCTRLREG_ICUSBDP_SEL_1_ICUSBDP_MASK 0x00000002
#define PADCTRLREG_ICUSBDP_SEL_0_ICUSBDP_SHIFT 0
#define PADCTRLREG_ICUSBDP_SEL_0_ICUSBDP_MASK 0x00000001
#define PADCTRLREG_LCDCS0_OFFSET 0x000000D4
#define PADCTRLREG_LCDCS0_TYPE UInt32
#define PADCTRLREG_LCDCS0_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_LCDCS0_PINSEL_LCDCS0_SHIFT 8
#define PADCTRLREG_LCDCS0_PINSEL_LCDCS0_MASK 0x00000700
#define PADCTRLREG_LCDCS0_HYS_EN_LCDCS0_SHIFT 7
#define PADCTRLREG_LCDCS0_HYS_EN_LCDCS0_MASK 0x00000080
#define PADCTRLREG_LCDCS0_PDN_LCDCS0_SHIFT 6
#define PADCTRLREG_LCDCS0_PDN_LCDCS0_MASK 0x00000040
#define PADCTRLREG_LCDCS0_PUP_LCDCS0_SHIFT 5
#define PADCTRLREG_LCDCS0_PUP_LCDCS0_MASK 0x00000020
#define PADCTRLREG_LCDCS0_SRC_LCDCS0_SHIFT 4
#define PADCTRLREG_LCDCS0_SRC_LCDCS0_MASK 0x00000010
#define PADCTRLREG_LCDCS0_IND_LCDCS0_SHIFT 3
#define PADCTRLREG_LCDCS0_IND_LCDCS0_MASK 0x00000008
#define PADCTRLREG_LCDCS0_SEL_2_LCDCS0_SHIFT 2
#define PADCTRLREG_LCDCS0_SEL_2_LCDCS0_MASK 0x00000004
#define PADCTRLREG_LCDCS0_SEL_1_LCDCS0_SHIFT 1
#define PADCTRLREG_LCDCS0_SEL_1_LCDCS0_MASK 0x00000002
#define PADCTRLREG_LCDCS0_SEL_0_LCDCS0_SHIFT 0
#define PADCTRLREG_LCDCS0_SEL_0_LCDCS0_MASK 0x00000001
#define PADCTRLREG_LCDRES_OFFSET 0x000000D8
#define PADCTRLREG_LCDRES_TYPE UInt32
#define PADCTRLREG_LCDRES_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_LCDRES_PINSEL_LCDRES_SHIFT 8
#define PADCTRLREG_LCDRES_PINSEL_LCDRES_MASK 0x00000700
#define PADCTRLREG_LCDRES_HYS_EN_LCDRES_SHIFT 7
#define PADCTRLREG_LCDRES_HYS_EN_LCDRES_MASK 0x00000080
#define PADCTRLREG_LCDRES_PDN_LCDRES_SHIFT 6
#define PADCTRLREG_LCDRES_PDN_LCDRES_MASK 0x00000040
#define PADCTRLREG_LCDRES_PUP_LCDRES_SHIFT 5
#define PADCTRLREG_LCDRES_PUP_LCDRES_MASK 0x00000020
#define PADCTRLREG_LCDRES_SRC_LCDRES_SHIFT 4
#define PADCTRLREG_LCDRES_SRC_LCDRES_MASK 0x00000010
#define PADCTRLREG_LCDRES_IND_LCDRES_SHIFT 3
#define PADCTRLREG_LCDRES_IND_LCDRES_MASK 0x00000008
#define PADCTRLREG_LCDRES_SEL_2_LCDRES_SHIFT 2
#define PADCTRLREG_LCDRES_SEL_2_LCDRES_MASK 0x00000004
#define PADCTRLREG_LCDRES_SEL_1_LCDRES_SHIFT 1
#define PADCTRLREG_LCDRES_SEL_1_LCDRES_MASK 0x00000002
#define PADCTRLREG_LCDRES_SEL_0_LCDRES_SHIFT 0
#define PADCTRLREG_LCDRES_SEL_0_LCDRES_MASK 0x00000001
#define PADCTRLREG_LCDSCL_OFFSET 0x000000DC
#define PADCTRLREG_LCDSCL_TYPE UInt32
#define PADCTRLREG_LCDSCL_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_LCDSCL_PINSEL_LCDSCL_SHIFT 8
#define PADCTRLREG_LCDSCL_PINSEL_LCDSCL_MASK 0x00000700
#define PADCTRLREG_LCDSCL_HYS_EN_LCDSCL_SHIFT 7
#define PADCTRLREG_LCDSCL_HYS_EN_LCDSCL_MASK 0x00000080
#define PADCTRLREG_LCDSCL_PDN_LCDSCL_SHIFT 6
#define PADCTRLREG_LCDSCL_PDN_LCDSCL_MASK 0x00000040
#define PADCTRLREG_LCDSCL_PUP_LCDSCL_SHIFT 5
#define PADCTRLREG_LCDSCL_PUP_LCDSCL_MASK 0x00000020
#define PADCTRLREG_LCDSCL_SRC_LCDSCL_SHIFT 4
#define PADCTRLREG_LCDSCL_SRC_LCDSCL_MASK 0x00000010
#define PADCTRLREG_LCDSCL_IND_LCDSCL_SHIFT 3
#define PADCTRLREG_LCDSCL_IND_LCDSCL_MASK 0x00000008
#define PADCTRLREG_LCDSCL_SEL_2_LCDSCL_SHIFT 2
#define PADCTRLREG_LCDSCL_SEL_2_LCDSCL_MASK 0x00000004
#define PADCTRLREG_LCDSCL_SEL_1_LCDSCL_SHIFT 1
#define PADCTRLREG_LCDSCL_SEL_1_LCDSCL_MASK 0x00000002
#define PADCTRLREG_LCDSCL_SEL_0_LCDSCL_SHIFT 0
#define PADCTRLREG_LCDSCL_SEL_0_LCDSCL_MASK 0x00000001
#define PADCTRLREG_LCDSDA_OFFSET 0x000000E0
#define PADCTRLREG_LCDSDA_TYPE UInt32
#define PADCTRLREG_LCDSDA_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_LCDSDA_PINSEL_LCDSDA_SHIFT 8
#define PADCTRLREG_LCDSDA_PINSEL_LCDSDA_MASK 0x00000700
#define PADCTRLREG_LCDSDA_HYS_EN_LCDSDA_SHIFT 7
#define PADCTRLREG_LCDSDA_HYS_EN_LCDSDA_MASK 0x00000080
#define PADCTRLREG_LCDSDA_PDN_LCDSDA_SHIFT 6
#define PADCTRLREG_LCDSDA_PDN_LCDSDA_MASK 0x00000040
#define PADCTRLREG_LCDSDA_PUP_LCDSDA_SHIFT 5
#define PADCTRLREG_LCDSDA_PUP_LCDSDA_MASK 0x00000020
#define PADCTRLREG_LCDSDA_SRC_LCDSDA_SHIFT 4
#define PADCTRLREG_LCDSDA_SRC_LCDSDA_MASK 0x00000010
#define PADCTRLREG_LCDSDA_IND_LCDSDA_SHIFT 3
#define PADCTRLREG_LCDSDA_IND_LCDSDA_MASK 0x00000008
#define PADCTRLREG_LCDSDA_SEL_2_LCDSDA_SHIFT 2
#define PADCTRLREG_LCDSDA_SEL_2_LCDSDA_MASK 0x00000004
#define PADCTRLREG_LCDSDA_SEL_1_LCDSDA_SHIFT 1
#define PADCTRLREG_LCDSDA_SEL_1_LCDSDA_MASK 0x00000002
#define PADCTRLREG_LCDSDA_SEL_0_LCDSDA_SHIFT 0
#define PADCTRLREG_LCDSDA_SEL_0_LCDSDA_MASK 0x00000001
#define PADCTRLREG_LCDTE_OFFSET 0x000000E4
#define PADCTRLREG_LCDTE_TYPE UInt32
#define PADCTRLREG_LCDTE_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_LCDTE_PINSEL_LCDTE_SHIFT 8
#define PADCTRLREG_LCDTE_PINSEL_LCDTE_MASK 0x00000700
#define PADCTRLREG_LCDTE_HYS_EN_LCDTE_SHIFT 7
#define PADCTRLREG_LCDTE_HYS_EN_LCDTE_MASK 0x00000080
#define PADCTRLREG_LCDTE_PDN_LCDTE_SHIFT 6
#define PADCTRLREG_LCDTE_PDN_LCDTE_MASK 0x00000040
#define PADCTRLREG_LCDTE_PUP_LCDTE_SHIFT 5
#define PADCTRLREG_LCDTE_PUP_LCDTE_MASK 0x00000020
#define PADCTRLREG_LCDTE_SRC_LCDTE_SHIFT 4
#define PADCTRLREG_LCDTE_SRC_LCDTE_MASK 0x00000010
#define PADCTRLREG_LCDTE_IND_LCDTE_SHIFT 3
#define PADCTRLREG_LCDTE_IND_LCDTE_MASK 0x00000008
#define PADCTRLREG_LCDTE_SEL_2_LCDTE_SHIFT 2
#define PADCTRLREG_LCDTE_SEL_2_LCDTE_MASK 0x00000004
#define PADCTRLREG_LCDTE_SEL_1_LCDTE_SHIFT 1
#define PADCTRLREG_LCDTE_SEL_1_LCDTE_MASK 0x00000002
#define PADCTRLREG_LCDTE_SEL_0_LCDTE_SHIFT 0
#define PADCTRLREG_LCDTE_SEL_0_LCDTE_MASK 0x00000001
#define PADCTRLREG_MDMGPIO00_OFFSET 0x000000E8
#define PADCTRLREG_MDMGPIO00_TYPE UInt32
#define PADCTRLREG_MDMGPIO00_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MDMGPIO00_PINSEL_MDMGPIO00_SHIFT 8
#define PADCTRLREG_MDMGPIO00_PINSEL_MDMGPIO00_MASK 0x00000700
#define PADCTRLREG_MDMGPIO00_HYS_EN_MDMGPIO00_SHIFT 7
#define PADCTRLREG_MDMGPIO00_HYS_EN_MDMGPIO00_MASK 0x00000080
#define PADCTRLREG_MDMGPIO00_PDN_MDMGPIO00_SHIFT 6
#define PADCTRLREG_MDMGPIO00_PDN_MDMGPIO00_MASK 0x00000040
#define PADCTRLREG_MDMGPIO00_PUP_MDMGPIO00_SHIFT 5
#define PADCTRLREG_MDMGPIO00_PUP_MDMGPIO00_MASK 0x00000020
#define PADCTRLREG_MDMGPIO00_SRC_MDMGPIO00_SHIFT 4
#define PADCTRLREG_MDMGPIO00_SRC_MDMGPIO00_MASK 0x00000010
#define PADCTRLREG_MDMGPIO00_IND_MDMGPIO00_SHIFT 3
#define PADCTRLREG_MDMGPIO00_IND_MDMGPIO00_MASK 0x00000008
#define PADCTRLREG_MDMGPIO00_SEL_2_MDMGPIO00_SHIFT 2
#define PADCTRLREG_MDMGPIO00_SEL_2_MDMGPIO00_MASK 0x00000004
#define PADCTRLREG_MDMGPIO00_SEL_1_MDMGPIO00_SHIFT 1
#define PADCTRLREG_MDMGPIO00_SEL_1_MDMGPIO00_MASK 0x00000002
#define PADCTRLREG_MDMGPIO00_SEL_0_MDMGPIO00_SHIFT 0
#define PADCTRLREG_MDMGPIO00_SEL_0_MDMGPIO00_MASK 0x00000001
#define PADCTRLREG_MDMGPIO01_OFFSET 0x000000EC
#define PADCTRLREG_MDMGPIO01_TYPE UInt32
#define PADCTRLREG_MDMGPIO01_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MDMGPIO01_PINSEL_MDMGPIO01_SHIFT 8
#define PADCTRLREG_MDMGPIO01_PINSEL_MDMGPIO01_MASK 0x00000700
#define PADCTRLREG_MDMGPIO01_HYS_EN_MDMGPIO01_SHIFT 7
#define PADCTRLREG_MDMGPIO01_HYS_EN_MDMGPIO01_MASK 0x00000080
#define PADCTRLREG_MDMGPIO01_PDN_MDMGPIO01_SHIFT 6
#define PADCTRLREG_MDMGPIO01_PDN_MDMGPIO01_MASK 0x00000040
#define PADCTRLREG_MDMGPIO01_PUP_MDMGPIO01_SHIFT 5
#define PADCTRLREG_MDMGPIO01_PUP_MDMGPIO01_MASK 0x00000020
#define PADCTRLREG_MDMGPIO01_SRC_MDMGPIO01_SHIFT 4
#define PADCTRLREG_MDMGPIO01_SRC_MDMGPIO01_MASK 0x00000010
#define PADCTRLREG_MDMGPIO01_IND_MDMGPIO01_SHIFT 3
#define PADCTRLREG_MDMGPIO01_IND_MDMGPIO01_MASK 0x00000008
#define PADCTRLREG_MDMGPIO01_SEL_2_MDMGPIO01_SHIFT 2
#define PADCTRLREG_MDMGPIO01_SEL_2_MDMGPIO01_MASK 0x00000004
#define PADCTRLREG_MDMGPIO01_SEL_1_MDMGPIO01_SHIFT 1
#define PADCTRLREG_MDMGPIO01_SEL_1_MDMGPIO01_MASK 0x00000002
#define PADCTRLREG_MDMGPIO01_SEL_0_MDMGPIO01_SHIFT 0
#define PADCTRLREG_MDMGPIO01_SEL_0_MDMGPIO01_MASK 0x00000001
#define PADCTRLREG_MDMGPIO02_OFFSET 0x000000F0
#define PADCTRLREG_MDMGPIO02_TYPE UInt32
#define PADCTRLREG_MDMGPIO02_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MDMGPIO02_PINSEL_MDMGPIO02_SHIFT 8
#define PADCTRLREG_MDMGPIO02_PINSEL_MDMGPIO02_MASK 0x00000700
#define PADCTRLREG_MDMGPIO02_HYS_EN_MDMGPIO02_SHIFT 7
#define PADCTRLREG_MDMGPIO02_HYS_EN_MDMGPIO02_MASK 0x00000080
#define PADCTRLREG_MDMGPIO02_PDN_MDMGPIO02_SHIFT 6
#define PADCTRLREG_MDMGPIO02_PDN_MDMGPIO02_MASK 0x00000040
#define PADCTRLREG_MDMGPIO02_PUP_MDMGPIO02_SHIFT 5
#define PADCTRLREG_MDMGPIO02_PUP_MDMGPIO02_MASK 0x00000020
#define PADCTRLREG_MDMGPIO02_SRC_MDMGPIO02_SHIFT 4
#define PADCTRLREG_MDMGPIO02_SRC_MDMGPIO02_MASK 0x00000010
#define PADCTRLREG_MDMGPIO02_IND_MDMGPIO02_SHIFT 3
#define PADCTRLREG_MDMGPIO02_IND_MDMGPIO02_MASK 0x00000008
#define PADCTRLREG_MDMGPIO02_SEL_2_MDMGPIO02_SHIFT 2
#define PADCTRLREG_MDMGPIO02_SEL_2_MDMGPIO02_MASK 0x00000004
#define PADCTRLREG_MDMGPIO02_SEL_1_MDMGPIO02_SHIFT 1
#define PADCTRLREG_MDMGPIO02_SEL_1_MDMGPIO02_MASK 0x00000002
#define PADCTRLREG_MDMGPIO02_SEL_0_MDMGPIO02_SHIFT 0
#define PADCTRLREG_MDMGPIO02_SEL_0_MDMGPIO02_MASK 0x00000001
#define PADCTRLREG_MDMGPIO03_OFFSET 0x000000F4
#define PADCTRLREG_MDMGPIO03_TYPE UInt32
#define PADCTRLREG_MDMGPIO03_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MDMGPIO03_PINSEL_MDMGPIO03_SHIFT 8
#define PADCTRLREG_MDMGPIO03_PINSEL_MDMGPIO03_MASK 0x00000700
#define PADCTRLREG_MDMGPIO03_HYS_EN_MDMGPIO03_SHIFT 7
#define PADCTRLREG_MDMGPIO03_HYS_EN_MDMGPIO03_MASK 0x00000080
#define PADCTRLREG_MDMGPIO03_PDN_MDMGPIO03_SHIFT 6
#define PADCTRLREG_MDMGPIO03_PDN_MDMGPIO03_MASK 0x00000040
#define PADCTRLREG_MDMGPIO03_PUP_MDMGPIO03_SHIFT 5
#define PADCTRLREG_MDMGPIO03_PUP_MDMGPIO03_MASK 0x00000020
#define PADCTRLREG_MDMGPIO03_SRC_MDMGPIO03_SHIFT 4
#define PADCTRLREG_MDMGPIO03_SRC_MDMGPIO03_MASK 0x00000010
#define PADCTRLREG_MDMGPIO03_IND_MDMGPIO03_SHIFT 3
#define PADCTRLREG_MDMGPIO03_IND_MDMGPIO03_MASK 0x00000008
#define PADCTRLREG_MDMGPIO03_SEL_2_MDMGPIO03_SHIFT 2
#define PADCTRLREG_MDMGPIO03_SEL_2_MDMGPIO03_MASK 0x00000004
#define PADCTRLREG_MDMGPIO03_SEL_1_MDMGPIO03_SHIFT 1
#define PADCTRLREG_MDMGPIO03_SEL_1_MDMGPIO03_MASK 0x00000002
#define PADCTRLREG_MDMGPIO03_SEL_0_MDMGPIO03_SHIFT 0
#define PADCTRLREG_MDMGPIO03_SEL_0_MDMGPIO03_MASK 0x00000001
#define PADCTRLREG_MDMGPIO04_OFFSET 0x000000F8
#define PADCTRLREG_MDMGPIO04_TYPE UInt32
#define PADCTRLREG_MDMGPIO04_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MDMGPIO04_PINSEL_MDMGPIO04_SHIFT 8
#define PADCTRLREG_MDMGPIO04_PINSEL_MDMGPIO04_MASK 0x00000700
#define PADCTRLREG_MDMGPIO04_HYS_EN_MDMGPIO04_SHIFT 7
#define PADCTRLREG_MDMGPIO04_HYS_EN_MDMGPIO04_MASK 0x00000080
#define PADCTRLREG_MDMGPIO04_PDN_MDMGPIO04_SHIFT 6
#define PADCTRLREG_MDMGPIO04_PDN_MDMGPIO04_MASK 0x00000040
#define PADCTRLREG_MDMGPIO04_PUP_MDMGPIO04_SHIFT 5
#define PADCTRLREG_MDMGPIO04_PUP_MDMGPIO04_MASK 0x00000020
#define PADCTRLREG_MDMGPIO04_SRC_MDMGPIO04_SHIFT 4
#define PADCTRLREG_MDMGPIO04_SRC_MDMGPIO04_MASK 0x00000010
#define PADCTRLREG_MDMGPIO04_IND_MDMGPIO04_SHIFT 3
#define PADCTRLREG_MDMGPIO04_IND_MDMGPIO04_MASK 0x00000008
#define PADCTRLREG_MDMGPIO04_SEL_2_MDMGPIO04_SHIFT 2
#define PADCTRLREG_MDMGPIO04_SEL_2_MDMGPIO04_MASK 0x00000004
#define PADCTRLREG_MDMGPIO04_SEL_1_MDMGPIO04_SHIFT 1
#define PADCTRLREG_MDMGPIO04_SEL_1_MDMGPIO04_MASK 0x00000002
#define PADCTRLREG_MDMGPIO04_SEL_0_MDMGPIO04_SHIFT 0
#define PADCTRLREG_MDMGPIO04_SEL_0_MDMGPIO04_MASK 0x00000001
#define PADCTRLREG_MDMGPIO05_OFFSET 0x000000FC
#define PADCTRLREG_MDMGPIO05_TYPE UInt32
#define PADCTRLREG_MDMGPIO05_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MDMGPIO05_PINSEL_MDMGPIO05_SHIFT 8
#define PADCTRLREG_MDMGPIO05_PINSEL_MDMGPIO05_MASK 0x00000700
#define PADCTRLREG_MDMGPIO05_HYS_EN_MDMGPIO05_SHIFT 7
#define PADCTRLREG_MDMGPIO05_HYS_EN_MDMGPIO05_MASK 0x00000080
#define PADCTRLREG_MDMGPIO05_PDN_MDMGPIO05_SHIFT 6
#define PADCTRLREG_MDMGPIO05_PDN_MDMGPIO05_MASK 0x00000040
#define PADCTRLREG_MDMGPIO05_PUP_MDMGPIO05_SHIFT 5
#define PADCTRLREG_MDMGPIO05_PUP_MDMGPIO05_MASK 0x00000020
#define PADCTRLREG_MDMGPIO05_SRC_MDMGPIO05_SHIFT 4
#define PADCTRLREG_MDMGPIO05_SRC_MDMGPIO05_MASK 0x00000010
#define PADCTRLREG_MDMGPIO05_IND_MDMGPIO05_SHIFT 3
#define PADCTRLREG_MDMGPIO05_IND_MDMGPIO05_MASK 0x00000008
#define PADCTRLREG_MDMGPIO05_SEL_2_MDMGPIO05_SHIFT 2
#define PADCTRLREG_MDMGPIO05_SEL_2_MDMGPIO05_MASK 0x00000004
#define PADCTRLREG_MDMGPIO05_SEL_1_MDMGPIO05_SHIFT 1
#define PADCTRLREG_MDMGPIO05_SEL_1_MDMGPIO05_MASK 0x00000002
#define PADCTRLREG_MDMGPIO05_SEL_0_MDMGPIO05_SHIFT 0
#define PADCTRLREG_MDMGPIO05_SEL_0_MDMGPIO05_MASK 0x00000001
#define PADCTRLREG_MDMGPIO06_OFFSET 0x00000100
#define PADCTRLREG_MDMGPIO06_TYPE UInt32
#define PADCTRLREG_MDMGPIO06_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MDMGPIO06_PINSEL_MDMGPIO06_SHIFT 8
#define PADCTRLREG_MDMGPIO06_PINSEL_MDMGPIO06_MASK 0x00000700
#define PADCTRLREG_MDMGPIO06_HYS_EN_MDMGPIO06_SHIFT 7
#define PADCTRLREG_MDMGPIO06_HYS_EN_MDMGPIO06_MASK 0x00000080
#define PADCTRLREG_MDMGPIO06_PDN_MDMGPIO06_SHIFT 6
#define PADCTRLREG_MDMGPIO06_PDN_MDMGPIO06_MASK 0x00000040
#define PADCTRLREG_MDMGPIO06_PUP_MDMGPIO06_SHIFT 5
#define PADCTRLREG_MDMGPIO06_PUP_MDMGPIO06_MASK 0x00000020
#define PADCTRLREG_MDMGPIO06_SRC_MDMGPIO06_SHIFT 4
#define PADCTRLREG_MDMGPIO06_SRC_MDMGPIO06_MASK 0x00000010
#define PADCTRLREG_MDMGPIO06_IND_MDMGPIO06_SHIFT 3
#define PADCTRLREG_MDMGPIO06_IND_MDMGPIO06_MASK 0x00000008
#define PADCTRLREG_MDMGPIO06_SEL_2_MDMGPIO06_SHIFT 2
#define PADCTRLREG_MDMGPIO06_SEL_2_MDMGPIO06_MASK 0x00000004
#define PADCTRLREG_MDMGPIO06_SEL_1_MDMGPIO06_SHIFT 1
#define PADCTRLREG_MDMGPIO06_SEL_1_MDMGPIO06_MASK 0x00000002
#define PADCTRLREG_MDMGPIO06_SEL_0_MDMGPIO06_SHIFT 0
#define PADCTRLREG_MDMGPIO06_SEL_0_MDMGPIO06_MASK 0x00000001
#define PADCTRLREG_MDMGPIO07_OFFSET 0x00000104
#define PADCTRLREG_MDMGPIO07_TYPE UInt32
#define PADCTRLREG_MDMGPIO07_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MDMGPIO07_PINSEL_MDMGPIO07_SHIFT 8
#define PADCTRLREG_MDMGPIO07_PINSEL_MDMGPIO07_MASK 0x00000700
#define PADCTRLREG_MDMGPIO07_HYS_EN_MDMGPIO07_SHIFT 7
#define PADCTRLREG_MDMGPIO07_HYS_EN_MDMGPIO07_MASK 0x00000080
#define PADCTRLREG_MDMGPIO07_PDN_MDMGPIO07_SHIFT 6
#define PADCTRLREG_MDMGPIO07_PDN_MDMGPIO07_MASK 0x00000040
#define PADCTRLREG_MDMGPIO07_PUP_MDMGPIO07_SHIFT 5
#define PADCTRLREG_MDMGPIO07_PUP_MDMGPIO07_MASK 0x00000020
#define PADCTRLREG_MDMGPIO07_SRC_MDMGPIO07_SHIFT 4
#define PADCTRLREG_MDMGPIO07_SRC_MDMGPIO07_MASK 0x00000010
#define PADCTRLREG_MDMGPIO07_IND_MDMGPIO07_SHIFT 3
#define PADCTRLREG_MDMGPIO07_IND_MDMGPIO07_MASK 0x00000008
#define PADCTRLREG_MDMGPIO07_SEL_2_MDMGPIO07_SHIFT 2
#define PADCTRLREG_MDMGPIO07_SEL_2_MDMGPIO07_MASK 0x00000004
#define PADCTRLREG_MDMGPIO07_SEL_1_MDMGPIO07_SHIFT 1
#define PADCTRLREG_MDMGPIO07_SEL_1_MDMGPIO07_MASK 0x00000002
#define PADCTRLREG_MDMGPIO07_SEL_0_MDMGPIO07_SHIFT 0
#define PADCTRLREG_MDMGPIO07_SEL_0_MDMGPIO07_MASK 0x00000001
#define PADCTRLREG_MDMGPIO08_OFFSET 0x00000108
#define PADCTRLREG_MDMGPIO08_TYPE UInt32
#define PADCTRLREG_MDMGPIO08_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MDMGPIO08_PINSEL_MDMGPIO08_SHIFT 8
#define PADCTRLREG_MDMGPIO08_PINSEL_MDMGPIO08_MASK 0x00000700
#define PADCTRLREG_MDMGPIO08_HYS_EN_MDMGPIO08_SHIFT 7
#define PADCTRLREG_MDMGPIO08_HYS_EN_MDMGPIO08_MASK 0x00000080
#define PADCTRLREG_MDMGPIO08_PDN_MDMGPIO08_SHIFT 6
#define PADCTRLREG_MDMGPIO08_PDN_MDMGPIO08_MASK 0x00000040
#define PADCTRLREG_MDMGPIO08_PUP_MDMGPIO08_SHIFT 5
#define PADCTRLREG_MDMGPIO08_PUP_MDMGPIO08_MASK 0x00000020
#define PADCTRLREG_MDMGPIO08_SRC_MDMGPIO08_SHIFT 4
#define PADCTRLREG_MDMGPIO08_SRC_MDMGPIO08_MASK 0x00000010
#define PADCTRLREG_MDMGPIO08_IND_MDMGPIO08_SHIFT 3
#define PADCTRLREG_MDMGPIO08_IND_MDMGPIO08_MASK 0x00000008
#define PADCTRLREG_MDMGPIO08_SEL_2_MDMGPIO08_SHIFT 2
#define PADCTRLREG_MDMGPIO08_SEL_2_MDMGPIO08_MASK 0x00000004
#define PADCTRLREG_MDMGPIO08_SEL_1_MDMGPIO08_SHIFT 1
#define PADCTRLREG_MDMGPIO08_SEL_1_MDMGPIO08_MASK 0x00000002
#define PADCTRLREG_MDMGPIO08_SEL_0_MDMGPIO08_SHIFT 0
#define PADCTRLREG_MDMGPIO08_SEL_0_MDMGPIO08_MASK 0x00000001
#define PADCTRLREG_MMC0CK_OFFSET 0x0000010C
#define PADCTRLREG_MMC0CK_TYPE UInt32
#define PADCTRLREG_MMC0CK_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC0CK_PINSEL_MMC0CK_SHIFT 8
#define PADCTRLREG_MMC0CK_PINSEL_MMC0CK_MASK 0x00000700
#define PADCTRLREG_MMC0CK_HYS_EN_MMC0CK_SHIFT 7
#define PADCTRLREG_MMC0CK_HYS_EN_MMC0CK_MASK 0x00000080
#define PADCTRLREG_MMC0CK_PDN_MMC0CK_SHIFT 6
#define PADCTRLREG_MMC0CK_PDN_MMC0CK_MASK 0x00000040
#define PADCTRLREG_MMC0CK_PUP_MMC0CK_SHIFT 5
#define PADCTRLREG_MMC0CK_PUP_MMC0CK_MASK 0x00000020
#define PADCTRLREG_MMC0CK_SRC_MMC0CK_SHIFT 4
#define PADCTRLREG_MMC0CK_SRC_MMC0CK_MASK 0x00000010
#define PADCTRLREG_MMC0CK_IND_MMC0CK_SHIFT 3
#define PADCTRLREG_MMC0CK_IND_MMC0CK_MASK 0x00000008
#define PADCTRLREG_MMC0CK_SEL_2_MMC0CK_SHIFT 2
#define PADCTRLREG_MMC0CK_SEL_2_MMC0CK_MASK 0x00000004
#define PADCTRLREG_MMC0CK_SEL_1_MMC0CK_SHIFT 1
#define PADCTRLREG_MMC0CK_SEL_1_MMC0CK_MASK 0x00000002
#define PADCTRLREG_MMC0CK_SEL_0_MMC0CK_SHIFT 0
#define PADCTRLREG_MMC0CK_SEL_0_MMC0CK_MASK 0x00000001
#define PADCTRLREG_MMC0CMD_OFFSET 0x00000110
#define PADCTRLREG_MMC0CMD_TYPE UInt32
#define PADCTRLREG_MMC0CMD_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC0CMD_PINSEL_MMC0CMD_SHIFT 8
#define PADCTRLREG_MMC0CMD_PINSEL_MMC0CMD_MASK 0x00000700
#define PADCTRLREG_MMC0CMD_HYS_EN_MMC0CMD_SHIFT 7
#define PADCTRLREG_MMC0CMD_HYS_EN_MMC0CMD_MASK 0x00000080
#define PADCTRLREG_MMC0CMD_PDN_MMC0CMD_SHIFT 6
#define PADCTRLREG_MMC0CMD_PDN_MMC0CMD_MASK 0x00000040
#define PADCTRLREG_MMC0CMD_PUP_MMC0CMD_SHIFT 5
#define PADCTRLREG_MMC0CMD_PUP_MMC0CMD_MASK 0x00000020
#define PADCTRLREG_MMC0CMD_SRC_MMC0CMD_SHIFT 4
#define PADCTRLREG_MMC0CMD_SRC_MMC0CMD_MASK 0x00000010
#define PADCTRLREG_MMC0CMD_IND_MMC0CMD_SHIFT 3
#define PADCTRLREG_MMC0CMD_IND_MMC0CMD_MASK 0x00000008
#define PADCTRLREG_MMC0CMD_SEL_2_MMC0CMD_SHIFT 2
#define PADCTRLREG_MMC0CMD_SEL_2_MMC0CMD_MASK 0x00000004
#define PADCTRLREG_MMC0CMD_SEL_1_MMC0CMD_SHIFT 1
#define PADCTRLREG_MMC0CMD_SEL_1_MMC0CMD_MASK 0x00000002
#define PADCTRLREG_MMC0CMD_SEL_0_MMC0CMD_SHIFT 0
#define PADCTRLREG_MMC0CMD_SEL_0_MMC0CMD_MASK 0x00000001
#define PADCTRLREG_MMC0DAT0_OFFSET 0x00000114
#define PADCTRLREG_MMC0DAT0_TYPE UInt32
#define PADCTRLREG_MMC0DAT0_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC0DAT0_PINSEL_MMC0DAT0_SHIFT 8
#define PADCTRLREG_MMC0DAT0_PINSEL_MMC0DAT0_MASK 0x00000700
#define PADCTRLREG_MMC0DAT0_HYS_EN_MMC0DAT0_SHIFT 7
#define PADCTRLREG_MMC0DAT0_HYS_EN_MMC0DAT0_MASK 0x00000080
#define PADCTRLREG_MMC0DAT0_PDN_MMC0DAT0_SHIFT 6
#define PADCTRLREG_MMC0DAT0_PDN_MMC0DAT0_MASK 0x00000040
#define PADCTRLREG_MMC0DAT0_PUP_MMC0DAT0_SHIFT 5
#define PADCTRLREG_MMC0DAT0_PUP_MMC0DAT0_MASK 0x00000020
#define PADCTRLREG_MMC0DAT0_SRC_MMC0DAT0_SHIFT 4
#define PADCTRLREG_MMC0DAT0_SRC_MMC0DAT0_MASK 0x00000010
#define PADCTRLREG_MMC0DAT0_IND_MMC0DAT0_SHIFT 3
#define PADCTRLREG_MMC0DAT0_IND_MMC0DAT0_MASK 0x00000008
#define PADCTRLREG_MMC0DAT0_SEL_2_MMC0DAT0_SHIFT 2
#define PADCTRLREG_MMC0DAT0_SEL_2_MMC0DAT0_MASK 0x00000004
#define PADCTRLREG_MMC0DAT0_SEL_1_MMC0DAT0_SHIFT 1
#define PADCTRLREG_MMC0DAT0_SEL_1_MMC0DAT0_MASK 0x00000002
#define PADCTRLREG_MMC0DAT0_SEL_0_MMC0DAT0_SHIFT 0
#define PADCTRLREG_MMC0DAT0_SEL_0_MMC0DAT0_MASK 0x00000001
#define PADCTRLREG_MMC0DAT1_OFFSET 0x00000118
#define PADCTRLREG_MMC0DAT1_TYPE UInt32
#define PADCTRLREG_MMC0DAT1_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC0DAT1_PINSEL_MMC0DAT1_SHIFT 8
#define PADCTRLREG_MMC0DAT1_PINSEL_MMC0DAT1_MASK 0x00000700
#define PADCTRLREG_MMC0DAT1_HYS_EN_MMC0DAT1_SHIFT 7
#define PADCTRLREG_MMC0DAT1_HYS_EN_MMC0DAT1_MASK 0x00000080
#define PADCTRLREG_MMC0DAT1_PDN_MMC0DAT1_SHIFT 6
#define PADCTRLREG_MMC0DAT1_PDN_MMC0DAT1_MASK 0x00000040
#define PADCTRLREG_MMC0DAT1_PUP_MMC0DAT1_SHIFT 5
#define PADCTRLREG_MMC0DAT1_PUP_MMC0DAT1_MASK 0x00000020
#define PADCTRLREG_MMC0DAT1_SRC_MMC0DAT1_SHIFT 4
#define PADCTRLREG_MMC0DAT1_SRC_MMC0DAT1_MASK 0x00000010
#define PADCTRLREG_MMC0DAT1_IND_MMC0DAT1_SHIFT 3
#define PADCTRLREG_MMC0DAT1_IND_MMC0DAT1_MASK 0x00000008
#define PADCTRLREG_MMC0DAT1_SEL_2_MMC0DAT1_SHIFT 2
#define PADCTRLREG_MMC0DAT1_SEL_2_MMC0DAT1_MASK 0x00000004
#define PADCTRLREG_MMC0DAT1_SEL_1_MMC0DAT1_SHIFT 1
#define PADCTRLREG_MMC0DAT1_SEL_1_MMC0DAT1_MASK 0x00000002
#define PADCTRLREG_MMC0DAT1_SEL_0_MMC0DAT1_SHIFT 0
#define PADCTRLREG_MMC0DAT1_SEL_0_MMC0DAT1_MASK 0x00000001
#define PADCTRLREG_MMC0DAT2_OFFSET 0x0000011C
#define PADCTRLREG_MMC0DAT2_TYPE UInt32
#define PADCTRLREG_MMC0DAT2_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC0DAT2_PINSEL_MMC0DAT2_SHIFT 8
#define PADCTRLREG_MMC0DAT2_PINSEL_MMC0DAT2_MASK 0x00000700
#define PADCTRLREG_MMC0DAT2_HYS_EN_MMC0DAT2_SHIFT 7
#define PADCTRLREG_MMC0DAT2_HYS_EN_MMC0DAT2_MASK 0x00000080
#define PADCTRLREG_MMC0DAT2_PDN_MMC0DAT2_SHIFT 6
#define PADCTRLREG_MMC0DAT2_PDN_MMC0DAT2_MASK 0x00000040
#define PADCTRLREG_MMC0DAT2_PUP_MMC0DAT2_SHIFT 5
#define PADCTRLREG_MMC0DAT2_PUP_MMC0DAT2_MASK 0x00000020
#define PADCTRLREG_MMC0DAT2_SRC_MMC0DAT2_SHIFT 4
#define PADCTRLREG_MMC0DAT2_SRC_MMC0DAT2_MASK 0x00000010
#define PADCTRLREG_MMC0DAT2_IND_MMC0DAT2_SHIFT 3
#define PADCTRLREG_MMC0DAT2_IND_MMC0DAT2_MASK 0x00000008
#define PADCTRLREG_MMC0DAT2_SEL_2_MMC0DAT2_SHIFT 2
#define PADCTRLREG_MMC0DAT2_SEL_2_MMC0DAT2_MASK 0x00000004
#define PADCTRLREG_MMC0DAT2_SEL_1_MMC0DAT2_SHIFT 1
#define PADCTRLREG_MMC0DAT2_SEL_1_MMC0DAT2_MASK 0x00000002
#define PADCTRLREG_MMC0DAT2_SEL_0_MMC0DAT2_SHIFT 0
#define PADCTRLREG_MMC0DAT2_SEL_0_MMC0DAT2_MASK 0x00000001
#define PADCTRLREG_MMC0DAT3_OFFSET 0x00000120
#define PADCTRLREG_MMC0DAT3_TYPE UInt32
#define PADCTRLREG_MMC0DAT3_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC0DAT3_PINSEL_MMC0DAT3_SHIFT 8
#define PADCTRLREG_MMC0DAT3_PINSEL_MMC0DAT3_MASK 0x00000700
#define PADCTRLREG_MMC0DAT3_HYS_EN_MMC0DAT3_SHIFT 7
#define PADCTRLREG_MMC0DAT3_HYS_EN_MMC0DAT3_MASK 0x00000080
#define PADCTRLREG_MMC0DAT3_PDN_MMC0DAT3_SHIFT 6
#define PADCTRLREG_MMC0DAT3_PDN_MMC0DAT3_MASK 0x00000040
#define PADCTRLREG_MMC0DAT3_PUP_MMC0DAT3_SHIFT 5
#define PADCTRLREG_MMC0DAT3_PUP_MMC0DAT3_MASK 0x00000020
#define PADCTRLREG_MMC0DAT3_SRC_MMC0DAT3_SHIFT 4
#define PADCTRLREG_MMC0DAT3_SRC_MMC0DAT3_MASK 0x00000010
#define PADCTRLREG_MMC0DAT3_IND_MMC0DAT3_SHIFT 3
#define PADCTRLREG_MMC0DAT3_IND_MMC0DAT3_MASK 0x00000008
#define PADCTRLREG_MMC0DAT3_SEL_2_MMC0DAT3_SHIFT 2
#define PADCTRLREG_MMC0DAT3_SEL_2_MMC0DAT3_MASK 0x00000004
#define PADCTRLREG_MMC0DAT3_SEL_1_MMC0DAT3_SHIFT 1
#define PADCTRLREG_MMC0DAT3_SEL_1_MMC0DAT3_MASK 0x00000002
#define PADCTRLREG_MMC0DAT3_SEL_0_MMC0DAT3_SHIFT 0
#define PADCTRLREG_MMC0DAT3_SEL_0_MMC0DAT3_MASK 0x00000001
#define PADCTRLREG_MMC0DAT4_OFFSET 0x00000124
#define PADCTRLREG_MMC0DAT4_TYPE UInt32
#define PADCTRLREG_MMC0DAT4_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC0DAT4_PINSEL_MMC0DAT4_SHIFT 8
#define PADCTRLREG_MMC0DAT4_PINSEL_MMC0DAT4_MASK 0x00000700
#define PADCTRLREG_MMC0DAT4_HYS_EN_MMC0DAT4_SHIFT 7
#define PADCTRLREG_MMC0DAT4_HYS_EN_MMC0DAT4_MASK 0x00000080
#define PADCTRLREG_MMC0DAT4_PDN_MMC0DAT4_SHIFT 6
#define PADCTRLREG_MMC0DAT4_PDN_MMC0DAT4_MASK 0x00000040
#define PADCTRLREG_MMC0DAT4_PUP_MMC0DAT4_SHIFT 5
#define PADCTRLREG_MMC0DAT4_PUP_MMC0DAT4_MASK 0x00000020
#define PADCTRLREG_MMC0DAT4_SRC_MMC0DAT4_SHIFT 4
#define PADCTRLREG_MMC0DAT4_SRC_MMC0DAT4_MASK 0x00000010
#define PADCTRLREG_MMC0DAT4_IND_MMC0DAT4_SHIFT 3
#define PADCTRLREG_MMC0DAT4_IND_MMC0DAT4_MASK 0x00000008
#define PADCTRLREG_MMC0DAT4_SEL_2_MMC0DAT4_SHIFT 2
#define PADCTRLREG_MMC0DAT4_SEL_2_MMC0DAT4_MASK 0x00000004
#define PADCTRLREG_MMC0DAT4_SEL_1_MMC0DAT4_SHIFT 1
#define PADCTRLREG_MMC0DAT4_SEL_1_MMC0DAT4_MASK 0x00000002
#define PADCTRLREG_MMC0DAT4_SEL_0_MMC0DAT4_SHIFT 0
#define PADCTRLREG_MMC0DAT4_SEL_0_MMC0DAT4_MASK 0x00000001
#define PADCTRLREG_MMC0DAT5_OFFSET 0x00000128
#define PADCTRLREG_MMC0DAT5_TYPE UInt32
#define PADCTRLREG_MMC0DAT5_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC0DAT5_PINSEL_MMC0DAT5_SHIFT 8
#define PADCTRLREG_MMC0DAT5_PINSEL_MMC0DAT5_MASK 0x00000700
#define PADCTRLREG_MMC0DAT5_HYS_EN_MMC0DAT5_SHIFT 7
#define PADCTRLREG_MMC0DAT5_HYS_EN_MMC0DAT5_MASK 0x00000080
#define PADCTRLREG_MMC0DAT5_PDN_MMC0DAT5_SHIFT 6
#define PADCTRLREG_MMC0DAT5_PDN_MMC0DAT5_MASK 0x00000040
#define PADCTRLREG_MMC0DAT5_PUP_MMC0DAT5_SHIFT 5
#define PADCTRLREG_MMC0DAT5_PUP_MMC0DAT5_MASK 0x00000020
#define PADCTRLREG_MMC0DAT5_SRC_MMC0DAT5_SHIFT 4
#define PADCTRLREG_MMC0DAT5_SRC_MMC0DAT5_MASK 0x00000010
#define PADCTRLREG_MMC0DAT5_IND_MMC0DAT5_SHIFT 3
#define PADCTRLREG_MMC0DAT5_IND_MMC0DAT5_MASK 0x00000008
#define PADCTRLREG_MMC0DAT5_SEL_2_MMC0DAT5_SHIFT 2
#define PADCTRLREG_MMC0DAT5_SEL_2_MMC0DAT5_MASK 0x00000004
#define PADCTRLREG_MMC0DAT5_SEL_1_MMC0DAT5_SHIFT 1
#define PADCTRLREG_MMC0DAT5_SEL_1_MMC0DAT5_MASK 0x00000002
#define PADCTRLREG_MMC0DAT5_SEL_0_MMC0DAT5_SHIFT 0
#define PADCTRLREG_MMC0DAT5_SEL_0_MMC0DAT5_MASK 0x00000001
#define PADCTRLREG_MMC0DAT6_OFFSET 0x0000012C
#define PADCTRLREG_MMC0DAT6_TYPE UInt32
#define PADCTRLREG_MMC0DAT6_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC0DAT6_PINSEL_MMC0DAT6_SHIFT 8
#define PADCTRLREG_MMC0DAT6_PINSEL_MMC0DAT6_MASK 0x00000700
#define PADCTRLREG_MMC0DAT6_HYS_EN_MMC0DAT6_SHIFT 7
#define PADCTRLREG_MMC0DAT6_HYS_EN_MMC0DAT6_MASK 0x00000080
#define PADCTRLREG_MMC0DAT6_PDN_MMC0DAT6_SHIFT 6
#define PADCTRLREG_MMC0DAT6_PDN_MMC0DAT6_MASK 0x00000040
#define PADCTRLREG_MMC0DAT6_PUP_MMC0DAT6_SHIFT 5
#define PADCTRLREG_MMC0DAT6_PUP_MMC0DAT6_MASK 0x00000020
#define PADCTRLREG_MMC0DAT6_SRC_MMC0DAT6_SHIFT 4
#define PADCTRLREG_MMC0DAT6_SRC_MMC0DAT6_MASK 0x00000010
#define PADCTRLREG_MMC0DAT6_IND_MMC0DAT6_SHIFT 3
#define PADCTRLREG_MMC0DAT6_IND_MMC0DAT6_MASK 0x00000008
#define PADCTRLREG_MMC0DAT6_SEL_2_MMC0DAT6_SHIFT 2
#define PADCTRLREG_MMC0DAT6_SEL_2_MMC0DAT6_MASK 0x00000004
#define PADCTRLREG_MMC0DAT6_SEL_1_MMC0DAT6_SHIFT 1
#define PADCTRLREG_MMC0DAT6_SEL_1_MMC0DAT6_MASK 0x00000002
#define PADCTRLREG_MMC0DAT6_SEL_0_MMC0DAT6_SHIFT 0
#define PADCTRLREG_MMC0DAT6_SEL_0_MMC0DAT6_MASK 0x00000001
#define PADCTRLREG_MMC0DAT7_OFFSET 0x00000130
#define PADCTRLREG_MMC0DAT7_TYPE UInt32
#define PADCTRLREG_MMC0DAT7_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC0DAT7_PINSEL_MMC0DAT7_SHIFT 8
#define PADCTRLREG_MMC0DAT7_PINSEL_MMC0DAT7_MASK 0x00000700
#define PADCTRLREG_MMC0DAT7_HYS_EN_MMC0DAT7_SHIFT 7
#define PADCTRLREG_MMC0DAT7_HYS_EN_MMC0DAT7_MASK 0x00000080
#define PADCTRLREG_MMC0DAT7_PDN_MMC0DAT7_SHIFT 6
#define PADCTRLREG_MMC0DAT7_PDN_MMC0DAT7_MASK 0x00000040
#define PADCTRLREG_MMC0DAT7_PUP_MMC0DAT7_SHIFT 5
#define PADCTRLREG_MMC0DAT7_PUP_MMC0DAT7_MASK 0x00000020
#define PADCTRLREG_MMC0DAT7_SRC_MMC0DAT7_SHIFT 4
#define PADCTRLREG_MMC0DAT7_SRC_MMC0DAT7_MASK 0x00000010
#define PADCTRLREG_MMC0DAT7_IND_MMC0DAT7_SHIFT 3
#define PADCTRLREG_MMC0DAT7_IND_MMC0DAT7_MASK 0x00000008
#define PADCTRLREG_MMC0DAT7_SEL_2_MMC0DAT7_SHIFT 2
#define PADCTRLREG_MMC0DAT7_SEL_2_MMC0DAT7_MASK 0x00000004
#define PADCTRLREG_MMC0DAT7_SEL_1_MMC0DAT7_SHIFT 1
#define PADCTRLREG_MMC0DAT7_SEL_1_MMC0DAT7_MASK 0x00000002
#define PADCTRLREG_MMC0DAT7_SEL_0_MMC0DAT7_SHIFT 0
#define PADCTRLREG_MMC0DAT7_SEL_0_MMC0DAT7_MASK 0x00000001
#define PADCTRLREG_MMC0RST_OFFSET 0x00000134
#define PADCTRLREG_MMC0RST_TYPE UInt32
#define PADCTRLREG_MMC0RST_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC0RST_PINSEL_MMC0RST_SHIFT 8
#define PADCTRLREG_MMC0RST_PINSEL_MMC0RST_MASK 0x00000700
#define PADCTRLREG_MMC0RST_HYS_EN_MMC0RST_SHIFT 7
#define PADCTRLREG_MMC0RST_HYS_EN_MMC0RST_MASK 0x00000080
#define PADCTRLREG_MMC0RST_PDN_MMC0RST_SHIFT 6
#define PADCTRLREG_MMC0RST_PDN_MMC0RST_MASK 0x00000040
#define PADCTRLREG_MMC0RST_PUP_MMC0RST_SHIFT 5
#define PADCTRLREG_MMC0RST_PUP_MMC0RST_MASK 0x00000020
#define PADCTRLREG_MMC0RST_SRC_MMC0RST_SHIFT 4
#define PADCTRLREG_MMC0RST_SRC_MMC0RST_MASK 0x00000010
#define PADCTRLREG_MMC0RST_IND_MMC0RST_SHIFT 3
#define PADCTRLREG_MMC0RST_IND_MMC0RST_MASK 0x00000008
#define PADCTRLREG_MMC0RST_SEL_2_MMC0RST_SHIFT 2
#define PADCTRLREG_MMC0RST_SEL_2_MMC0RST_MASK 0x00000004
#define PADCTRLREG_MMC0RST_SEL_1_MMC0RST_SHIFT 1
#define PADCTRLREG_MMC0RST_SEL_1_MMC0RST_MASK 0x00000002
#define PADCTRLREG_MMC0RST_SEL_0_MMC0RST_SHIFT 0
#define PADCTRLREG_MMC0RST_SEL_0_MMC0RST_MASK 0x00000001
#define PADCTRLREG_MMC1CK_OFFSET 0x00000138
#define PADCTRLREG_MMC1CK_TYPE UInt32
#define PADCTRLREG_MMC1CK_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC1CK_PINSEL_MMC1CK_SHIFT 8
#define PADCTRLREG_MMC1CK_PINSEL_MMC1CK_MASK 0x00000700
#define PADCTRLREG_MMC1CK_HYS_EN_MMC1CK_SHIFT 7
#define PADCTRLREG_MMC1CK_HYS_EN_MMC1CK_MASK 0x00000080
#define PADCTRLREG_MMC1CK_PDN_MMC1CK_SHIFT 6
#define PADCTRLREG_MMC1CK_PDN_MMC1CK_MASK 0x00000040
#define PADCTRLREG_MMC1CK_PUP_MMC1CK_SHIFT 5
#define PADCTRLREG_MMC1CK_PUP_MMC1CK_MASK 0x00000020
#define PADCTRLREG_MMC1CK_SRC_MMC1CK_SHIFT 4
#define PADCTRLREG_MMC1CK_SRC_MMC1CK_MASK 0x00000010
#define PADCTRLREG_MMC1CK_IND_MMC1CK_SHIFT 3
#define PADCTRLREG_MMC1CK_IND_MMC1CK_MASK 0x00000008
#define PADCTRLREG_MMC1CK_SEL_2_MMC1CK_SHIFT 2
#define PADCTRLREG_MMC1CK_SEL_2_MMC1CK_MASK 0x00000004
#define PADCTRLREG_MMC1CK_SEL_1_MMC1CK_SHIFT 1
#define PADCTRLREG_MMC1CK_SEL_1_MMC1CK_MASK 0x00000002
#define PADCTRLREG_MMC1CK_SEL_0_MMC1CK_SHIFT 0
#define PADCTRLREG_MMC1CK_SEL_0_MMC1CK_MASK 0x00000001
#define PADCTRLREG_MMC1CMD_OFFSET 0x0000013C
#define PADCTRLREG_MMC1CMD_TYPE UInt32
#define PADCTRLREG_MMC1CMD_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC1CMD_PINSEL_MMC1CMD_SHIFT 8
#define PADCTRLREG_MMC1CMD_PINSEL_MMC1CMD_MASK 0x00000700
#define PADCTRLREG_MMC1CMD_HYS_EN_MMC1CMD_SHIFT 7
#define PADCTRLREG_MMC1CMD_HYS_EN_MMC1CMD_MASK 0x00000080
#define PADCTRLREG_MMC1CMD_PDN_MMC1CMD_SHIFT 6
#define PADCTRLREG_MMC1CMD_PDN_MMC1CMD_MASK 0x00000040
#define PADCTRLREG_MMC1CMD_PUP_MMC1CMD_SHIFT 5
#define PADCTRLREG_MMC1CMD_PUP_MMC1CMD_MASK 0x00000020
#define PADCTRLREG_MMC1CMD_SRC_MMC1CMD_SHIFT 4
#define PADCTRLREG_MMC1CMD_SRC_MMC1CMD_MASK 0x00000010
#define PADCTRLREG_MMC1CMD_IND_MMC1CMD_SHIFT 3
#define PADCTRLREG_MMC1CMD_IND_MMC1CMD_MASK 0x00000008
#define PADCTRLREG_MMC1CMD_SEL_2_MMC1CMD_SHIFT 2
#define PADCTRLREG_MMC1CMD_SEL_2_MMC1CMD_MASK 0x00000004
#define PADCTRLREG_MMC1CMD_SEL_1_MMC1CMD_SHIFT 1
#define PADCTRLREG_MMC1CMD_SEL_1_MMC1CMD_MASK 0x00000002
#define PADCTRLREG_MMC1CMD_SEL_0_MMC1CMD_SHIFT 0
#define PADCTRLREG_MMC1CMD_SEL_0_MMC1CMD_MASK 0x00000001
#define PADCTRLREG_MMC1DAT0_OFFSET 0x00000140
#define PADCTRLREG_MMC1DAT0_TYPE UInt32
#define PADCTRLREG_MMC1DAT0_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC1DAT0_PINSEL_MMC1DAT0_SHIFT 8
#define PADCTRLREG_MMC1DAT0_PINSEL_MMC1DAT0_MASK 0x00000700
#define PADCTRLREG_MMC1DAT0_HYS_EN_MMC1DAT0_SHIFT 7
#define PADCTRLREG_MMC1DAT0_HYS_EN_MMC1DAT0_MASK 0x00000080
#define PADCTRLREG_MMC1DAT0_PDN_MMC1DAT0_SHIFT 6
#define PADCTRLREG_MMC1DAT0_PDN_MMC1DAT0_MASK 0x00000040
#define PADCTRLREG_MMC1DAT0_PUP_MMC1DAT0_SHIFT 5
#define PADCTRLREG_MMC1DAT0_PUP_MMC1DAT0_MASK 0x00000020
#define PADCTRLREG_MMC1DAT0_SRC_MMC1DAT0_SHIFT 4
#define PADCTRLREG_MMC1DAT0_SRC_MMC1DAT0_MASK 0x00000010
#define PADCTRLREG_MMC1DAT0_IND_MMC1DAT0_SHIFT 3
#define PADCTRLREG_MMC1DAT0_IND_MMC1DAT0_MASK 0x00000008
#define PADCTRLREG_MMC1DAT0_SEL_2_MMC1DAT0_SHIFT 2
#define PADCTRLREG_MMC1DAT0_SEL_2_MMC1DAT0_MASK 0x00000004
#define PADCTRLREG_MMC1DAT0_SEL_1_MMC1DAT0_SHIFT 1
#define PADCTRLREG_MMC1DAT0_SEL_1_MMC1DAT0_MASK 0x00000002
#define PADCTRLREG_MMC1DAT0_SEL_0_MMC1DAT0_SHIFT 0
#define PADCTRLREG_MMC1DAT0_SEL_0_MMC1DAT0_MASK 0x00000001
#define PADCTRLREG_MMC1DAT1_OFFSET 0x00000144
#define PADCTRLREG_MMC1DAT1_TYPE UInt32
#define PADCTRLREG_MMC1DAT1_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC1DAT1_PINSEL_MMC1DAT1_SHIFT 8
#define PADCTRLREG_MMC1DAT1_PINSEL_MMC1DAT1_MASK 0x00000700
#define PADCTRLREG_MMC1DAT1_HYS_EN_MMC1DAT1_SHIFT 7
#define PADCTRLREG_MMC1DAT1_HYS_EN_MMC1DAT1_MASK 0x00000080
#define PADCTRLREG_MMC1DAT1_PDN_MMC1DAT1_SHIFT 6
#define PADCTRLREG_MMC1DAT1_PDN_MMC1DAT1_MASK 0x00000040
#define PADCTRLREG_MMC1DAT1_PUP_MMC1DAT1_SHIFT 5
#define PADCTRLREG_MMC1DAT1_PUP_MMC1DAT1_MASK 0x00000020
#define PADCTRLREG_MMC1DAT1_SRC_MMC1DAT1_SHIFT 4
#define PADCTRLREG_MMC1DAT1_SRC_MMC1DAT1_MASK 0x00000010
#define PADCTRLREG_MMC1DAT1_IND_MMC1DAT1_SHIFT 3
#define PADCTRLREG_MMC1DAT1_IND_MMC1DAT1_MASK 0x00000008
#define PADCTRLREG_MMC1DAT1_SEL_2_MMC1DAT1_SHIFT 2
#define PADCTRLREG_MMC1DAT1_SEL_2_MMC1DAT1_MASK 0x00000004
#define PADCTRLREG_MMC1DAT1_SEL_1_MMC1DAT1_SHIFT 1
#define PADCTRLREG_MMC1DAT1_SEL_1_MMC1DAT1_MASK 0x00000002
#define PADCTRLREG_MMC1DAT1_SEL_0_MMC1DAT1_SHIFT 0
#define PADCTRLREG_MMC1DAT1_SEL_0_MMC1DAT1_MASK 0x00000001
#define PADCTRLREG_MMC1DAT2_OFFSET 0x00000148
#define PADCTRLREG_MMC1DAT2_TYPE UInt32
#define PADCTRLREG_MMC1DAT2_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC1DAT2_PINSEL_MMC1DAT2_SHIFT 8
#define PADCTRLREG_MMC1DAT2_PINSEL_MMC1DAT2_MASK 0x00000700
#define PADCTRLREG_MMC1DAT2_HYS_EN_MMC1DAT2_SHIFT 7
#define PADCTRLREG_MMC1DAT2_HYS_EN_MMC1DAT2_MASK 0x00000080
#define PADCTRLREG_MMC1DAT2_PDN_MMC1DAT2_SHIFT 6
#define PADCTRLREG_MMC1DAT2_PDN_MMC1DAT2_MASK 0x00000040
#define PADCTRLREG_MMC1DAT2_PUP_MMC1DAT2_SHIFT 5
#define PADCTRLREG_MMC1DAT2_PUP_MMC1DAT2_MASK 0x00000020
#define PADCTRLREG_MMC1DAT2_SRC_MMC1DAT2_SHIFT 4
#define PADCTRLREG_MMC1DAT2_SRC_MMC1DAT2_MASK 0x00000010
#define PADCTRLREG_MMC1DAT2_IND_MMC1DAT2_SHIFT 3
#define PADCTRLREG_MMC1DAT2_IND_MMC1DAT2_MASK 0x00000008
#define PADCTRLREG_MMC1DAT2_SEL_2_MMC1DAT2_SHIFT 2
#define PADCTRLREG_MMC1DAT2_SEL_2_MMC1DAT2_MASK 0x00000004
#define PADCTRLREG_MMC1DAT2_SEL_1_MMC1DAT2_SHIFT 1
#define PADCTRLREG_MMC1DAT2_SEL_1_MMC1DAT2_MASK 0x00000002
#define PADCTRLREG_MMC1DAT2_SEL_0_MMC1DAT2_SHIFT 0
#define PADCTRLREG_MMC1DAT2_SEL_0_MMC1DAT2_MASK 0x00000001
#define PADCTRLREG_MMC1DAT3_OFFSET 0x0000014C
#define PADCTRLREG_MMC1DAT3_TYPE UInt32
#define PADCTRLREG_MMC1DAT3_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC1DAT3_PINSEL_MMC1DAT3_SHIFT 8
#define PADCTRLREG_MMC1DAT3_PINSEL_MMC1DAT3_MASK 0x00000700
#define PADCTRLREG_MMC1DAT3_HYS_EN_MMC1DAT3_SHIFT 7
#define PADCTRLREG_MMC1DAT3_HYS_EN_MMC1DAT3_MASK 0x00000080
#define PADCTRLREG_MMC1DAT3_PDN_MMC1DAT3_SHIFT 6
#define PADCTRLREG_MMC1DAT3_PDN_MMC1DAT3_MASK 0x00000040
#define PADCTRLREG_MMC1DAT3_PUP_MMC1DAT3_SHIFT 5
#define PADCTRLREG_MMC1DAT3_PUP_MMC1DAT3_MASK 0x00000020
#define PADCTRLREG_MMC1DAT3_SRC_MMC1DAT3_SHIFT 4
#define PADCTRLREG_MMC1DAT3_SRC_MMC1DAT3_MASK 0x00000010
#define PADCTRLREG_MMC1DAT3_IND_MMC1DAT3_SHIFT 3
#define PADCTRLREG_MMC1DAT3_IND_MMC1DAT3_MASK 0x00000008
#define PADCTRLREG_MMC1DAT3_SEL_2_MMC1DAT3_SHIFT 2
#define PADCTRLREG_MMC1DAT3_SEL_2_MMC1DAT3_MASK 0x00000004
#define PADCTRLREG_MMC1DAT3_SEL_1_MMC1DAT3_SHIFT 1
#define PADCTRLREG_MMC1DAT3_SEL_1_MMC1DAT3_MASK 0x00000002
#define PADCTRLREG_MMC1DAT3_SEL_0_MMC1DAT3_SHIFT 0
#define PADCTRLREG_MMC1DAT3_SEL_0_MMC1DAT3_MASK 0x00000001
#define PADCTRLREG_MMC1DAT4_OFFSET 0x00000150
#define PADCTRLREG_MMC1DAT4_TYPE UInt32
#define PADCTRLREG_MMC1DAT4_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC1DAT4_PINSEL_MMC1DAT4_SHIFT 8
#define PADCTRLREG_MMC1DAT4_PINSEL_MMC1DAT4_MASK 0x00000700
#define PADCTRLREG_MMC1DAT4_HYS_EN_MMC1DAT4_SHIFT 7
#define PADCTRLREG_MMC1DAT4_HYS_EN_MMC1DAT4_MASK 0x00000080
#define PADCTRLREG_MMC1DAT4_PDN_MMC1DAT4_SHIFT 6
#define PADCTRLREG_MMC1DAT4_PDN_MMC1DAT4_MASK 0x00000040
#define PADCTRLREG_MMC1DAT4_PUP_MMC1DAT4_SHIFT 5
#define PADCTRLREG_MMC1DAT4_PUP_MMC1DAT4_MASK 0x00000020
#define PADCTRLREG_MMC1DAT4_SRC_MMC1DAT4_SHIFT 4
#define PADCTRLREG_MMC1DAT4_SRC_MMC1DAT4_MASK 0x00000010
#define PADCTRLREG_MMC1DAT4_IND_MMC1DAT4_SHIFT 3
#define PADCTRLREG_MMC1DAT4_IND_MMC1DAT4_MASK 0x00000008
#define PADCTRLREG_MMC1DAT4_SEL_2_MMC1DAT4_SHIFT 2
#define PADCTRLREG_MMC1DAT4_SEL_2_MMC1DAT4_MASK 0x00000004
#define PADCTRLREG_MMC1DAT4_SEL_1_MMC1DAT4_SHIFT 1
#define PADCTRLREG_MMC1DAT4_SEL_1_MMC1DAT4_MASK 0x00000002
#define PADCTRLREG_MMC1DAT4_SEL_0_MMC1DAT4_SHIFT 0
#define PADCTRLREG_MMC1DAT4_SEL_0_MMC1DAT4_MASK 0x00000001
#define PADCTRLREG_MMC1DAT5_OFFSET 0x00000154
#define PADCTRLREG_MMC1DAT5_TYPE UInt32
#define PADCTRLREG_MMC1DAT5_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC1DAT5_PINSEL_MMC1DAT5_SHIFT 8
#define PADCTRLREG_MMC1DAT5_PINSEL_MMC1DAT5_MASK 0x00000700
#define PADCTRLREG_MMC1DAT5_HYS_EN_MMC1DAT5_SHIFT 7
#define PADCTRLREG_MMC1DAT5_HYS_EN_MMC1DAT5_MASK 0x00000080
#define PADCTRLREG_MMC1DAT5_PDN_MMC1DAT5_SHIFT 6
#define PADCTRLREG_MMC1DAT5_PDN_MMC1DAT5_MASK 0x00000040
#define PADCTRLREG_MMC1DAT5_PUP_MMC1DAT5_SHIFT 5
#define PADCTRLREG_MMC1DAT5_PUP_MMC1DAT5_MASK 0x00000020
#define PADCTRLREG_MMC1DAT5_SRC_MMC1DAT5_SHIFT 4
#define PADCTRLREG_MMC1DAT5_SRC_MMC1DAT5_MASK 0x00000010
#define PADCTRLREG_MMC1DAT5_IND_MMC1DAT5_SHIFT 3
#define PADCTRLREG_MMC1DAT5_IND_MMC1DAT5_MASK 0x00000008
#define PADCTRLREG_MMC1DAT5_SEL_2_MMC1DAT5_SHIFT 2
#define PADCTRLREG_MMC1DAT5_SEL_2_MMC1DAT5_MASK 0x00000004
#define PADCTRLREG_MMC1DAT5_SEL_1_MMC1DAT5_SHIFT 1
#define PADCTRLREG_MMC1DAT5_SEL_1_MMC1DAT5_MASK 0x00000002
#define PADCTRLREG_MMC1DAT5_SEL_0_MMC1DAT5_SHIFT 0
#define PADCTRLREG_MMC1DAT5_SEL_0_MMC1DAT5_MASK 0x00000001
#define PADCTRLREG_MMC1DAT6_OFFSET 0x00000158
#define PADCTRLREG_MMC1DAT6_TYPE UInt32
#define PADCTRLREG_MMC1DAT6_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC1DAT6_PINSEL_MMC1DAT6_SHIFT 8
#define PADCTRLREG_MMC1DAT6_PINSEL_MMC1DAT6_MASK 0x00000700
#define PADCTRLREG_MMC1DAT6_HYS_EN_MMC1DAT6_SHIFT 7
#define PADCTRLREG_MMC1DAT6_HYS_EN_MMC1DAT6_MASK 0x00000080
#define PADCTRLREG_MMC1DAT6_PDN_MMC1DAT6_SHIFT 6
#define PADCTRLREG_MMC1DAT6_PDN_MMC1DAT6_MASK 0x00000040
#define PADCTRLREG_MMC1DAT6_PUP_MMC1DAT6_SHIFT 5
#define PADCTRLREG_MMC1DAT6_PUP_MMC1DAT6_MASK 0x00000020
#define PADCTRLREG_MMC1DAT6_SRC_MMC1DAT6_SHIFT 4
#define PADCTRLREG_MMC1DAT6_SRC_MMC1DAT6_MASK 0x00000010
#define PADCTRLREG_MMC1DAT6_IND_MMC1DAT6_SHIFT 3
#define PADCTRLREG_MMC1DAT6_IND_MMC1DAT6_MASK 0x00000008
#define PADCTRLREG_MMC1DAT6_SEL_2_MMC1DAT6_SHIFT 2
#define PADCTRLREG_MMC1DAT6_SEL_2_MMC1DAT6_MASK 0x00000004
#define PADCTRLREG_MMC1DAT6_SEL_1_MMC1DAT6_SHIFT 1
#define PADCTRLREG_MMC1DAT6_SEL_1_MMC1DAT6_MASK 0x00000002
#define PADCTRLREG_MMC1DAT6_SEL_0_MMC1DAT6_SHIFT 0
#define PADCTRLREG_MMC1DAT6_SEL_0_MMC1DAT6_MASK 0x00000001
#define PADCTRLREG_MMC1DAT7_OFFSET 0x0000015C
#define PADCTRLREG_MMC1DAT7_TYPE UInt32
#define PADCTRLREG_MMC1DAT7_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC1DAT7_PINSEL_MMC1DAT7_SHIFT 8
#define PADCTRLREG_MMC1DAT7_PINSEL_MMC1DAT7_MASK 0x00000700
#define PADCTRLREG_MMC1DAT7_HYS_EN_MMC1DAT7_SHIFT 7
#define PADCTRLREG_MMC1DAT7_HYS_EN_MMC1DAT7_MASK 0x00000080
#define PADCTRLREG_MMC1DAT7_PDN_MMC1DAT7_SHIFT 6
#define PADCTRLREG_MMC1DAT7_PDN_MMC1DAT7_MASK 0x00000040
#define PADCTRLREG_MMC1DAT7_PUP_MMC1DAT7_SHIFT 5
#define PADCTRLREG_MMC1DAT7_PUP_MMC1DAT7_MASK 0x00000020
#define PADCTRLREG_MMC1DAT7_SRC_MMC1DAT7_SHIFT 4
#define PADCTRLREG_MMC1DAT7_SRC_MMC1DAT7_MASK 0x00000010
#define PADCTRLREG_MMC1DAT7_IND_MMC1DAT7_SHIFT 3
#define PADCTRLREG_MMC1DAT7_IND_MMC1DAT7_MASK 0x00000008
#define PADCTRLREG_MMC1DAT7_SEL_2_MMC1DAT7_SHIFT 2
#define PADCTRLREG_MMC1DAT7_SEL_2_MMC1DAT7_MASK 0x00000004
#define PADCTRLREG_MMC1DAT7_SEL_1_MMC1DAT7_SHIFT 1
#define PADCTRLREG_MMC1DAT7_SEL_1_MMC1DAT7_MASK 0x00000002
#define PADCTRLREG_MMC1DAT7_SEL_0_MMC1DAT7_SHIFT 0
#define PADCTRLREG_MMC1DAT7_SEL_0_MMC1DAT7_MASK 0x00000001
#define PADCTRLREG_MMC1RST_OFFSET 0x00000160
#define PADCTRLREG_MMC1RST_TYPE UInt32
#define PADCTRLREG_MMC1RST_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_MMC1RST_PINSEL_MMC1RST_SHIFT 8
#define PADCTRLREG_MMC1RST_PINSEL_MMC1RST_MASK 0x00000700
#define PADCTRLREG_MMC1RST_HYS_EN_MMC1RST_SHIFT 7
#define PADCTRLREG_MMC1RST_HYS_EN_MMC1RST_MASK 0x00000080
#define PADCTRLREG_MMC1RST_PDN_MMC1RST_SHIFT 6
#define PADCTRLREG_MMC1RST_PDN_MMC1RST_MASK 0x00000040
#define PADCTRLREG_MMC1RST_PUP_MMC1RST_SHIFT 5
#define PADCTRLREG_MMC1RST_PUP_MMC1RST_MASK 0x00000020
#define PADCTRLREG_MMC1RST_SRC_MMC1RST_SHIFT 4
#define PADCTRLREG_MMC1RST_SRC_MMC1RST_MASK 0x00000010
#define PADCTRLREG_MMC1RST_IND_MMC1RST_SHIFT 3
#define PADCTRLREG_MMC1RST_IND_MMC1RST_MASK 0x00000008
#define PADCTRLREG_MMC1RST_SEL_2_MMC1RST_SHIFT 2
#define PADCTRLREG_MMC1RST_SEL_2_MMC1RST_MASK 0x00000004
#define PADCTRLREG_MMC1RST_SEL_1_MMC1RST_SHIFT 1
#define PADCTRLREG_MMC1RST_SEL_1_MMC1RST_MASK 0x00000002
#define PADCTRLREG_MMC1RST_SEL_0_MMC1RST_SHIFT 0
#define PADCTRLREG_MMC1RST_SEL_0_MMC1RST_MASK 0x00000001
#define PADCTRLREG_PC1_OFFSET 0x00000164
#define PADCTRLREG_PC1_TYPE UInt32
#define PADCTRLREG_PC1_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_PC1_PINSEL_PC1_SHIFT 8
#define PADCTRLREG_PC1_PINSEL_PC1_MASK 0x00000700
#define PADCTRLREG_PC1_HYS_EN_PC1_SHIFT 7
#define PADCTRLREG_PC1_HYS_EN_PC1_MASK 0x00000080
#define PADCTRLREG_PC1_PDN_PC1_SHIFT 6
#define PADCTRLREG_PC1_PDN_PC1_MASK 0x00000040
#define PADCTRLREG_PC1_PUP_PC1_SHIFT 5
#define PADCTRLREG_PC1_PUP_PC1_MASK 0x00000020
#define PADCTRLREG_PC1_SRC_PC1_SHIFT 4
#define PADCTRLREG_PC1_SRC_PC1_MASK 0x00000010
#define PADCTRLREG_PC1_IND_PC1_SHIFT 3
#define PADCTRLREG_PC1_IND_PC1_MASK 0x00000008
#define PADCTRLREG_PC1_SEL_2_PC1_SHIFT 2
#define PADCTRLREG_PC1_SEL_2_PC1_MASK 0x00000004
#define PADCTRLREG_PC1_SEL_1_PC1_SHIFT 1
#define PADCTRLREG_PC1_SEL_1_PC1_MASK 0x00000002
#define PADCTRLREG_PC1_SEL_0_PC1_SHIFT 0
#define PADCTRLREG_PC1_SEL_0_PC1_MASK 0x00000001
#define PADCTRLREG_PC2_OFFSET 0x00000168
#define PADCTRLREG_PC2_TYPE UInt32
#define PADCTRLREG_PC2_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_PC2_PINSEL_PC2_SHIFT 8
#define PADCTRLREG_PC2_PINSEL_PC2_MASK 0x00000700
#define PADCTRLREG_PC2_HYS_EN_PC2_SHIFT 7
#define PADCTRLREG_PC2_HYS_EN_PC2_MASK 0x00000080
#define PADCTRLREG_PC2_PDN_PC2_SHIFT 6
#define PADCTRLREG_PC2_PDN_PC2_MASK 0x00000040
#define PADCTRLREG_PC2_PUP_PC2_SHIFT 5
#define PADCTRLREG_PC2_PUP_PC2_MASK 0x00000020
#define PADCTRLREG_PC2_SRC_PC2_SHIFT 4
#define PADCTRLREG_PC2_SRC_PC2_MASK 0x00000010
#define PADCTRLREG_PC2_IND_PC2_SHIFT 3
#define PADCTRLREG_PC2_IND_PC2_MASK 0x00000008
#define PADCTRLREG_PC2_SEL_2_PC2_SHIFT 2
#define PADCTRLREG_PC2_SEL_2_PC2_MASK 0x00000004
#define PADCTRLREG_PC2_SEL_1_PC2_SHIFT 1
#define PADCTRLREG_PC2_SEL_1_PC2_MASK 0x00000002
#define PADCTRLREG_PC2_SEL_0_PC2_SHIFT 0
#define PADCTRLREG_PC2_SEL_0_PC2_MASK 0x00000001
#define PADCTRLREG_PMBSCCLK_OFFSET 0x0000016C
#define PADCTRLREG_PMBSCCLK_TYPE UInt32
#define PADCTRLREG_PMBSCCLK_RESERVED_MASK 0xFFFFF8C7
#define PADCTRLREG_PMBSCCLK_PINSEL_PMBSCCLK_SHIFT 8
#define PADCTRLREG_PMBSCCLK_PINSEL_PMBSCCLK_MASK 0x00000700
#define PADCTRLREG_PMBSCCLK_PUP_PMBSCCLK_SHIFT 5
#define PADCTRLREG_PMBSCCLK_PUP_PMBSCCLK_MASK 0x00000020
#define PADCTRLREG_PMBSCCLK_SRC_PMBSCCLK_SHIFT 4
#define PADCTRLREG_PMBSCCLK_SRC_PMBSCCLK_MASK 0x00000010
#define PADCTRLREG_PMBSCCLK_IND_PMBSCCLK_SHIFT 3
#define PADCTRLREG_PMBSCCLK_IND_PMBSCCLK_MASK 0x00000008
#define PADCTRLREG_PMBSCDAT_OFFSET 0x00000170
#define PADCTRLREG_PMBSCDAT_TYPE UInt32
#define PADCTRLREG_PMBSCDAT_RESERVED_MASK 0xFFFFF8C7
#define PADCTRLREG_PMBSCDAT_PINSEL_PMBSCDAT_SHIFT 8
#define PADCTRLREG_PMBSCDAT_PINSEL_PMBSCDAT_MASK 0x00000700
#define PADCTRLREG_PMBSCDAT_PUP_PMBSCDAT_SHIFT 5
#define PADCTRLREG_PMBSCDAT_PUP_PMBSCDAT_MASK 0x00000020
#define PADCTRLREG_PMBSCDAT_SRC_PMBSCDAT_SHIFT 4
#define PADCTRLREG_PMBSCDAT_SRC_PMBSCDAT_MASK 0x00000010
#define PADCTRLREG_PMBSCDAT_IND_PMBSCDAT_SHIFT 3
#define PADCTRLREG_PMBSCDAT_IND_PMBSCDAT_MASK 0x00000008
#define PADCTRLREG_PMUINT_OFFSET 0x00000174
#define PADCTRLREG_PMUINT_TYPE UInt32
#define PADCTRLREG_PMUINT_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_PMUINT_PINSEL_PMUINT_SHIFT 8
#define PADCTRLREG_PMUINT_PINSEL_PMUINT_MASK 0x00000700
#define PADCTRLREG_PMUINT_HYS_EN_PMUINT_SHIFT 7
#define PADCTRLREG_PMUINT_HYS_EN_PMUINT_MASK 0x00000080
#define PADCTRLREG_PMUINT_PDN_PMUINT_SHIFT 6
#define PADCTRLREG_PMUINT_PDN_PMUINT_MASK 0x00000040
#define PADCTRLREG_PMUINT_PUP_PMUINT_SHIFT 5
#define PADCTRLREG_PMUINT_PUP_PMUINT_MASK 0x00000020
#define PADCTRLREG_PMUINT_SRC_PMUINT_SHIFT 4
#define PADCTRLREG_PMUINT_SRC_PMUINT_MASK 0x00000010
#define PADCTRLREG_PMUINT_IND_PMUINT_SHIFT 3
#define PADCTRLREG_PMUINT_IND_PMUINT_MASK 0x00000008
#define PADCTRLREG_PMUINT_SEL_2_PMUINT_SHIFT 2
#define PADCTRLREG_PMUINT_SEL_2_PMUINT_MASK 0x00000004
#define PADCTRLREG_PMUINT_SEL_1_PMUINT_SHIFT 1
#define PADCTRLREG_PMUINT_SEL_1_PMUINT_MASK 0x00000002
#define PADCTRLREG_PMUINT_SEL_0_PMUINT_SHIFT 0
#define PADCTRLREG_PMUINT_SEL_0_PMUINT_MASK 0x00000001
#define PADCTRLREG_RESETN_OFFSET 0x00000178
#define PADCTRLREG_RESETN_TYPE UInt32
#define PADCTRLREG_RESETN_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_RESETN_PINSEL_RESETN_SHIFT 8
#define PADCTRLREG_RESETN_PINSEL_RESETN_MASK 0x00000700
#define PADCTRLREG_RESETN_HYS_EN_RESETN_SHIFT 7
#define PADCTRLREG_RESETN_HYS_EN_RESETN_MASK 0x00000080
#define PADCTRLREG_RESETN_PDN_RESETN_SHIFT 6
#define PADCTRLREG_RESETN_PDN_RESETN_MASK 0x00000040
#define PADCTRLREG_RESETN_PUP_RESETN_SHIFT 5
#define PADCTRLREG_RESETN_PUP_RESETN_MASK 0x00000020
#define PADCTRLREG_RESETN_SRC_RESETN_SHIFT 4
#define PADCTRLREG_RESETN_SRC_RESETN_MASK 0x00000010
#define PADCTRLREG_RESETN_IND_RESETN_SHIFT 3
#define PADCTRLREG_RESETN_IND_RESETN_MASK 0x00000008
#define PADCTRLREG_RESETN_SEL_2_RESETN_SHIFT 2
#define PADCTRLREG_RESETN_SEL_2_RESETN_MASK 0x00000004
#define PADCTRLREG_RESETN_SEL_1_RESETN_SHIFT 1
#define PADCTRLREG_RESETN_SEL_1_RESETN_MASK 0x00000002
#define PADCTRLREG_RESETN_SEL_0_RESETN_SHIFT 0
#define PADCTRLREG_RESETN_SEL_0_RESETN_MASK 0x00000001
#define PADCTRLREG_RFST2G_MTSLOTEN3G_OFFSET 0x0000017C
#define PADCTRLREG_RFST2G_MTSLOTEN3G_TYPE UInt32
#define PADCTRLREG_RFST2G_MTSLOTEN3G_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_RFST2G_MTSLOTEN3G_PINSEL_RFST2G_MTSLOTEN3G_SHIFT 8
#define PADCTRLREG_RFST2G_MTSLOTEN3G_PINSEL_RFST2G_MTSLOTEN3G_MASK 0x00000700
#define PADCTRLREG_RFST2G_MTSLOTEN3G_HYS_EN_RFST2G_MTSLOTEN3G_SHIFT 7
#define PADCTRLREG_RFST2G_MTSLOTEN3G_HYS_EN_RFST2G_MTSLOTEN3G_MASK 0x00000080
#define PADCTRLREG_RFST2G_MTSLOTEN3G_PDN_RFST2G_MTSLOTEN3G_SHIFT 6
#define PADCTRLREG_RFST2G_MTSLOTEN3G_PDN_RFST2G_MTSLOTEN3G_MASK 0x00000040
#define PADCTRLREG_RFST2G_MTSLOTEN3G_PUP_RFST2G_MTSLOTEN3G_SHIFT 5
#define PADCTRLREG_RFST2G_MTSLOTEN3G_PUP_RFST2G_MTSLOTEN3G_MASK 0x00000020
#define PADCTRLREG_RFST2G_MTSLOTEN3G_SRC_RFST2G_MTSLOTEN3G_SHIFT 4
#define PADCTRLREG_RFST2G_MTSLOTEN3G_SRC_RFST2G_MTSLOTEN3G_MASK 0x00000010
#define PADCTRLREG_RFST2G_MTSLOTEN3G_IND_RFST2G_MTSLOTEN3G_SHIFT 3
#define PADCTRLREG_RFST2G_MTSLOTEN3G_IND_RFST2G_MTSLOTEN3G_MASK 0x00000008
#define PADCTRLREG_RFST2G_MTSLOTEN3G_SEL_2_RFST2G_MTSLOTEN3G_SHIFT 2
#define PADCTRLREG_RFST2G_MTSLOTEN3G_SEL_2_RFST2G_MTSLOTEN3G_MASK 0x00000004
#define PADCTRLREG_RFST2G_MTSLOTEN3G_SEL_1_RFST2G_MTSLOTEN3G_SHIFT 1
#define PADCTRLREG_RFST2G_MTSLOTEN3G_SEL_1_RFST2G_MTSLOTEN3G_MASK 0x00000002
#define PADCTRLREG_RFST2G_MTSLOTEN3G_SEL_0_RFST2G_MTSLOTEN3G_SHIFT 0
#define PADCTRLREG_RFST2G_MTSLOTEN3G_SEL_0_RFST2G_MTSLOTEN3G_MASK 0x00000001
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_OFFSET 0x00000180
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_TYPE UInt32
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_PINSEL_RTXDATA2G_TXDATA3G1_SHIFT 8
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_PINSEL_RTXDATA2G_TXDATA3G1_MASK 0x00000700
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_HYS_EN_RTXDATA2G_TXDATA3G1_SHIFT 7
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_HYS_EN_RTXDATA2G_TXDATA3G1_MASK 0x00000080
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_PDN_RTXDATA2G_TXDATA3G1_SHIFT 6
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_PDN_RTXDATA2G_TXDATA3G1_MASK 0x00000040
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_PUP_RTXDATA2G_TXDATA3G1_SHIFT 5
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_PUP_RTXDATA2G_TXDATA3G1_MASK 0x00000020
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_SRC_RTXDATA2G_TXDATA3G1_SHIFT 4
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_SRC_RTXDATA2G_TXDATA3G1_MASK 0x00000010
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_IND_RTXDATA2G_TXDATA3G1_SHIFT 3
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_IND_RTXDATA2G_TXDATA3G1_MASK 0x00000008
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_SEL_2_RTXDATA2G_TXDATA3G1_SHIFT 2
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_SEL_2_RTXDATA2G_TXDATA3G1_MASK 0x00000004
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_SEL_1_RTXDATA2G_TXDATA3G1_SHIFT 1
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_SEL_1_RTXDATA2G_TXDATA3G1_MASK 0x00000002
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_SEL_0_RTXDATA2G_TXDATA3G1_SHIFT 0
#define PADCTRLREG_RTXDATA2G_TXDATA3G1_SEL_0_RTXDATA2G_TXDATA3G1_MASK 0x00000001
#define PADCTRLREG_RTXEN2G_TXDATA3G2_OFFSET 0x00000184
#define PADCTRLREG_RTXEN2G_TXDATA3G2_TYPE UInt32
#define PADCTRLREG_RTXEN2G_TXDATA3G2_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_RTXEN2G_TXDATA3G2_PINSEL_RTXEN2G_TXDATA3G2_SHIFT 8
#define PADCTRLREG_RTXEN2G_TXDATA3G2_PINSEL_RTXEN2G_TXDATA3G2_MASK 0x00000700
#define PADCTRLREG_RTXEN2G_TXDATA3G2_HYS_EN_RTXEN2G_TXDATA3G2_SHIFT 7
#define PADCTRLREG_RTXEN2G_TXDATA3G2_HYS_EN_RTXEN2G_TXDATA3G2_MASK 0x00000080
#define PADCTRLREG_RTXEN2G_TXDATA3G2_PDN_RTXEN2G_TXDATA3G2_SHIFT 6
#define PADCTRLREG_RTXEN2G_TXDATA3G2_PDN_RTXEN2G_TXDATA3G2_MASK 0x00000040
#define PADCTRLREG_RTXEN2G_TXDATA3G2_PUP_RTXEN2G_TXDATA3G2_SHIFT 5
#define PADCTRLREG_RTXEN2G_TXDATA3G2_PUP_RTXEN2G_TXDATA3G2_MASK 0x00000020
#define PADCTRLREG_RTXEN2G_TXDATA3G2_SRC_RTXEN2G_TXDATA3G2_SHIFT 4
#define PADCTRLREG_RTXEN2G_TXDATA3G2_SRC_RTXEN2G_TXDATA3G2_MASK 0x00000010
#define PADCTRLREG_RTXEN2G_TXDATA3G2_IND_RTXEN2G_TXDATA3G2_SHIFT 3
#define PADCTRLREG_RTXEN2G_TXDATA3G2_IND_RTXEN2G_TXDATA3G2_MASK 0x00000008
#define PADCTRLREG_RTXEN2G_TXDATA3G2_SEL_2_RTXEN2G_TXDATA3G2_SHIFT 2
#define PADCTRLREG_RTXEN2G_TXDATA3G2_SEL_2_RTXEN2G_TXDATA3G2_MASK 0x00000004
#define PADCTRLREG_RTXEN2G_TXDATA3G2_SEL_1_RTXEN2G_TXDATA3G2_SHIFT 1
#define PADCTRLREG_RTXEN2G_TXDATA3G2_SEL_1_RTXEN2G_TXDATA3G2_MASK 0x00000002
#define PADCTRLREG_RTXEN2G_TXDATA3G2_SEL_0_RTXEN2G_TXDATA3G2_SHIFT 0
#define PADCTRLREG_RTXEN2G_TXDATA3G2_SEL_0_RTXEN2G_TXDATA3G2_MASK 0x00000001
#define PADCTRLREG_RXDATA3G0_OFFSET 0x00000188
#define PADCTRLREG_RXDATA3G0_TYPE UInt32
#define PADCTRLREG_RXDATA3G0_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_RXDATA3G0_PINSEL_RXDATA3G0_SHIFT 8
#define PADCTRLREG_RXDATA3G0_PINSEL_RXDATA3G0_MASK 0x00000700
#define PADCTRLREG_RXDATA3G0_HYS_EN_RXDATA3G0_SHIFT 7
#define PADCTRLREG_RXDATA3G0_HYS_EN_RXDATA3G0_MASK 0x00000080
#define PADCTRLREG_RXDATA3G0_PDN_RXDATA3G0_SHIFT 6
#define PADCTRLREG_RXDATA3G0_PDN_RXDATA3G0_MASK 0x00000040
#define PADCTRLREG_RXDATA3G0_PUP_RXDATA3G0_SHIFT 5
#define PADCTRLREG_RXDATA3G0_PUP_RXDATA3G0_MASK 0x00000020
#define PADCTRLREG_RXDATA3G0_SRC_RXDATA3G0_SHIFT 4
#define PADCTRLREG_RXDATA3G0_SRC_RXDATA3G0_MASK 0x00000010
#define PADCTRLREG_RXDATA3G0_IND_RXDATA3G0_SHIFT 3
#define PADCTRLREG_RXDATA3G0_IND_RXDATA3G0_MASK 0x00000008
#define PADCTRLREG_RXDATA3G0_SEL_2_RXDATA3G0_SHIFT 2
#define PADCTRLREG_RXDATA3G0_SEL_2_RXDATA3G0_MASK 0x00000004
#define PADCTRLREG_RXDATA3G0_SEL_1_RXDATA3G0_SHIFT 1
#define PADCTRLREG_RXDATA3G0_SEL_1_RXDATA3G0_MASK 0x00000002
#define PADCTRLREG_RXDATA3G0_SEL_0_RXDATA3G0_SHIFT 0
#define PADCTRLREG_RXDATA3G0_SEL_0_RXDATA3G0_MASK 0x00000001
#define PADCTRLREG_RXDATA3G1_OFFSET 0x0000018C
#define PADCTRLREG_RXDATA3G1_TYPE UInt32
#define PADCTRLREG_RXDATA3G1_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_RXDATA3G1_PINSEL_RXDATA3G1_SHIFT 8
#define PADCTRLREG_RXDATA3G1_PINSEL_RXDATA3G1_MASK 0x00000700
#define PADCTRLREG_RXDATA3G1_HYS_EN_RXDATA3G1_SHIFT 7
#define PADCTRLREG_RXDATA3G1_HYS_EN_RXDATA3G1_MASK 0x00000080
#define PADCTRLREG_RXDATA3G1_PDN_RXDATA3G1_SHIFT 6
#define PADCTRLREG_RXDATA3G1_PDN_RXDATA3G1_MASK 0x00000040
#define PADCTRLREG_RXDATA3G1_PUP_RXDATA3G1_SHIFT 5
#define PADCTRLREG_RXDATA3G1_PUP_RXDATA3G1_MASK 0x00000020
#define PADCTRLREG_RXDATA3G1_SRC_RXDATA3G1_SHIFT 4
#define PADCTRLREG_RXDATA3G1_SRC_RXDATA3G1_MASK 0x00000010
#define PADCTRLREG_RXDATA3G1_IND_RXDATA3G1_SHIFT 3
#define PADCTRLREG_RXDATA3G1_IND_RXDATA3G1_MASK 0x00000008
#define PADCTRLREG_RXDATA3G1_SEL_2_RXDATA3G1_SHIFT 2
#define PADCTRLREG_RXDATA3G1_SEL_2_RXDATA3G1_MASK 0x00000004
#define PADCTRLREG_RXDATA3G1_SEL_1_RXDATA3G1_SHIFT 1
#define PADCTRLREG_RXDATA3G1_SEL_1_RXDATA3G1_MASK 0x00000002
#define PADCTRLREG_RXDATA3G1_SEL_0_RXDATA3G1_SHIFT 0
#define PADCTRLREG_RXDATA3G1_SEL_0_RXDATA3G1_MASK 0x00000001
#define PADCTRLREG_RXDATA3G2_OFFSET 0x00000190
#define PADCTRLREG_RXDATA3G2_TYPE UInt32
#define PADCTRLREG_RXDATA3G2_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_RXDATA3G2_PINSEL_RXDATA3G2_SHIFT 8
#define PADCTRLREG_RXDATA3G2_PINSEL_RXDATA3G2_MASK 0x00000700
#define PADCTRLREG_RXDATA3G2_HYS_EN_RXDATA3G2_SHIFT 7
#define PADCTRLREG_RXDATA3G2_HYS_EN_RXDATA3G2_MASK 0x00000080
#define PADCTRLREG_RXDATA3G2_PDN_RXDATA3G2_SHIFT 6
#define PADCTRLREG_RXDATA3G2_PDN_RXDATA3G2_MASK 0x00000040
#define PADCTRLREG_RXDATA3G2_PUP_RXDATA3G2_SHIFT 5
#define PADCTRLREG_RXDATA3G2_PUP_RXDATA3G2_MASK 0x00000020
#define PADCTRLREG_RXDATA3G2_SRC_RXDATA3G2_SHIFT 4
#define PADCTRLREG_RXDATA3G2_SRC_RXDATA3G2_MASK 0x00000010
#define PADCTRLREG_RXDATA3G2_IND_RXDATA3G2_SHIFT 3
#define PADCTRLREG_RXDATA3G2_IND_RXDATA3G2_MASK 0x00000008
#define PADCTRLREG_RXDATA3G2_SEL_2_RXDATA3G2_SHIFT 2
#define PADCTRLREG_RXDATA3G2_SEL_2_RXDATA3G2_MASK 0x00000004
#define PADCTRLREG_RXDATA3G2_SEL_1_RXDATA3G2_SHIFT 1
#define PADCTRLREG_RXDATA3G2_SEL_1_RXDATA3G2_MASK 0x00000002
#define PADCTRLREG_RXDATA3G2_SEL_0_RXDATA3G2_SHIFT 0
#define PADCTRLREG_RXDATA3G2_SEL_0_RXDATA3G2_MASK 0x00000001
/*
 * SD/MMC interface pad-control registers:
 *   SDCK (0x194), SDCMD (0x198), SDDAT0..SDDAT3 (0x19C..0x1A8).
 * All six share the same layout (per the SHIFT/MASK values below):
 * bits 31:11 reserved (RESERVED_MASK 0xFFFFF800), PINSEL bits 10:8,
 * HYS_EN bit 7, PDN bit 6, PUP bit 5, SRC bit 4, IND bit 3,
 * SEL_2..SEL_0 bits 2:0.
 */
#define PADCTRLREG_SDCK_OFFSET 0x00000194
#define PADCTRLREG_SDCK_TYPE UInt32
#define PADCTRLREG_SDCK_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SDCK_PINSEL_SDCK_SHIFT 8
#define PADCTRLREG_SDCK_PINSEL_SDCK_MASK 0x00000700
#define PADCTRLREG_SDCK_HYS_EN_SDCK_SHIFT 7
#define PADCTRLREG_SDCK_HYS_EN_SDCK_MASK 0x00000080
#define PADCTRLREG_SDCK_PDN_SDCK_SHIFT 6
#define PADCTRLREG_SDCK_PDN_SDCK_MASK 0x00000040
#define PADCTRLREG_SDCK_PUP_SDCK_SHIFT 5
#define PADCTRLREG_SDCK_PUP_SDCK_MASK 0x00000020
#define PADCTRLREG_SDCK_SRC_SDCK_SHIFT 4
#define PADCTRLREG_SDCK_SRC_SDCK_MASK 0x00000010
#define PADCTRLREG_SDCK_IND_SDCK_SHIFT 3
#define PADCTRLREG_SDCK_IND_SDCK_MASK 0x00000008
#define PADCTRLREG_SDCK_SEL_2_SDCK_SHIFT 2
#define PADCTRLREG_SDCK_SEL_2_SDCK_MASK 0x00000004
#define PADCTRLREG_SDCK_SEL_1_SDCK_SHIFT 1
#define PADCTRLREG_SDCK_SEL_1_SDCK_MASK 0x00000002
#define PADCTRLREG_SDCK_SEL_0_SDCK_SHIFT 0
#define PADCTRLREG_SDCK_SEL_0_SDCK_MASK 0x00000001
#define PADCTRLREG_SDCMD_OFFSET 0x00000198
#define PADCTRLREG_SDCMD_TYPE UInt32
#define PADCTRLREG_SDCMD_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SDCMD_PINSEL_SDCMD_SHIFT 8
#define PADCTRLREG_SDCMD_PINSEL_SDCMD_MASK 0x00000700
#define PADCTRLREG_SDCMD_HYS_EN_SDCMD_SHIFT 7
#define PADCTRLREG_SDCMD_HYS_EN_SDCMD_MASK 0x00000080
#define PADCTRLREG_SDCMD_PDN_SDCMD_SHIFT 6
#define PADCTRLREG_SDCMD_PDN_SDCMD_MASK 0x00000040
#define PADCTRLREG_SDCMD_PUP_SDCMD_SHIFT 5
#define PADCTRLREG_SDCMD_PUP_SDCMD_MASK 0x00000020
#define PADCTRLREG_SDCMD_SRC_SDCMD_SHIFT 4
#define PADCTRLREG_SDCMD_SRC_SDCMD_MASK 0x00000010
#define PADCTRLREG_SDCMD_IND_SDCMD_SHIFT 3
#define PADCTRLREG_SDCMD_IND_SDCMD_MASK 0x00000008
#define PADCTRLREG_SDCMD_SEL_2_SDCMD_SHIFT 2
#define PADCTRLREG_SDCMD_SEL_2_SDCMD_MASK 0x00000004
#define PADCTRLREG_SDCMD_SEL_1_SDCMD_SHIFT 1
#define PADCTRLREG_SDCMD_SEL_1_SDCMD_MASK 0x00000002
#define PADCTRLREG_SDCMD_SEL_0_SDCMD_SHIFT 0
#define PADCTRLREG_SDCMD_SEL_0_SDCMD_MASK 0x00000001
#define PADCTRLREG_SDDAT0_OFFSET 0x0000019C
#define PADCTRLREG_SDDAT0_TYPE UInt32
#define PADCTRLREG_SDDAT0_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SDDAT0_PINSEL_SDDAT0_SHIFT 8
#define PADCTRLREG_SDDAT0_PINSEL_SDDAT0_MASK 0x00000700
#define PADCTRLREG_SDDAT0_HYS_EN_SDDAT0_SHIFT 7
#define PADCTRLREG_SDDAT0_HYS_EN_SDDAT0_MASK 0x00000080
#define PADCTRLREG_SDDAT0_PDN_SDDAT0_SHIFT 6
#define PADCTRLREG_SDDAT0_PDN_SDDAT0_MASK 0x00000040
#define PADCTRLREG_SDDAT0_PUP_SDDAT0_SHIFT 5
#define PADCTRLREG_SDDAT0_PUP_SDDAT0_MASK 0x00000020
#define PADCTRLREG_SDDAT0_SRC_SDDAT0_SHIFT 4
#define PADCTRLREG_SDDAT0_SRC_SDDAT0_MASK 0x00000010
#define PADCTRLREG_SDDAT0_IND_SDDAT0_SHIFT 3
#define PADCTRLREG_SDDAT0_IND_SDDAT0_MASK 0x00000008
#define PADCTRLREG_SDDAT0_SEL_2_SDDAT0_SHIFT 2
#define PADCTRLREG_SDDAT0_SEL_2_SDDAT0_MASK 0x00000004
#define PADCTRLREG_SDDAT0_SEL_1_SDDAT0_SHIFT 1
#define PADCTRLREG_SDDAT0_SEL_1_SDDAT0_MASK 0x00000002
#define PADCTRLREG_SDDAT0_SEL_0_SDDAT0_SHIFT 0
#define PADCTRLREG_SDDAT0_SEL_0_SDDAT0_MASK 0x00000001
#define PADCTRLREG_SDDAT1_OFFSET 0x000001A0
#define PADCTRLREG_SDDAT1_TYPE UInt32
#define PADCTRLREG_SDDAT1_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SDDAT1_PINSEL_SDDAT1_SHIFT 8
#define PADCTRLREG_SDDAT1_PINSEL_SDDAT1_MASK 0x00000700
#define PADCTRLREG_SDDAT1_HYS_EN_SDDAT1_SHIFT 7
#define PADCTRLREG_SDDAT1_HYS_EN_SDDAT1_MASK 0x00000080
#define PADCTRLREG_SDDAT1_PDN_SDDAT1_SHIFT 6
#define PADCTRLREG_SDDAT1_PDN_SDDAT1_MASK 0x00000040
#define PADCTRLREG_SDDAT1_PUP_SDDAT1_SHIFT 5
#define PADCTRLREG_SDDAT1_PUP_SDDAT1_MASK 0x00000020
#define PADCTRLREG_SDDAT1_SRC_SDDAT1_SHIFT 4
#define PADCTRLREG_SDDAT1_SRC_SDDAT1_MASK 0x00000010
#define PADCTRLREG_SDDAT1_IND_SDDAT1_SHIFT 3
#define PADCTRLREG_SDDAT1_IND_SDDAT1_MASK 0x00000008
#define PADCTRLREG_SDDAT1_SEL_2_SDDAT1_SHIFT 2
#define PADCTRLREG_SDDAT1_SEL_2_SDDAT1_MASK 0x00000004
#define PADCTRLREG_SDDAT1_SEL_1_SDDAT1_SHIFT 1
#define PADCTRLREG_SDDAT1_SEL_1_SDDAT1_MASK 0x00000002
#define PADCTRLREG_SDDAT1_SEL_0_SDDAT1_SHIFT 0
#define PADCTRLREG_SDDAT1_SEL_0_SDDAT1_MASK 0x00000001
#define PADCTRLREG_SDDAT2_OFFSET 0x000001A4
#define PADCTRLREG_SDDAT2_TYPE UInt32
#define PADCTRLREG_SDDAT2_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SDDAT2_PINSEL_SDDAT2_SHIFT 8
#define PADCTRLREG_SDDAT2_PINSEL_SDDAT2_MASK 0x00000700
#define PADCTRLREG_SDDAT2_HYS_EN_SDDAT2_SHIFT 7
#define PADCTRLREG_SDDAT2_HYS_EN_SDDAT2_MASK 0x00000080
#define PADCTRLREG_SDDAT2_PDN_SDDAT2_SHIFT 6
#define PADCTRLREG_SDDAT2_PDN_SDDAT2_MASK 0x00000040
#define PADCTRLREG_SDDAT2_PUP_SDDAT2_SHIFT 5
#define PADCTRLREG_SDDAT2_PUP_SDDAT2_MASK 0x00000020
#define PADCTRLREG_SDDAT2_SRC_SDDAT2_SHIFT 4
#define PADCTRLREG_SDDAT2_SRC_SDDAT2_MASK 0x00000010
#define PADCTRLREG_SDDAT2_IND_SDDAT2_SHIFT 3
#define PADCTRLREG_SDDAT2_IND_SDDAT2_MASK 0x00000008
#define PADCTRLREG_SDDAT2_SEL_2_SDDAT2_SHIFT 2
#define PADCTRLREG_SDDAT2_SEL_2_SDDAT2_MASK 0x00000004
#define PADCTRLREG_SDDAT2_SEL_1_SDDAT2_SHIFT 1
#define PADCTRLREG_SDDAT2_SEL_1_SDDAT2_MASK 0x00000002
#define PADCTRLREG_SDDAT2_SEL_0_SDDAT2_SHIFT 0
#define PADCTRLREG_SDDAT2_SEL_0_SDDAT2_MASK 0x00000001
#define PADCTRLREG_SDDAT3_OFFSET 0x000001A8
#define PADCTRLREG_SDDAT3_TYPE UInt32
#define PADCTRLREG_SDDAT3_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SDDAT3_PINSEL_SDDAT3_SHIFT 8
#define PADCTRLREG_SDDAT3_PINSEL_SDDAT3_MASK 0x00000700
#define PADCTRLREG_SDDAT3_HYS_EN_SDDAT3_SHIFT 7
#define PADCTRLREG_SDDAT3_HYS_EN_SDDAT3_MASK 0x00000080
#define PADCTRLREG_SDDAT3_PDN_SDDAT3_SHIFT 6
#define PADCTRLREG_SDDAT3_PDN_SDDAT3_MASK 0x00000040
#define PADCTRLREG_SDDAT3_PUP_SDDAT3_SHIFT 5
#define PADCTRLREG_SDDAT3_PUP_SDDAT3_MASK 0x00000020
#define PADCTRLREG_SDDAT3_SRC_SDDAT3_SHIFT 4
#define PADCTRLREG_SDDAT3_SRC_SDDAT3_MASK 0x00000010
#define PADCTRLREG_SDDAT3_IND_SDDAT3_SHIFT 3
#define PADCTRLREG_SDDAT3_IND_SDDAT3_MASK 0x00000008
#define PADCTRLREG_SDDAT3_SEL_2_SDDAT3_SHIFT 2
#define PADCTRLREG_SDDAT3_SEL_2_SDDAT3_MASK 0x00000004
#define PADCTRLREG_SDDAT3_SEL_1_SDDAT3_SHIFT 1
#define PADCTRLREG_SDDAT3_SEL_1_SDDAT3_MASK 0x00000002
#define PADCTRLREG_SDDAT3_SEL_0_SDDAT3_SHIFT 0
#define PADCTRLREG_SDDAT3_SEL_0_SDDAT3_MASK 0x00000001
/*
 * SIM card interface pad-control registers:
 *   SIMCLK (0x1AC), SIMDAT (0x1B0), SIMDET (0x1B4), SIMRST (0x1B8).
 * SIMCLK/SIMDET/SIMRST use the common layout (reserved 31:11, PINSEL 10:8,
 * HYS_EN 7, PDN 6, PUP 5, SRC 4, IND 3, SEL 2:0).
 * SIMDAT additionally defines PUPM1/PUPM0 at bits 12:11 and so reserves
 * only bits 31:13 (RESERVED_MASK 0xFFFFE000).
 * NOTE(review): PUPM presumably selects a pull-up mode -- confirm
 * against the SoC datasheet.
 */
#define PADCTRLREG_SIMCLK_OFFSET 0x000001AC
#define PADCTRLREG_SIMCLK_TYPE UInt32
#define PADCTRLREG_SIMCLK_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SIMCLK_PINSEL_SIMCLK_SHIFT 8
#define PADCTRLREG_SIMCLK_PINSEL_SIMCLK_MASK 0x00000700
#define PADCTRLREG_SIMCLK_HYS_EN_SIMCLK_SHIFT 7
#define PADCTRLREG_SIMCLK_HYS_EN_SIMCLK_MASK 0x00000080
#define PADCTRLREG_SIMCLK_PDN_SIMCLK_SHIFT 6
#define PADCTRLREG_SIMCLK_PDN_SIMCLK_MASK 0x00000040
#define PADCTRLREG_SIMCLK_PUP_SIMCLK_SHIFT 5
#define PADCTRLREG_SIMCLK_PUP_SIMCLK_MASK 0x00000020
#define PADCTRLREG_SIMCLK_SRC_SIMCLK_SHIFT 4
#define PADCTRLREG_SIMCLK_SRC_SIMCLK_MASK 0x00000010
#define PADCTRLREG_SIMCLK_IND_SIMCLK_SHIFT 3
#define PADCTRLREG_SIMCLK_IND_SIMCLK_MASK 0x00000008
#define PADCTRLREG_SIMCLK_SEL_2_SIMCLK_SHIFT 2
#define PADCTRLREG_SIMCLK_SEL_2_SIMCLK_MASK 0x00000004
#define PADCTRLREG_SIMCLK_SEL_1_SIMCLK_SHIFT 1
#define PADCTRLREG_SIMCLK_SEL_1_SIMCLK_MASK 0x00000002
#define PADCTRLREG_SIMCLK_SEL_0_SIMCLK_SHIFT 0
#define PADCTRLREG_SIMCLK_SEL_0_SIMCLK_MASK 0x00000001
#define PADCTRLREG_SIMDAT_OFFSET 0x000001B0
#define PADCTRLREG_SIMDAT_TYPE UInt32
#define PADCTRLREG_SIMDAT_RESERVED_MASK 0xFFFFE000
#define PADCTRLREG_SIMDAT_PUPM1_SIMDAT_SHIFT 12
#define PADCTRLREG_SIMDAT_PUPM1_SIMDAT_MASK 0x00001000
#define PADCTRLREG_SIMDAT_PUPM0_SIMDAT_SHIFT 11
#define PADCTRLREG_SIMDAT_PUPM0_SIMDAT_MASK 0x00000800
#define PADCTRLREG_SIMDAT_PINSEL_SIMDAT_SHIFT 8
#define PADCTRLREG_SIMDAT_PINSEL_SIMDAT_MASK 0x00000700
#define PADCTRLREG_SIMDAT_HYS_EN_SIMDAT_SHIFT 7
#define PADCTRLREG_SIMDAT_HYS_EN_SIMDAT_MASK 0x00000080
#define PADCTRLREG_SIMDAT_PDN_SIMDAT_SHIFT 6
#define PADCTRLREG_SIMDAT_PDN_SIMDAT_MASK 0x00000040
#define PADCTRLREG_SIMDAT_PUP_SIMDAT_SHIFT 5
#define PADCTRLREG_SIMDAT_PUP_SIMDAT_MASK 0x00000020
#define PADCTRLREG_SIMDAT_SRC_SIMDAT_SHIFT 4
#define PADCTRLREG_SIMDAT_SRC_SIMDAT_MASK 0x00000010
#define PADCTRLREG_SIMDAT_IND_SIMDAT_SHIFT 3
#define PADCTRLREG_SIMDAT_IND_SIMDAT_MASK 0x00000008
#define PADCTRLREG_SIMDAT_SEL_2_SIMDAT_SHIFT 2
#define PADCTRLREG_SIMDAT_SEL_2_SIMDAT_MASK 0x00000004
#define PADCTRLREG_SIMDAT_SEL_1_SIMDAT_SHIFT 1
#define PADCTRLREG_SIMDAT_SEL_1_SIMDAT_MASK 0x00000002
#define PADCTRLREG_SIMDAT_SEL_0_SIMDAT_SHIFT 0
#define PADCTRLREG_SIMDAT_SEL_0_SIMDAT_MASK 0x00000001
#define PADCTRLREG_SIMDET_OFFSET 0x000001B4
#define PADCTRLREG_SIMDET_TYPE UInt32
#define PADCTRLREG_SIMDET_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SIMDET_PINSEL_SIMDET_SHIFT 8
#define PADCTRLREG_SIMDET_PINSEL_SIMDET_MASK 0x00000700
#define PADCTRLREG_SIMDET_HYS_EN_SIMDET_SHIFT 7
#define PADCTRLREG_SIMDET_HYS_EN_SIMDET_MASK 0x00000080
#define PADCTRLREG_SIMDET_PDN_SIMDET_SHIFT 6
#define PADCTRLREG_SIMDET_PDN_SIMDET_MASK 0x00000040
#define PADCTRLREG_SIMDET_PUP_SIMDET_SHIFT 5
#define PADCTRLREG_SIMDET_PUP_SIMDET_MASK 0x00000020
#define PADCTRLREG_SIMDET_SRC_SIMDET_SHIFT 4
#define PADCTRLREG_SIMDET_SRC_SIMDET_MASK 0x00000010
#define PADCTRLREG_SIMDET_IND_SIMDET_SHIFT 3
#define PADCTRLREG_SIMDET_IND_SIMDET_MASK 0x00000008
#define PADCTRLREG_SIMDET_SEL_2_SIMDET_SHIFT 2
#define PADCTRLREG_SIMDET_SEL_2_SIMDET_MASK 0x00000004
#define PADCTRLREG_SIMDET_SEL_1_SIMDET_SHIFT 1
#define PADCTRLREG_SIMDET_SEL_1_SIMDET_MASK 0x00000002
#define PADCTRLREG_SIMDET_SEL_0_SIMDET_SHIFT 0
#define PADCTRLREG_SIMDET_SEL_0_SIMDET_MASK 0x00000001
#define PADCTRLREG_SIMRST_OFFSET 0x000001B8
#define PADCTRLREG_SIMRST_TYPE UInt32
#define PADCTRLREG_SIMRST_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SIMRST_PINSEL_SIMRST_SHIFT 8
#define PADCTRLREG_SIMRST_PINSEL_SIMRST_MASK 0x00000700
#define PADCTRLREG_SIMRST_HYS_EN_SIMRST_SHIFT 7
#define PADCTRLREG_SIMRST_HYS_EN_SIMRST_MASK 0x00000080
#define PADCTRLREG_SIMRST_PDN_SIMRST_SHIFT 6
#define PADCTRLREG_SIMRST_PDN_SIMRST_MASK 0x00000040
#define PADCTRLREG_SIMRST_PUP_SIMRST_SHIFT 5
#define PADCTRLREG_SIMRST_PUP_SIMRST_MASK 0x00000020
#define PADCTRLREG_SIMRST_SRC_SIMRST_SHIFT 4
#define PADCTRLREG_SIMRST_SRC_SIMRST_MASK 0x00000010
#define PADCTRLREG_SIMRST_IND_SIMRST_SHIFT 3
#define PADCTRLREG_SIMRST_IND_SIMRST_MASK 0x00000008
#define PADCTRLREG_SIMRST_SEL_2_SIMRST_SHIFT 2
#define PADCTRLREG_SIMRST_SEL_2_SIMRST_MASK 0x00000004
#define PADCTRLREG_SIMRST_SEL_1_SIMRST_SHIFT 1
#define PADCTRLREG_SIMRST_SEL_1_SIMRST_MASK 0x00000002
#define PADCTRLREG_SIMRST_SEL_0_SIMRST_SHIFT 0
#define PADCTRLREG_SIMRST_SEL_0_SIMRST_MASK 0x00000001
/*
 * General-purpose pad-control registers GPIO93 (0x1BC) and GPIO94 (0x1C0).
 * Common layout: bits 31:11 reserved, PINSEL bits 10:8, HYS_EN bit 7,
 * PDN bit 6, PUP bit 5, SRC bit 4, IND bit 3, SEL_2..SEL_0 bits 2:0.
 */
#define PADCTRLREG_GPIO93_OFFSET 0x000001BC
#define PADCTRLREG_GPIO93_TYPE UInt32
#define PADCTRLREG_GPIO93_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO93_PINSEL_GPIO93_SHIFT 8
#define PADCTRLREG_GPIO93_PINSEL_GPIO93_MASK 0x00000700
#define PADCTRLREG_GPIO93_HYS_EN_GPIO93_SHIFT 7
#define PADCTRLREG_GPIO93_HYS_EN_GPIO93_MASK 0x00000080
#define PADCTRLREG_GPIO93_PDN_GPIO93_SHIFT 6
#define PADCTRLREG_GPIO93_PDN_GPIO93_MASK 0x00000040
#define PADCTRLREG_GPIO93_PUP_GPIO93_SHIFT 5
#define PADCTRLREG_GPIO93_PUP_GPIO93_MASK 0x00000020
#define PADCTRLREG_GPIO93_SRC_GPIO93_SHIFT 4
#define PADCTRLREG_GPIO93_SRC_GPIO93_MASK 0x00000010
#define PADCTRLREG_GPIO93_IND_GPIO93_SHIFT 3
#define PADCTRLREG_GPIO93_IND_GPIO93_MASK 0x00000008
#define PADCTRLREG_GPIO93_SEL_2_GPIO93_SHIFT 2
#define PADCTRLREG_GPIO93_SEL_2_GPIO93_MASK 0x00000004
#define PADCTRLREG_GPIO93_SEL_1_GPIO93_SHIFT 1
#define PADCTRLREG_GPIO93_SEL_1_GPIO93_MASK 0x00000002
#define PADCTRLREG_GPIO93_SEL_0_GPIO93_SHIFT 0
#define PADCTRLREG_GPIO93_SEL_0_GPIO93_MASK 0x00000001
#define PADCTRLREG_GPIO94_OFFSET 0x000001C0
#define PADCTRLREG_GPIO94_TYPE UInt32
#define PADCTRLREG_GPIO94_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_GPIO94_PINSEL_GPIO94_SHIFT 8
#define PADCTRLREG_GPIO94_PINSEL_GPIO94_MASK 0x00000700
#define PADCTRLREG_GPIO94_HYS_EN_GPIO94_SHIFT 7
#define PADCTRLREG_GPIO94_HYS_EN_GPIO94_MASK 0x00000080
#define PADCTRLREG_GPIO94_PDN_GPIO94_SHIFT 6
#define PADCTRLREG_GPIO94_PDN_GPIO94_MASK 0x00000040
#define PADCTRLREG_GPIO94_PUP_GPIO94_SHIFT 5
#define PADCTRLREG_GPIO94_PUP_GPIO94_MASK 0x00000020
#define PADCTRLREG_GPIO94_SRC_GPIO94_SHIFT 4
#define PADCTRLREG_GPIO94_SRC_GPIO94_MASK 0x00000010
#define PADCTRLREG_GPIO94_IND_GPIO94_SHIFT 3
#define PADCTRLREG_GPIO94_IND_GPIO94_MASK 0x00000008
#define PADCTRLREG_GPIO94_SEL_2_GPIO94_SHIFT 2
#define PADCTRLREG_GPIO94_SEL_2_GPIO94_MASK 0x00000004
#define PADCTRLREG_GPIO94_SEL_1_GPIO94_SHIFT 1
#define PADCTRLREG_GPIO94_SEL_1_GPIO94_MASK 0x00000002
#define PADCTRLREG_GPIO94_SEL_0_GPIO94_SHIFT 0
#define PADCTRLREG_GPIO94_SEL_0_GPIO94_MASK 0x00000001
/*
 * SPI0 interface pad-control registers:
 *   SPI0CLK (0x1C4), SPI0FSS (0x1C8), SPI0RXD (0x1CC), SPI0TXD (0x1D0).
 * Common layout: bits 31:11 reserved, PINSEL bits 10:8, HYS_EN bit 7,
 * PDN bit 6, PUP bit 5, SRC bit 4, IND bit 3, SEL_2..SEL_0 bits 2:0.
 */
#define PADCTRLREG_SPI0CLK_OFFSET 0x000001C4
#define PADCTRLREG_SPI0CLK_TYPE UInt32
#define PADCTRLREG_SPI0CLK_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SPI0CLK_PINSEL_SPI0CLK_SHIFT 8
#define PADCTRLREG_SPI0CLK_PINSEL_SPI0CLK_MASK 0x00000700
#define PADCTRLREG_SPI0CLK_HYS_EN_SPI0CLK_SHIFT 7
#define PADCTRLREG_SPI0CLK_HYS_EN_SPI0CLK_MASK 0x00000080
#define PADCTRLREG_SPI0CLK_PDN_SPI0CLK_SHIFT 6
#define PADCTRLREG_SPI0CLK_PDN_SPI0CLK_MASK 0x00000040
#define PADCTRLREG_SPI0CLK_PUP_SPI0CLK_SHIFT 5
#define PADCTRLREG_SPI0CLK_PUP_SPI0CLK_MASK 0x00000020
#define PADCTRLREG_SPI0CLK_SRC_SPI0CLK_SHIFT 4
#define PADCTRLREG_SPI0CLK_SRC_SPI0CLK_MASK 0x00000010
#define PADCTRLREG_SPI0CLK_IND_SPI0CLK_SHIFT 3
#define PADCTRLREG_SPI0CLK_IND_SPI0CLK_MASK 0x00000008
#define PADCTRLREG_SPI0CLK_SEL_2_SPI0CLK_SHIFT 2
#define PADCTRLREG_SPI0CLK_SEL_2_SPI0CLK_MASK 0x00000004
#define PADCTRLREG_SPI0CLK_SEL_1_SPI0CLK_SHIFT 1
#define PADCTRLREG_SPI0CLK_SEL_1_SPI0CLK_MASK 0x00000002
#define PADCTRLREG_SPI0CLK_SEL_0_SPI0CLK_SHIFT 0
#define PADCTRLREG_SPI0CLK_SEL_0_SPI0CLK_MASK 0x00000001
#define PADCTRLREG_SPI0FSS_OFFSET 0x000001C8
#define PADCTRLREG_SPI0FSS_TYPE UInt32
#define PADCTRLREG_SPI0FSS_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SPI0FSS_PINSEL_SPI0FSS_SHIFT 8
#define PADCTRLREG_SPI0FSS_PINSEL_SPI0FSS_MASK 0x00000700
#define PADCTRLREG_SPI0FSS_HYS_EN_SPI0FSS_SHIFT 7
#define PADCTRLREG_SPI0FSS_HYS_EN_SPI0FSS_MASK 0x00000080
#define PADCTRLREG_SPI0FSS_PDN_SPI0FSS_SHIFT 6
#define PADCTRLREG_SPI0FSS_PDN_SPI0FSS_MASK 0x00000040
#define PADCTRLREG_SPI0FSS_PUP_SPI0FSS_SHIFT 5
#define PADCTRLREG_SPI0FSS_PUP_SPI0FSS_MASK 0x00000020
#define PADCTRLREG_SPI0FSS_SRC_SPI0FSS_SHIFT 4
#define PADCTRLREG_SPI0FSS_SRC_SPI0FSS_MASK 0x00000010
#define PADCTRLREG_SPI0FSS_IND_SPI0FSS_SHIFT 3
#define PADCTRLREG_SPI0FSS_IND_SPI0FSS_MASK 0x00000008
#define PADCTRLREG_SPI0FSS_SEL_2_SPI0FSS_SHIFT 2
#define PADCTRLREG_SPI0FSS_SEL_2_SPI0FSS_MASK 0x00000004
#define PADCTRLREG_SPI0FSS_SEL_1_SPI0FSS_SHIFT 1
#define PADCTRLREG_SPI0FSS_SEL_1_SPI0FSS_MASK 0x00000002
#define PADCTRLREG_SPI0FSS_SEL_0_SPI0FSS_SHIFT 0
#define PADCTRLREG_SPI0FSS_SEL_0_SPI0FSS_MASK 0x00000001
#define PADCTRLREG_SPI0RXD_OFFSET 0x000001CC
#define PADCTRLREG_SPI0RXD_TYPE UInt32
#define PADCTRLREG_SPI0RXD_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SPI0RXD_PINSEL_SPI0RXD_SHIFT 8
#define PADCTRLREG_SPI0RXD_PINSEL_SPI0RXD_MASK 0x00000700
#define PADCTRLREG_SPI0RXD_HYS_EN_SPI0RXD_SHIFT 7
#define PADCTRLREG_SPI0RXD_HYS_EN_SPI0RXD_MASK 0x00000080
#define PADCTRLREG_SPI0RXD_PDN_SPI0RXD_SHIFT 6
#define PADCTRLREG_SPI0RXD_PDN_SPI0RXD_MASK 0x00000040
#define PADCTRLREG_SPI0RXD_PUP_SPI0RXD_SHIFT 5
#define PADCTRLREG_SPI0RXD_PUP_SPI0RXD_MASK 0x00000020
#define PADCTRLREG_SPI0RXD_SRC_SPI0RXD_SHIFT 4
#define PADCTRLREG_SPI0RXD_SRC_SPI0RXD_MASK 0x00000010
#define PADCTRLREG_SPI0RXD_IND_SPI0RXD_SHIFT 3
#define PADCTRLREG_SPI0RXD_IND_SPI0RXD_MASK 0x00000008
#define PADCTRLREG_SPI0RXD_SEL_2_SPI0RXD_SHIFT 2
#define PADCTRLREG_SPI0RXD_SEL_2_SPI0RXD_MASK 0x00000004
#define PADCTRLREG_SPI0RXD_SEL_1_SPI0RXD_SHIFT 1
#define PADCTRLREG_SPI0RXD_SEL_1_SPI0RXD_MASK 0x00000002
#define PADCTRLREG_SPI0RXD_SEL_0_SPI0RXD_SHIFT 0
#define PADCTRLREG_SPI0RXD_SEL_0_SPI0RXD_MASK 0x00000001
#define PADCTRLREG_SPI0TXD_OFFSET 0x000001D0
#define PADCTRLREG_SPI0TXD_TYPE UInt32
#define PADCTRLREG_SPI0TXD_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SPI0TXD_PINSEL_SPI0TXD_SHIFT 8
#define PADCTRLREG_SPI0TXD_PINSEL_SPI0TXD_MASK 0x00000700
#define PADCTRLREG_SPI0TXD_HYS_EN_SPI0TXD_SHIFT 7
#define PADCTRLREG_SPI0TXD_HYS_EN_SPI0TXD_MASK 0x00000080
#define PADCTRLREG_SPI0TXD_PDN_SPI0TXD_SHIFT 6
#define PADCTRLREG_SPI0TXD_PDN_SPI0TXD_MASK 0x00000040
#define PADCTRLREG_SPI0TXD_PUP_SPI0TXD_SHIFT 5
#define PADCTRLREG_SPI0TXD_PUP_SPI0TXD_MASK 0x00000020
#define PADCTRLREG_SPI0TXD_SRC_SPI0TXD_SHIFT 4
#define PADCTRLREG_SPI0TXD_SRC_SPI0TXD_MASK 0x00000010
#define PADCTRLREG_SPI0TXD_IND_SPI0TXD_SHIFT 3
#define PADCTRLREG_SPI0TXD_IND_SPI0TXD_MASK 0x00000008
#define PADCTRLREG_SPI0TXD_SEL_2_SPI0TXD_SHIFT 2
#define PADCTRLREG_SPI0TXD_SEL_2_SPI0TXD_MASK 0x00000004
#define PADCTRLREG_SPI0TXD_SEL_1_SPI0TXD_SHIFT 1
#define PADCTRLREG_SPI0TXD_SEL_1_SPI0TXD_MASK 0x00000002
#define PADCTRLREG_SPI0TXD_SEL_0_SPI0TXD_SHIFT 0
#define PADCTRLREG_SPI0TXD_SEL_0_SPI0TXD_MASK 0x00000001
/*
 * SRI interface pad-control registers:
 *   SRI_C (0x1D4), SRI_D (0x1D8), SRI_E (0x1DC).
 * Common layout: bits 31:11 reserved, PINSEL bits 10:8, HYS_EN bit 7,
 * PDN bit 6, PUP bit 5, SRC bit 4, IND bit 3, SEL_2..SEL_0 bits 2:0.
 */
#define PADCTRLREG_SRI_C_OFFSET 0x000001D4
#define PADCTRLREG_SRI_C_TYPE UInt32
#define PADCTRLREG_SRI_C_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SRI_C_PINSEL_SRI_C_SHIFT 8
#define PADCTRLREG_SRI_C_PINSEL_SRI_C_MASK 0x00000700
#define PADCTRLREG_SRI_C_HYS_EN_SRI_C_SHIFT 7
#define PADCTRLREG_SRI_C_HYS_EN_SRI_C_MASK 0x00000080
#define PADCTRLREG_SRI_C_PDN_SRI_C_SHIFT 6
#define PADCTRLREG_SRI_C_PDN_SRI_C_MASK 0x00000040
#define PADCTRLREG_SRI_C_PUP_SRI_C_SHIFT 5
#define PADCTRLREG_SRI_C_PUP_SRI_C_MASK 0x00000020
#define PADCTRLREG_SRI_C_SRC_SRI_C_SHIFT 4
#define PADCTRLREG_SRI_C_SRC_SRI_C_MASK 0x00000010
#define PADCTRLREG_SRI_C_IND_SRI_C_SHIFT 3
#define PADCTRLREG_SRI_C_IND_SRI_C_MASK 0x00000008
#define PADCTRLREG_SRI_C_SEL_2_SRI_C_SHIFT 2
#define PADCTRLREG_SRI_C_SEL_2_SRI_C_MASK 0x00000004
#define PADCTRLREG_SRI_C_SEL_1_SRI_C_SHIFT 1
#define PADCTRLREG_SRI_C_SEL_1_SRI_C_MASK 0x00000002
#define PADCTRLREG_SRI_C_SEL_0_SRI_C_SHIFT 0
#define PADCTRLREG_SRI_C_SEL_0_SRI_C_MASK 0x00000001
#define PADCTRLREG_SRI_D_OFFSET 0x000001D8
#define PADCTRLREG_SRI_D_TYPE UInt32
#define PADCTRLREG_SRI_D_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SRI_D_PINSEL_SRI_D_SHIFT 8
#define PADCTRLREG_SRI_D_PINSEL_SRI_D_MASK 0x00000700
#define PADCTRLREG_SRI_D_HYS_EN_SRI_D_SHIFT 7
#define PADCTRLREG_SRI_D_HYS_EN_SRI_D_MASK 0x00000080
#define PADCTRLREG_SRI_D_PDN_SRI_D_SHIFT 6
#define PADCTRLREG_SRI_D_PDN_SRI_D_MASK 0x00000040
#define PADCTRLREG_SRI_D_PUP_SRI_D_SHIFT 5
#define PADCTRLREG_SRI_D_PUP_SRI_D_MASK 0x00000020
#define PADCTRLREG_SRI_D_SRC_SRI_D_SHIFT 4
#define PADCTRLREG_SRI_D_SRC_SRI_D_MASK 0x00000010
#define PADCTRLREG_SRI_D_IND_SRI_D_SHIFT 3
#define PADCTRLREG_SRI_D_IND_SRI_D_MASK 0x00000008
#define PADCTRLREG_SRI_D_SEL_2_SRI_D_SHIFT 2
#define PADCTRLREG_SRI_D_SEL_2_SRI_D_MASK 0x00000004
#define PADCTRLREG_SRI_D_SEL_1_SRI_D_SHIFT 1
#define PADCTRLREG_SRI_D_SEL_1_SRI_D_MASK 0x00000002
#define PADCTRLREG_SRI_D_SEL_0_SRI_D_SHIFT 0
#define PADCTRLREG_SRI_D_SEL_0_SRI_D_MASK 0x00000001
#define PADCTRLREG_SRI_E_OFFSET 0x000001DC
#define PADCTRLREG_SRI_E_TYPE UInt32
#define PADCTRLREG_SRI_E_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SRI_E_PINSEL_SRI_E_SHIFT 8
#define PADCTRLREG_SRI_E_PINSEL_SRI_E_MASK 0x00000700
#define PADCTRLREG_SRI_E_HYS_EN_SRI_E_SHIFT 7
#define PADCTRLREG_SRI_E_HYS_EN_SRI_E_MASK 0x00000080
#define PADCTRLREG_SRI_E_PDN_SRI_E_SHIFT 6
#define PADCTRLREG_SRI_E_PDN_SRI_E_MASK 0x00000040
#define PADCTRLREG_SRI_E_PUP_SRI_E_SHIFT 5
#define PADCTRLREG_SRI_E_PUP_SRI_E_MASK 0x00000020
#define PADCTRLREG_SRI_E_SRC_SRI_E_SHIFT 4
#define PADCTRLREG_SRI_E_SRC_SRI_E_MASK 0x00000010
#define PADCTRLREG_SRI_E_IND_SRI_E_SHIFT 3
#define PADCTRLREG_SRI_E_IND_SRI_E_MASK 0x00000008
#define PADCTRLREG_SRI_E_SEL_2_SRI_E_SHIFT 2
#define PADCTRLREG_SRI_E_SEL_2_SRI_E_MASK 0x00000004
#define PADCTRLREG_SRI_E_SEL_1_SRI_E_SHIFT 1
#define PADCTRLREG_SRI_E_SEL_1_SRI_E_MASK 0x00000002
#define PADCTRLREG_SRI_E_SEL_0_SRI_E_SHIFT 0
#define PADCTRLREG_SRI_E_SEL_0_SRI_E_MASK 0x00000001
/*
 * SSP (synchronous serial port) pad-control registers:
 *   SSPCK (0x1E0), SSPDI (0x1E4), SSPDO (0x1E8), SSPSYN (0x1EC).
 * SSPCK/SSPDI/SSPSYN use the common layout (reserved 31:11, PINSEL 10:8,
 * HYS_EN 7, PDN 6, PUP 5, SRC 4, IND 3, SEL 2:0).
 * SSPDO additionally defines PUPM1/PUPM0 at bits 12:11 and so reserves
 * only bits 31:13 (RESERVED_MASK 0xFFFFE000).
 */
#define PADCTRLREG_SSPCK_OFFSET 0x000001E0
#define PADCTRLREG_SSPCK_TYPE UInt32
#define PADCTRLREG_SSPCK_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SSPCK_PINSEL_SSPCK_SHIFT 8
#define PADCTRLREG_SSPCK_PINSEL_SSPCK_MASK 0x00000700
#define PADCTRLREG_SSPCK_HYS_EN_SSPCK_SHIFT 7
#define PADCTRLREG_SSPCK_HYS_EN_SSPCK_MASK 0x00000080
#define PADCTRLREG_SSPCK_PDN_SSPCK_SHIFT 6
#define PADCTRLREG_SSPCK_PDN_SSPCK_MASK 0x00000040
#define PADCTRLREG_SSPCK_PUP_SSPCK_SHIFT 5
#define PADCTRLREG_SSPCK_PUP_SSPCK_MASK 0x00000020
#define PADCTRLREG_SSPCK_SRC_SSPCK_SHIFT 4
#define PADCTRLREG_SSPCK_SRC_SSPCK_MASK 0x00000010
#define PADCTRLREG_SSPCK_IND_SSPCK_SHIFT 3
#define PADCTRLREG_SSPCK_IND_SSPCK_MASK 0x00000008
#define PADCTRLREG_SSPCK_SEL_2_SSPCK_SHIFT 2
#define PADCTRLREG_SSPCK_SEL_2_SSPCK_MASK 0x00000004
#define PADCTRLREG_SSPCK_SEL_1_SSPCK_SHIFT 1
#define PADCTRLREG_SSPCK_SEL_1_SSPCK_MASK 0x00000002
#define PADCTRLREG_SSPCK_SEL_0_SSPCK_SHIFT 0
#define PADCTRLREG_SSPCK_SEL_0_SSPCK_MASK 0x00000001
#define PADCTRLREG_SSPDI_OFFSET 0x000001E4
#define PADCTRLREG_SSPDI_TYPE UInt32
#define PADCTRLREG_SSPDI_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SSPDI_PINSEL_SSPDI_SHIFT 8
#define PADCTRLREG_SSPDI_PINSEL_SSPDI_MASK 0x00000700
#define PADCTRLREG_SSPDI_HYS_EN_SSPDI_SHIFT 7
#define PADCTRLREG_SSPDI_HYS_EN_SSPDI_MASK 0x00000080
#define PADCTRLREG_SSPDI_PDN_SSPDI_SHIFT 6
#define PADCTRLREG_SSPDI_PDN_SSPDI_MASK 0x00000040
#define PADCTRLREG_SSPDI_PUP_SSPDI_SHIFT 5
#define PADCTRLREG_SSPDI_PUP_SSPDI_MASK 0x00000020
#define PADCTRLREG_SSPDI_SRC_SSPDI_SHIFT 4
#define PADCTRLREG_SSPDI_SRC_SSPDI_MASK 0x00000010
#define PADCTRLREG_SSPDI_IND_SSPDI_SHIFT 3
#define PADCTRLREG_SSPDI_IND_SSPDI_MASK 0x00000008
#define PADCTRLREG_SSPDI_SEL_2_SSPDI_SHIFT 2
#define PADCTRLREG_SSPDI_SEL_2_SSPDI_MASK 0x00000004
#define PADCTRLREG_SSPDI_SEL_1_SSPDI_SHIFT 1
#define PADCTRLREG_SSPDI_SEL_1_SSPDI_MASK 0x00000002
#define PADCTRLREG_SSPDI_SEL_0_SSPDI_SHIFT 0
#define PADCTRLREG_SSPDI_SEL_0_SSPDI_MASK 0x00000001
#define PADCTRLREG_SSPDO_OFFSET 0x000001E8
#define PADCTRLREG_SSPDO_TYPE UInt32
#define PADCTRLREG_SSPDO_RESERVED_MASK 0xFFFFE000
#define PADCTRLREG_SSPDO_PUPM1_SSPDO_SHIFT 12
#define PADCTRLREG_SSPDO_PUPM1_SSPDO_MASK 0x00001000
#define PADCTRLREG_SSPDO_PUPM0_SSPDO_SHIFT 11
#define PADCTRLREG_SSPDO_PUPM0_SSPDO_MASK 0x00000800
#define PADCTRLREG_SSPDO_PINSEL_SSPDO_SHIFT 8
#define PADCTRLREG_SSPDO_PINSEL_SSPDO_MASK 0x00000700
#define PADCTRLREG_SSPDO_HYS_EN_SSPDO_SHIFT 7
#define PADCTRLREG_SSPDO_HYS_EN_SSPDO_MASK 0x00000080
#define PADCTRLREG_SSPDO_PDN_SSPDO_SHIFT 6
#define PADCTRLREG_SSPDO_PDN_SSPDO_MASK 0x00000040
#define PADCTRLREG_SSPDO_PUP_SSPDO_SHIFT 5
#define PADCTRLREG_SSPDO_PUP_SSPDO_MASK 0x00000020
#define PADCTRLREG_SSPDO_SRC_SSPDO_SHIFT 4
#define PADCTRLREG_SSPDO_SRC_SSPDO_MASK 0x00000010
#define PADCTRLREG_SSPDO_IND_SSPDO_SHIFT 3
#define PADCTRLREG_SSPDO_IND_SSPDO_MASK 0x00000008
#define PADCTRLREG_SSPDO_SEL_2_SSPDO_SHIFT 2
#define PADCTRLREG_SSPDO_SEL_2_SSPDO_MASK 0x00000004
#define PADCTRLREG_SSPDO_SEL_1_SSPDO_SHIFT 1
#define PADCTRLREG_SSPDO_SEL_1_SSPDO_MASK 0x00000002
#define PADCTRLREG_SSPDO_SEL_0_SSPDO_SHIFT 0
#define PADCTRLREG_SSPDO_SEL_0_SSPDO_MASK 0x00000001
#define PADCTRLREG_SSPSYN_OFFSET 0x000001EC
#define PADCTRLREG_SSPSYN_TYPE UInt32
#define PADCTRLREG_SSPSYN_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SSPSYN_PINSEL_SSPSYN_SHIFT 8
#define PADCTRLREG_SSPSYN_PINSEL_SSPSYN_MASK 0x00000700
#define PADCTRLREG_SSPSYN_HYS_EN_SSPSYN_SHIFT 7
#define PADCTRLREG_SSPSYN_HYS_EN_SSPSYN_MASK 0x00000080
#define PADCTRLREG_SSPSYN_PDN_SSPSYN_SHIFT 6
#define PADCTRLREG_SSPSYN_PDN_SSPSYN_MASK 0x00000040
#define PADCTRLREG_SSPSYN_PUP_SSPSYN_SHIFT 5
#define PADCTRLREG_SSPSYN_PUP_SSPSYN_MASK 0x00000020
#define PADCTRLREG_SSPSYN_SRC_SSPSYN_SHIFT 4
#define PADCTRLREG_SSPSYN_SRC_SSPSYN_MASK 0x00000010
#define PADCTRLREG_SSPSYN_IND_SSPSYN_SHIFT 3
#define PADCTRLREG_SSPSYN_IND_SSPSYN_MASK 0x00000008
#define PADCTRLREG_SSPSYN_SEL_2_SSPSYN_SHIFT 2
#define PADCTRLREG_SSPSYN_SEL_2_SSPSYN_MASK 0x00000004
#define PADCTRLREG_SSPSYN_SEL_1_SSPSYN_SHIFT 1
#define PADCTRLREG_SSPSYN_SEL_1_SSPSYN_MASK 0x00000002
#define PADCTRLREG_SSPSYN_SEL_0_SSPSYN_SHIFT 0
#define PADCTRLREG_SSPSYN_SEL_0_SSPSYN_MASK 0x00000001
/*
 * STAT1 (0x1F0) and STAT2 (0x1F4) pad-control registers.
 * Common layout: bits 31:11 reserved, PINSEL bits 10:8, HYS_EN bit 7,
 * PDN bit 6, PUP bit 5, SRC bit 4, IND bit 3, SEL_2..SEL_0 bits 2:0.
 */
#define PADCTRLREG_STAT1_OFFSET 0x000001F0
#define PADCTRLREG_STAT1_TYPE UInt32
#define PADCTRLREG_STAT1_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_STAT1_PINSEL_STAT1_SHIFT 8
#define PADCTRLREG_STAT1_PINSEL_STAT1_MASK 0x00000700
#define PADCTRLREG_STAT1_HYS_EN_STAT1_SHIFT 7
#define PADCTRLREG_STAT1_HYS_EN_STAT1_MASK 0x00000080
#define PADCTRLREG_STAT1_PDN_STAT1_SHIFT 6
#define PADCTRLREG_STAT1_PDN_STAT1_MASK 0x00000040
#define PADCTRLREG_STAT1_PUP_STAT1_SHIFT 5
#define PADCTRLREG_STAT1_PUP_STAT1_MASK 0x00000020
#define PADCTRLREG_STAT1_SRC_STAT1_SHIFT 4
#define PADCTRLREG_STAT1_SRC_STAT1_MASK 0x00000010
#define PADCTRLREG_STAT1_IND_STAT1_SHIFT 3
#define PADCTRLREG_STAT1_IND_STAT1_MASK 0x00000008
#define PADCTRLREG_STAT1_SEL_2_STAT1_SHIFT 2
#define PADCTRLREG_STAT1_SEL_2_STAT1_MASK 0x00000004
#define PADCTRLREG_STAT1_SEL_1_STAT1_SHIFT 1
#define PADCTRLREG_STAT1_SEL_1_STAT1_MASK 0x00000002
#define PADCTRLREG_STAT1_SEL_0_STAT1_SHIFT 0
#define PADCTRLREG_STAT1_SEL_0_STAT1_MASK 0x00000001
#define PADCTRLREG_STAT2_OFFSET 0x000001F4
#define PADCTRLREG_STAT2_TYPE UInt32
#define PADCTRLREG_STAT2_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_STAT2_PINSEL_STAT2_SHIFT 8
#define PADCTRLREG_STAT2_PINSEL_STAT2_MASK 0x00000700
#define PADCTRLREG_STAT2_HYS_EN_STAT2_SHIFT 7
#define PADCTRLREG_STAT2_HYS_EN_STAT2_MASK 0x00000080
#define PADCTRLREG_STAT2_PDN_STAT2_SHIFT 6
#define PADCTRLREG_STAT2_PDN_STAT2_MASK 0x00000040
#define PADCTRLREG_STAT2_PUP_STAT2_SHIFT 5
#define PADCTRLREG_STAT2_PUP_STAT2_MASK 0x00000020
#define PADCTRLREG_STAT2_SRC_STAT2_SHIFT 4
#define PADCTRLREG_STAT2_SRC_STAT2_MASK 0x00000010
#define PADCTRLREG_STAT2_IND_STAT2_SHIFT 3
#define PADCTRLREG_STAT2_IND_STAT2_MASK 0x00000008
#define PADCTRLREG_STAT2_SEL_2_STAT2_SHIFT 2
#define PADCTRLREG_STAT2_SEL_2_STAT2_MASK 0x00000004
#define PADCTRLREG_STAT2_SEL_1_STAT2_SHIFT 1
#define PADCTRLREG_STAT2_SEL_1_STAT2_MASK 0x00000002
#define PADCTRLREG_STAT2_SEL_0_STAT2_SHIFT 0
#define PADCTRLREG_STAT2_SEL_0_STAT2_MASK 0x00000001
/*
 * Debug-port pad-control registers SWCLKTCK (0x1F8) and SWDIOTMS (0x1FC).
 * NOTE(review): the names suggest shared SWD/JTAG pins
 * (SWCLK/TCK and SWDIO/TMS) -- confirm against the SoC pinout.
 * Common layout: bits 31:11 reserved, PINSEL bits 10:8, HYS_EN bit 7,
 * PDN bit 6, PUP bit 5, SRC bit 4, IND bit 3, SEL_2..SEL_0 bits 2:0.
 */
#define PADCTRLREG_SWCLKTCK_OFFSET 0x000001F8
#define PADCTRLREG_SWCLKTCK_TYPE UInt32
#define PADCTRLREG_SWCLKTCK_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SWCLKTCK_PINSEL_SWCLKTCK_SHIFT 8
#define PADCTRLREG_SWCLKTCK_PINSEL_SWCLKTCK_MASK 0x00000700
#define PADCTRLREG_SWCLKTCK_HYS_EN_SWCLKTCK_SHIFT 7
#define PADCTRLREG_SWCLKTCK_HYS_EN_SWCLKTCK_MASK 0x00000080
#define PADCTRLREG_SWCLKTCK_PDN_SWCLKTCK_SHIFT 6
#define PADCTRLREG_SWCLKTCK_PDN_SWCLKTCK_MASK 0x00000040
#define PADCTRLREG_SWCLKTCK_PUP_SWCLKTCK_SHIFT 5
#define PADCTRLREG_SWCLKTCK_PUP_SWCLKTCK_MASK 0x00000020
#define PADCTRLREG_SWCLKTCK_SRC_SWCLKTCK_SHIFT 4
#define PADCTRLREG_SWCLKTCK_SRC_SWCLKTCK_MASK 0x00000010
#define PADCTRLREG_SWCLKTCK_IND_SWCLKTCK_SHIFT 3
#define PADCTRLREG_SWCLKTCK_IND_SWCLKTCK_MASK 0x00000008
#define PADCTRLREG_SWCLKTCK_SEL_2_SWCLKTCK_SHIFT 2
#define PADCTRLREG_SWCLKTCK_SEL_2_SWCLKTCK_MASK 0x00000004
#define PADCTRLREG_SWCLKTCK_SEL_1_SWCLKTCK_SHIFT 1
#define PADCTRLREG_SWCLKTCK_SEL_1_SWCLKTCK_MASK 0x00000002
#define PADCTRLREG_SWCLKTCK_SEL_0_SWCLKTCK_SHIFT 0
#define PADCTRLREG_SWCLKTCK_SEL_0_SWCLKTCK_MASK 0x00000001
#define PADCTRLREG_SWDIOTMS_OFFSET 0x000001FC
#define PADCTRLREG_SWDIOTMS_TYPE UInt32
#define PADCTRLREG_SWDIOTMS_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SWDIOTMS_PINSEL_SWDIOTMS_SHIFT 8
#define PADCTRLREG_SWDIOTMS_PINSEL_SWDIOTMS_MASK 0x00000700
#define PADCTRLREG_SWDIOTMS_HYS_EN_SWDIOTMS_SHIFT 7
#define PADCTRLREG_SWDIOTMS_HYS_EN_SWDIOTMS_MASK 0x00000080
#define PADCTRLREG_SWDIOTMS_PDN_SWDIOTMS_SHIFT 6
#define PADCTRLREG_SWDIOTMS_PDN_SWDIOTMS_MASK 0x00000040
#define PADCTRLREG_SWDIOTMS_PUP_SWDIOTMS_SHIFT 5
#define PADCTRLREG_SWDIOTMS_PUP_SWDIOTMS_MASK 0x00000020
#define PADCTRLREG_SWDIOTMS_SRC_SWDIOTMS_SHIFT 4
#define PADCTRLREG_SWDIOTMS_SRC_SWDIOTMS_MASK 0x00000010
#define PADCTRLREG_SWDIOTMS_IND_SWDIOTMS_SHIFT 3
#define PADCTRLREG_SWDIOTMS_IND_SWDIOTMS_MASK 0x00000008
#define PADCTRLREG_SWDIOTMS_SEL_2_SWDIOTMS_SHIFT 2
#define PADCTRLREG_SWDIOTMS_SEL_2_SWDIOTMS_MASK 0x00000004
#define PADCTRLREG_SWDIOTMS_SEL_1_SWDIOTMS_SHIFT 1
#define PADCTRLREG_SWDIOTMS_SEL_1_SWDIOTMS_MASK 0x00000002
#define PADCTRLREG_SWDIOTMS_SEL_0_SWDIOTMS_SHIFT 0
#define PADCTRLREG_SWDIOTMS_SEL_0_SWDIOTMS_MASK 0x00000001
/*
 * Pad-control registers SYSCLKEN (0x200), TDI (0x204), TDO (0x208) and
 * TESTMODE (0x20C).
 * Common layout: bits 31:11 reserved, PINSEL bits 10:8, HYS_EN bit 7,
 * PDN bit 6, PUP bit 5, SRC bit 4, IND bit 3, SEL_2..SEL_0 bits 2:0.
 */
#define PADCTRLREG_SYSCLKEN_OFFSET 0x00000200
#define PADCTRLREG_SYSCLKEN_TYPE UInt32
#define PADCTRLREG_SYSCLKEN_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_SYSCLKEN_PINSEL_SYSCLKEN_SHIFT 8
#define PADCTRLREG_SYSCLKEN_PINSEL_SYSCLKEN_MASK 0x00000700
#define PADCTRLREG_SYSCLKEN_HYS_EN_SYSCLKEN_SHIFT 7
#define PADCTRLREG_SYSCLKEN_HYS_EN_SYSCLKEN_MASK 0x00000080
#define PADCTRLREG_SYSCLKEN_PDN_SYSCLKEN_SHIFT 6
#define PADCTRLREG_SYSCLKEN_PDN_SYSCLKEN_MASK 0x00000040
#define PADCTRLREG_SYSCLKEN_PUP_SYSCLKEN_SHIFT 5
#define PADCTRLREG_SYSCLKEN_PUP_SYSCLKEN_MASK 0x00000020
#define PADCTRLREG_SYSCLKEN_SRC_SYSCLKEN_SHIFT 4
#define PADCTRLREG_SYSCLKEN_SRC_SYSCLKEN_MASK 0x00000010
#define PADCTRLREG_SYSCLKEN_IND_SYSCLKEN_SHIFT 3
#define PADCTRLREG_SYSCLKEN_IND_SYSCLKEN_MASK 0x00000008
#define PADCTRLREG_SYSCLKEN_SEL_2_SYSCLKEN_SHIFT 2
#define PADCTRLREG_SYSCLKEN_SEL_2_SYSCLKEN_MASK 0x00000004
#define PADCTRLREG_SYSCLKEN_SEL_1_SYSCLKEN_SHIFT 1
#define PADCTRLREG_SYSCLKEN_SEL_1_SYSCLKEN_MASK 0x00000002
#define PADCTRLREG_SYSCLKEN_SEL_0_SYSCLKEN_SHIFT 0
#define PADCTRLREG_SYSCLKEN_SEL_0_SYSCLKEN_MASK 0x00000001
#define PADCTRLREG_TDI_OFFSET 0x00000204
#define PADCTRLREG_TDI_TYPE UInt32
#define PADCTRLREG_TDI_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TDI_PINSEL_TDI_SHIFT 8
#define PADCTRLREG_TDI_PINSEL_TDI_MASK 0x00000700
#define PADCTRLREG_TDI_HYS_EN_TDI_SHIFT 7
#define PADCTRLREG_TDI_HYS_EN_TDI_MASK 0x00000080
#define PADCTRLREG_TDI_PDN_TDI_SHIFT 6
#define PADCTRLREG_TDI_PDN_TDI_MASK 0x00000040
#define PADCTRLREG_TDI_PUP_TDI_SHIFT 5
#define PADCTRLREG_TDI_PUP_TDI_MASK 0x00000020
#define PADCTRLREG_TDI_SRC_TDI_SHIFT 4
#define PADCTRLREG_TDI_SRC_TDI_MASK 0x00000010
#define PADCTRLREG_TDI_IND_TDI_SHIFT 3
#define PADCTRLREG_TDI_IND_TDI_MASK 0x00000008
#define PADCTRLREG_TDI_SEL_2_TDI_SHIFT 2
#define PADCTRLREG_TDI_SEL_2_TDI_MASK 0x00000004
#define PADCTRLREG_TDI_SEL_1_TDI_SHIFT 1
#define PADCTRLREG_TDI_SEL_1_TDI_MASK 0x00000002
#define PADCTRLREG_TDI_SEL_0_TDI_SHIFT 0
#define PADCTRLREG_TDI_SEL_0_TDI_MASK 0x00000001
#define PADCTRLREG_TDO_OFFSET 0x00000208
#define PADCTRLREG_TDO_TYPE UInt32
#define PADCTRLREG_TDO_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TDO_PINSEL_TDO_SHIFT 8
#define PADCTRLREG_TDO_PINSEL_TDO_MASK 0x00000700
#define PADCTRLREG_TDO_HYS_EN_TDO_SHIFT 7
#define PADCTRLREG_TDO_HYS_EN_TDO_MASK 0x00000080
#define PADCTRLREG_TDO_PDN_TDO_SHIFT 6
#define PADCTRLREG_TDO_PDN_TDO_MASK 0x00000040
#define PADCTRLREG_TDO_PUP_TDO_SHIFT 5
#define PADCTRLREG_TDO_PUP_TDO_MASK 0x00000020
#define PADCTRLREG_TDO_SRC_TDO_SHIFT 4
#define PADCTRLREG_TDO_SRC_TDO_MASK 0x00000010
#define PADCTRLREG_TDO_IND_TDO_SHIFT 3
#define PADCTRLREG_TDO_IND_TDO_MASK 0x00000008
#define PADCTRLREG_TDO_SEL_2_TDO_SHIFT 2
#define PADCTRLREG_TDO_SEL_2_TDO_MASK 0x00000004
#define PADCTRLREG_TDO_SEL_1_TDO_SHIFT 1
#define PADCTRLREG_TDO_SEL_1_TDO_MASK 0x00000002
#define PADCTRLREG_TDO_SEL_0_TDO_SHIFT 0
#define PADCTRLREG_TDO_SEL_0_TDO_MASK 0x00000001
#define PADCTRLREG_TESTMODE_OFFSET 0x0000020C
#define PADCTRLREG_TESTMODE_TYPE UInt32
#define PADCTRLREG_TESTMODE_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TESTMODE_PINSEL_TESTMODE_SHIFT 8
#define PADCTRLREG_TESTMODE_PINSEL_TESTMODE_MASK 0x00000700
#define PADCTRLREG_TESTMODE_HYS_EN_TESTMODE_SHIFT 7
#define PADCTRLREG_TESTMODE_HYS_EN_TESTMODE_MASK 0x00000080
#define PADCTRLREG_TESTMODE_PDN_TESTMODE_SHIFT 6
#define PADCTRLREG_TESTMODE_PDN_TESTMODE_MASK 0x00000040
#define PADCTRLREG_TESTMODE_PUP_TESTMODE_SHIFT 5
#define PADCTRLREG_TESTMODE_PUP_TESTMODE_MASK 0x00000020
#define PADCTRLREG_TESTMODE_SRC_TESTMODE_SHIFT 4
#define PADCTRLREG_TESTMODE_SRC_TESTMODE_MASK 0x00000010
#define PADCTRLREG_TESTMODE_IND_TESTMODE_SHIFT 3
#define PADCTRLREG_TESTMODE_IND_TESTMODE_MASK 0x00000008
#define PADCTRLREG_TESTMODE_SEL_2_TESTMODE_SHIFT 2
#define PADCTRLREG_TESTMODE_SEL_2_TESTMODE_MASK 0x00000004
#define PADCTRLREG_TESTMODE_SEL_1_TESTMODE_SHIFT 1
#define PADCTRLREG_TESTMODE_SEL_1_TESTMODE_MASK 0x00000002
#define PADCTRLREG_TESTMODE_SEL_0_TESTMODE_SHIFT 0
#define PADCTRLREG_TESTMODE_SEL_0_TESTMODE_MASK 0x00000001
#define PADCTRLREG_TRACECLK_OFFSET 0x00000210
#define PADCTRLREG_TRACECLK_TYPE UInt32
#define PADCTRLREG_TRACECLK_RESERVED_MASK 0xFFFFF884
#define PADCTRLREG_TRACECLK_PINSEL_TRACECLK_SHIFT 8
#define PADCTRLREG_TRACECLK_PINSEL_TRACECLK_MASK 0x00000700
#define PADCTRLREG_TRACECLK_PDN_TRACECLK_SHIFT 6
#define PADCTRLREG_TRACECLK_PDN_TRACECLK_MASK 0x00000040
#define PADCTRLREG_TRACECLK_PUP_TRACECLK_SHIFT 5
#define PADCTRLREG_TRACECLK_PUP_TRACECLK_MASK 0x00000020
#define PADCTRLREG_TRACECLK_SRC_TRACECLK_SHIFT 4
#define PADCTRLREG_TRACECLK_SRC_TRACECLK_MASK 0x00000010
#define PADCTRLREG_TRACECLK_IND_TRACECLK_SHIFT 3
#define PADCTRLREG_TRACECLK_IND_TRACECLK_MASK 0x00000008
#define PADCTRLREG_TRACECLK_SEL_1_TRACECLK_SHIFT 1
#define PADCTRLREG_TRACECLK_SEL_1_TRACECLK_MASK 0x00000002
#define PADCTRLREG_TRACECLK_SEL_0_TRACECLK_SHIFT 0
#define PADCTRLREG_TRACECLK_SEL_0_TRACECLK_MASK 0x00000001
#define PADCTRLREG_TRACEDT00_OFFSET 0x00000214
#define PADCTRLREG_TRACEDT00_TYPE UInt32
#define PADCTRLREG_TRACEDT00_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TRACEDT00_PINSEL_TRACEDT00_SHIFT 8
#define PADCTRLREG_TRACEDT00_PINSEL_TRACEDT00_MASK 0x00000700
#define PADCTRLREG_TRACEDT00_UNUSED_7_SHIFT 7
#define PADCTRLREG_TRACEDT00_UNUSED_7_MASK 0x00000080
#define PADCTRLREG_TRACEDT00_PDN_TRACEDT00_SHIFT 6
#define PADCTRLREG_TRACEDT00_PDN_TRACEDT00_MASK 0x00000040
#define PADCTRLREG_TRACEDT00_PUP_TRACEDT00_SHIFT 5
#define PADCTRLREG_TRACEDT00_PUP_TRACEDT00_MASK 0x00000020
#define PADCTRLREG_TRACEDT00_SRC_TRACEDT00_SHIFT 4
#define PADCTRLREG_TRACEDT00_SRC_TRACEDT00_MASK 0x00000010
#define PADCTRLREG_TRACEDT00_IND_TRACEDT00_SHIFT 3
#define PADCTRLREG_TRACEDT00_IND_TRACEDT00_MASK 0x00000008
#define PADCTRLREG_TRACEDT00_UNUSED_2_SHIFT 2
#define PADCTRLREG_TRACEDT00_UNUSED_2_MASK 0x00000004
#define PADCTRLREG_TRACEDT00_SEL_1_TRACEDT00_SHIFT 1
#define PADCTRLREG_TRACEDT00_SEL_1_TRACEDT00_MASK 0x00000002
#define PADCTRLREG_TRACEDT00_SEL_0_TRACEDT00_SHIFT 0
#define PADCTRLREG_TRACEDT00_SEL_0_TRACEDT00_MASK 0x00000001
#define PADCTRLREG_TRACEDT01_OFFSET 0x00000218
#define PADCTRLREG_TRACEDT01_TYPE UInt32
#define PADCTRLREG_TRACEDT01_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TRACEDT01_PINSEL_TRACEDT01_SHIFT 8
#define PADCTRLREG_TRACEDT01_PINSEL_TRACEDT01_MASK 0x00000700
#define PADCTRLREG_TRACEDT01_UNUSED_7_SHIFT 7
#define PADCTRLREG_TRACEDT01_UNUSED_7_MASK 0x00000080
#define PADCTRLREG_TRACEDT01_PDN_TRACEDT01_SHIFT 6
#define PADCTRLREG_TRACEDT01_PDN_TRACEDT01_MASK 0x00000040
#define PADCTRLREG_TRACEDT01_PUP_TRACEDT01_SHIFT 5
#define PADCTRLREG_TRACEDT01_PUP_TRACEDT01_MASK 0x00000020
#define PADCTRLREG_TRACEDT01_SRC_TRACEDT01_SHIFT 4
#define PADCTRLREG_TRACEDT01_SRC_TRACEDT01_MASK 0x00000010
#define PADCTRLREG_TRACEDT01_IND_TRACEDT01_SHIFT 3
#define PADCTRLREG_TRACEDT01_IND_TRACEDT01_MASK 0x00000008
#define PADCTRLREG_TRACEDT01_UNUSED_2_SHIFT 2
#define PADCTRLREG_TRACEDT01_UNUSED_2_MASK 0x00000004
#define PADCTRLREG_TRACEDT01_SEL_1_TRACEDT01_SHIFT 1
#define PADCTRLREG_TRACEDT01_SEL_1_TRACEDT01_MASK 0x00000002
#define PADCTRLREG_TRACEDT01_SEL_0_TRACEDT01_SHIFT 0
#define PADCTRLREG_TRACEDT01_SEL_0_TRACEDT01_MASK 0x00000001
#define PADCTRLREG_TRACEDT02_OFFSET 0x0000021C
#define PADCTRLREG_TRACEDT02_TYPE UInt32
#define PADCTRLREG_TRACEDT02_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TRACEDT02_PINSEL_TRACEDT02_SHIFT 8
#define PADCTRLREG_TRACEDT02_PINSEL_TRACEDT02_MASK 0x00000700
#define PADCTRLREG_TRACEDT02_UNUSED_7_SHIFT 7
#define PADCTRLREG_TRACEDT02_UNUSED_7_MASK 0x00000080
#define PADCTRLREG_TRACEDT02_PDN_TRACEDT02_SHIFT 6
#define PADCTRLREG_TRACEDT02_PDN_TRACEDT02_MASK 0x00000040
#define PADCTRLREG_TRACEDT02_PUP_TRACEDT02_SHIFT 5
#define PADCTRLREG_TRACEDT02_PUP_TRACEDT02_MASK 0x00000020
#define PADCTRLREG_TRACEDT02_SRC_TRACEDT02_SHIFT 4
#define PADCTRLREG_TRACEDT02_SRC_TRACEDT02_MASK 0x00000010
#define PADCTRLREG_TRACEDT02_IND_TRACEDT02_SHIFT 3
#define PADCTRLREG_TRACEDT02_IND_TRACEDT02_MASK 0x00000008
#define PADCTRLREG_TRACEDT02_UNUSED_2_SHIFT 2
#define PADCTRLREG_TRACEDT02_UNUSED_2_MASK 0x00000004
#define PADCTRLREG_TRACEDT02_SEL_1_TRACEDT02_SHIFT 1
#define PADCTRLREG_TRACEDT02_SEL_1_TRACEDT02_MASK 0x00000002
#define PADCTRLREG_TRACEDT02_SEL_0_TRACEDT02_SHIFT 0
#define PADCTRLREG_TRACEDT02_SEL_0_TRACEDT02_MASK 0x00000001
#define PADCTRLREG_TRACEDT03_OFFSET 0x00000220
#define PADCTRLREG_TRACEDT03_TYPE UInt32
#define PADCTRLREG_TRACEDT03_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TRACEDT03_PINSEL_TRACEDT03_SHIFT 8
#define PADCTRLREG_TRACEDT03_PINSEL_TRACEDT03_MASK 0x00000700
#define PADCTRLREG_TRACEDT03_UNUSED_7_SHIFT 7
#define PADCTRLREG_TRACEDT03_UNUSED_7_MASK 0x00000080
#define PADCTRLREG_TRACEDT03_PDN_TRACEDT03_SHIFT 6
#define PADCTRLREG_TRACEDT03_PDN_TRACEDT03_MASK 0x00000040
#define PADCTRLREG_TRACEDT03_PUP_TRACEDT03_SHIFT 5
#define PADCTRLREG_TRACEDT03_PUP_TRACEDT03_MASK 0x00000020
#define PADCTRLREG_TRACEDT03_SRC_TRACEDT03_SHIFT 4
#define PADCTRLREG_TRACEDT03_SRC_TRACEDT03_MASK 0x00000010
#define PADCTRLREG_TRACEDT03_IND_TRACEDT03_SHIFT 3
#define PADCTRLREG_TRACEDT03_IND_TRACEDT03_MASK 0x00000008
#define PADCTRLREG_TRACEDT03_UNUSED_2_SHIFT 2
#define PADCTRLREG_TRACEDT03_UNUSED_2_MASK 0x00000004
#define PADCTRLREG_TRACEDT03_SEL_1_TRACEDT03_SHIFT 1
#define PADCTRLREG_TRACEDT03_SEL_1_TRACEDT03_MASK 0x00000002
#define PADCTRLREG_TRACEDT03_SEL_0_TRACEDT03_SHIFT 0
#define PADCTRLREG_TRACEDT03_SEL_0_TRACEDT03_MASK 0x00000001
#define PADCTRLREG_TRACEDT04_OFFSET 0x00000224
#define PADCTRLREG_TRACEDT04_TYPE UInt32
#define PADCTRLREG_TRACEDT04_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TRACEDT04_PINSEL_TRACEDT04_SHIFT 8
#define PADCTRLREG_TRACEDT04_PINSEL_TRACEDT04_MASK 0x00000700
#define PADCTRLREG_TRACEDT04_UNUSED_7_SHIFT 7
#define PADCTRLREG_TRACEDT04_UNUSED_7_MASK 0x00000080
#define PADCTRLREG_TRACEDT04_PDN_TRACEDT04_SHIFT 6
#define PADCTRLREG_TRACEDT04_PDN_TRACEDT04_MASK 0x00000040
#define PADCTRLREG_TRACEDT04_PUP_TRACEDT04_SHIFT 5
#define PADCTRLREG_TRACEDT04_PUP_TRACEDT04_MASK 0x00000020
#define PADCTRLREG_TRACEDT04_SRC_TRACEDT04_SHIFT 4
#define PADCTRLREG_TRACEDT04_SRC_TRACEDT04_MASK 0x00000010
#define PADCTRLREG_TRACEDT04_IND_TRACEDT04_SHIFT 3
#define PADCTRLREG_TRACEDT04_IND_TRACEDT04_MASK 0x00000008
#define PADCTRLREG_TRACEDT04_UNUSED_2_SHIFT 2
#define PADCTRLREG_TRACEDT04_UNUSED_2_MASK 0x00000004
#define PADCTRLREG_TRACEDT04_SEL_1_TRACEDT04_SHIFT 1
#define PADCTRLREG_TRACEDT04_SEL_1_TRACEDT04_MASK 0x00000002
#define PADCTRLREG_TRACEDT04_SEL_0_TRACEDT04_SHIFT 0
#define PADCTRLREG_TRACEDT04_SEL_0_TRACEDT04_MASK 0x00000001
#define PADCTRLREG_TRACEDT05_OFFSET 0x00000228
#define PADCTRLREG_TRACEDT05_TYPE UInt32
#define PADCTRLREG_TRACEDT05_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TRACEDT05_PINSEL_TRACEDT05_SHIFT 8
#define PADCTRLREG_TRACEDT05_PINSEL_TRACEDT05_MASK 0x00000700
#define PADCTRLREG_TRACEDT05_UNUSED_7_SHIFT 7
#define PADCTRLREG_TRACEDT05_UNUSED_7_MASK 0x00000080
#define PADCTRLREG_TRACEDT05_PDN_TRACEDT05_SHIFT 6
#define PADCTRLREG_TRACEDT05_PDN_TRACEDT05_MASK 0x00000040
#define PADCTRLREG_TRACEDT05_PUP_TRACEDT05_SHIFT 5
#define PADCTRLREG_TRACEDT05_PUP_TRACEDT05_MASK 0x00000020
#define PADCTRLREG_TRACEDT05_SRC_TRACEDT05_SHIFT 4
#define PADCTRLREG_TRACEDT05_SRC_TRACEDT05_MASK 0x00000010
#define PADCTRLREG_TRACEDT05_IND_TRACEDT05_SHIFT 3
#define PADCTRLREG_TRACEDT05_IND_TRACEDT05_MASK 0x00000008
#define PADCTRLREG_TRACEDT05_UNUSED_2_SHIFT 2
#define PADCTRLREG_TRACEDT05_UNUSED_2_MASK 0x00000004
#define PADCTRLREG_TRACEDT05_SEL_1_TRACEDT05_SHIFT 1
#define PADCTRLREG_TRACEDT05_SEL_1_TRACEDT05_MASK 0x00000002
#define PADCTRLREG_TRACEDT05_SEL_0_TRACEDT05_SHIFT 0
#define PADCTRLREG_TRACEDT05_SEL_0_TRACEDT05_MASK 0x00000001
#define PADCTRLREG_TRACEDT06_OFFSET 0x0000022C
#define PADCTRLREG_TRACEDT06_TYPE UInt32
#define PADCTRLREG_TRACEDT06_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TRACEDT06_PINSEL_TRACEDT06_SHIFT 8
#define PADCTRLREG_TRACEDT06_PINSEL_TRACEDT06_MASK 0x00000700
#define PADCTRLREG_TRACEDT06_UNUSED_7_SHIFT 7
#define PADCTRLREG_TRACEDT06_UNUSED_7_MASK 0x00000080
#define PADCTRLREG_TRACEDT06_PDN_TRACEDT06_SHIFT 6
#define PADCTRLREG_TRACEDT06_PDN_TRACEDT06_MASK 0x00000040
#define PADCTRLREG_TRACEDT06_PUP_TRACEDT06_SHIFT 5
#define PADCTRLREG_TRACEDT06_PUP_TRACEDT06_MASK 0x00000020
#define PADCTRLREG_TRACEDT06_SRC_TRACEDT06_SHIFT 4
#define PADCTRLREG_TRACEDT06_SRC_TRACEDT06_MASK 0x00000010
#define PADCTRLREG_TRACEDT06_IND_TRACEDT06_SHIFT 3
#define PADCTRLREG_TRACEDT06_IND_TRACEDT06_MASK 0x00000008
#define PADCTRLREG_TRACEDT06_UNUSED_2_SHIFT 2
#define PADCTRLREG_TRACEDT06_UNUSED_2_MASK 0x00000004
#define PADCTRLREG_TRACEDT06_SEL_1_TRACEDT06_SHIFT 1
#define PADCTRLREG_TRACEDT06_SEL_1_TRACEDT06_MASK 0x00000002
#define PADCTRLREG_TRACEDT06_SEL_0_TRACEDT06_SHIFT 0
#define PADCTRLREG_TRACEDT06_SEL_0_TRACEDT06_MASK 0x00000001
#define PADCTRLREG_TRACEDT07_OFFSET 0x00000230
#define PADCTRLREG_TRACEDT07_TYPE UInt32
#define PADCTRLREG_TRACEDT07_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TRACEDT07_PINSEL_TRACEDT07_SHIFT 8
#define PADCTRLREG_TRACEDT07_PINSEL_TRACEDT07_MASK 0x00000700
#define PADCTRLREG_TRACEDT07_UNUSED_7_SHIFT 7
#define PADCTRLREG_TRACEDT07_UNUSED_7_MASK 0x00000080
#define PADCTRLREG_TRACEDT07_PDN_TRACEDT07_SHIFT 6
#define PADCTRLREG_TRACEDT07_PDN_TRACEDT07_MASK 0x00000040
#define PADCTRLREG_TRACEDT07_PUP_TRACEDT07_SHIFT 5
#define PADCTRLREG_TRACEDT07_PUP_TRACEDT07_MASK 0x00000020
#define PADCTRLREG_TRACEDT07_SRC_TRACEDT07_SHIFT 4
#define PADCTRLREG_TRACEDT07_SRC_TRACEDT07_MASK 0x00000010
#define PADCTRLREG_TRACEDT07_IND_TRACEDT07_SHIFT 3
#define PADCTRLREG_TRACEDT07_IND_TRACEDT07_MASK 0x00000008
#define PADCTRLREG_TRACEDT07_UNUSED_2_SHIFT 2
#define PADCTRLREG_TRACEDT07_UNUSED_2_MASK 0x00000004
#define PADCTRLREG_TRACEDT07_SEL_1_TRACEDT07_SHIFT 1
#define PADCTRLREG_TRACEDT07_SEL_1_TRACEDT07_MASK 0x00000002
#define PADCTRLREG_TRACEDT07_SEL_0_TRACEDT07_SHIFT 0
#define PADCTRLREG_TRACEDT07_SEL_0_TRACEDT07_MASK 0x00000001
#define PADCTRLREG_TRSTB_OFFSET 0x00000234
#define PADCTRLREG_TRSTB_TYPE UInt32
#define PADCTRLREG_TRSTB_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TRSTB_PINSEL_TRSTB_SHIFT 8
#define PADCTRLREG_TRSTB_PINSEL_TRSTB_MASK 0x00000700
#define PADCTRLREG_TRSTB_HYS_EN_TRSTB_SHIFT 7
#define PADCTRLREG_TRSTB_HYS_EN_TRSTB_MASK 0x00000080
#define PADCTRLREG_TRSTB_PDN_TRSTB_SHIFT 6
#define PADCTRLREG_TRSTB_PDN_TRSTB_MASK 0x00000040
#define PADCTRLREG_TRSTB_PUP_TRSTB_SHIFT 5
#define PADCTRLREG_TRSTB_PUP_TRSTB_MASK 0x00000020
#define PADCTRLREG_TRSTB_SRC_TRSTB_SHIFT 4
#define PADCTRLREG_TRSTB_SRC_TRSTB_MASK 0x00000010
#define PADCTRLREG_TRSTB_IND_TRSTB_SHIFT 3
#define PADCTRLREG_TRSTB_IND_TRSTB_MASK 0x00000008
#define PADCTRLREG_TRSTB_SEL_2_TRSTB_SHIFT 2
#define PADCTRLREG_TRSTB_SEL_2_TRSTB_MASK 0x00000004
#define PADCTRLREG_TRSTB_SEL_1_TRSTB_SHIFT 1
#define PADCTRLREG_TRSTB_SEL_1_TRSTB_MASK 0x00000002
#define PADCTRLREG_TRSTB_SEL_0_TRSTB_SHIFT 0
#define PADCTRLREG_TRSTB_SEL_0_TRSTB_MASK 0x00000001
#define PADCTRLREG_TXDATA3G0_OFFSET 0x00000238
#define PADCTRLREG_TXDATA3G0_TYPE UInt32
#define PADCTRLREG_TXDATA3G0_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TXDATA3G0_PINSEL_TXDATA3G0_SHIFT 8
#define PADCTRLREG_TXDATA3G0_PINSEL_TXDATA3G0_MASK 0x00000700
#define PADCTRLREG_TXDATA3G0_HYS_EN_TXDATA3G0_SHIFT 7
#define PADCTRLREG_TXDATA3G0_HYS_EN_TXDATA3G0_MASK 0x00000080
#define PADCTRLREG_TXDATA3G0_PDN_TXDATA3G0_SHIFT 6
#define PADCTRLREG_TXDATA3G0_PDN_TXDATA3G0_MASK 0x00000040
#define PADCTRLREG_TXDATA3G0_PUP_TXDATA3G0_SHIFT 5
#define PADCTRLREG_TXDATA3G0_PUP_TXDATA3G0_MASK 0x00000020
#define PADCTRLREG_TXDATA3G0_SRC_TXDATA3G0_SHIFT 4
#define PADCTRLREG_TXDATA3G0_SRC_TXDATA3G0_MASK 0x00000010
#define PADCTRLREG_TXDATA3G0_IND_TXDATA3G0_SHIFT 3
#define PADCTRLREG_TXDATA3G0_IND_TXDATA3G0_MASK 0x00000008
#define PADCTRLREG_TXDATA3G0_SEL_2_TXDATA3G0_SHIFT 2
#define PADCTRLREG_TXDATA3G0_SEL_2_TXDATA3G0_MASK 0x00000004
#define PADCTRLREG_TXDATA3G0_SEL_1_TXDATA3G0_SHIFT 1
#define PADCTRLREG_TXDATA3G0_SEL_1_TXDATA3G0_MASK 0x00000002
#define PADCTRLREG_TXDATA3G0_SEL_0_TXDATA3G0_SHIFT 0
#define PADCTRLREG_TXDATA3G0_SEL_0_TXDATA3G0_MASK 0x00000001
#define PADCTRLREG_UBCTSN_OFFSET 0x0000023C
#define PADCTRLREG_UBCTSN_TYPE UInt32
#define PADCTRLREG_UBCTSN_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_UBCTSN_PINSEL_UBCTSN_SHIFT 8
#define PADCTRLREG_UBCTSN_PINSEL_UBCTSN_MASK 0x00000700
#define PADCTRLREG_UBCTSN_HYS_EN_UBCTSN_SHIFT 7
#define PADCTRLREG_UBCTSN_HYS_EN_UBCTSN_MASK 0x00000080
#define PADCTRLREG_UBCTSN_PDN_UBCTSN_SHIFT 6
#define PADCTRLREG_UBCTSN_PDN_UBCTSN_MASK 0x00000040
#define PADCTRLREG_UBCTSN_PUP_UBCTSN_SHIFT 5
#define PADCTRLREG_UBCTSN_PUP_UBCTSN_MASK 0x00000020
#define PADCTRLREG_UBCTSN_SRC_UBCTSN_SHIFT 4
#define PADCTRLREG_UBCTSN_SRC_UBCTSN_MASK 0x00000010
#define PADCTRLREG_UBCTSN_IND_UBCTSN_SHIFT 3
#define PADCTRLREG_UBCTSN_IND_UBCTSN_MASK 0x00000008
#define PADCTRLREG_UBCTSN_SEL_2_UBCTSN_SHIFT 2
#define PADCTRLREG_UBCTSN_SEL_2_UBCTSN_MASK 0x00000004
#define PADCTRLREG_UBCTSN_SEL_1_UBCTSN_SHIFT 1
#define PADCTRLREG_UBCTSN_SEL_1_UBCTSN_MASK 0x00000002
#define PADCTRLREG_UBCTSN_SEL_0_UBCTSN_SHIFT 0
#define PADCTRLREG_UBCTSN_SEL_0_UBCTSN_MASK 0x00000001
#define PADCTRLREG_UBRTSN_OFFSET 0x00000240
#define PADCTRLREG_UBRTSN_TYPE UInt32
#define PADCTRLREG_UBRTSN_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_UBRTSN_PINSEL_UBRTSN_SHIFT 8
#define PADCTRLREG_UBRTSN_PINSEL_UBRTSN_MASK 0x00000700
#define PADCTRLREG_UBRTSN_HYS_EN_UBRTSN_SHIFT 7
#define PADCTRLREG_UBRTSN_HYS_EN_UBRTSN_MASK 0x00000080
#define PADCTRLREG_UBRTSN_PDN_UBRTSN_SHIFT 6
#define PADCTRLREG_UBRTSN_PDN_UBRTSN_MASK 0x00000040
#define PADCTRLREG_UBRTSN_PUP_UBRTSN_SHIFT 5
#define PADCTRLREG_UBRTSN_PUP_UBRTSN_MASK 0x00000020
#define PADCTRLREG_UBRTSN_SRC_UBRTSN_SHIFT 4
#define PADCTRLREG_UBRTSN_SRC_UBRTSN_MASK 0x00000010
#define PADCTRLREG_UBRTSN_IND_UBRTSN_SHIFT 3
#define PADCTRLREG_UBRTSN_IND_UBRTSN_MASK 0x00000008
#define PADCTRLREG_UBRTSN_SEL_2_UBRTSN_SHIFT 2
#define PADCTRLREG_UBRTSN_SEL_2_UBRTSN_MASK 0x00000004
#define PADCTRLREG_UBRTSN_SEL_1_UBRTSN_SHIFT 1
#define PADCTRLREG_UBRTSN_SEL_1_UBRTSN_MASK 0x00000002
#define PADCTRLREG_UBRTSN_SEL_0_UBRTSN_SHIFT 0
#define PADCTRLREG_UBRTSN_SEL_0_UBRTSN_MASK 0x00000001
#define PADCTRLREG_UBRX_OFFSET 0x00000244
#define PADCTRLREG_UBRX_TYPE UInt32
#define PADCTRLREG_UBRX_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_UBRX_PINSEL_UBRX_SHIFT 8
#define PADCTRLREG_UBRX_PINSEL_UBRX_MASK 0x00000700
#define PADCTRLREG_UBRX_HYS_EN_UBRX_SHIFT 7
#define PADCTRLREG_UBRX_HYS_EN_UBRX_MASK 0x00000080
#define PADCTRLREG_UBRX_PDN_UBRX_SHIFT 6
#define PADCTRLREG_UBRX_PDN_UBRX_MASK 0x00000040
#define PADCTRLREG_UBRX_PUP_UBRX_SHIFT 5
#define PADCTRLREG_UBRX_PUP_UBRX_MASK 0x00000020
#define PADCTRLREG_UBRX_SRC_UBRX_SHIFT 4
#define PADCTRLREG_UBRX_SRC_UBRX_MASK 0x00000010
#define PADCTRLREG_UBRX_IND_UBRX_SHIFT 3
#define PADCTRLREG_UBRX_IND_UBRX_MASK 0x00000008
#define PADCTRLREG_UBRX_SEL_2_UBRX_SHIFT 2
#define PADCTRLREG_UBRX_SEL_2_UBRX_MASK 0x00000004
#define PADCTRLREG_UBRX_SEL_1_UBRX_SHIFT 1
#define PADCTRLREG_UBRX_SEL_1_UBRX_MASK 0x00000002
#define PADCTRLREG_UBRX_SEL_0_UBRX_SHIFT 0
#define PADCTRLREG_UBRX_SEL_0_UBRX_MASK 0x00000001
#define PADCTRLREG_UBTX_OFFSET 0x00000248
#define PADCTRLREG_UBTX_TYPE UInt32
#define PADCTRLREG_UBTX_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_UBTX_PINSEL_UBTX_SHIFT 8
#define PADCTRLREG_UBTX_PINSEL_UBTX_MASK 0x00000700
#define PADCTRLREG_UBTX_HYS_EN_UBTX_SHIFT 7
#define PADCTRLREG_UBTX_HYS_EN_UBTX_MASK 0x00000080
#define PADCTRLREG_UBTX_PDN_UBTX_SHIFT 6
#define PADCTRLREG_UBTX_PDN_UBTX_MASK 0x00000040
#define PADCTRLREG_UBTX_PUP_UBTX_SHIFT 5
#define PADCTRLREG_UBTX_PUP_UBTX_MASK 0x00000020
#define PADCTRLREG_UBTX_SRC_UBTX_SHIFT 4
#define PADCTRLREG_UBTX_SRC_UBTX_MASK 0x00000010
#define PADCTRLREG_UBTX_IND_UBTX_SHIFT 3
#define PADCTRLREG_UBTX_IND_UBTX_MASK 0x00000008
#define PADCTRLREG_UBTX_SEL_2_UBTX_SHIFT 2
#define PADCTRLREG_UBTX_SEL_2_UBTX_MASK 0x00000004
#define PADCTRLREG_UBTX_SEL_1_UBTX_SHIFT 1
#define PADCTRLREG_UBTX_SEL_1_UBTX_MASK 0x00000002
#define PADCTRLREG_UBTX_SEL_0_UBTX_SHIFT 0
#define PADCTRLREG_UBTX_SEL_0_UBTX_MASK 0x00000001
#define PADCTRLREG_TRACEDT08_OFFSET 0x0000024C
#define PADCTRLREG_TRACEDT08_TYPE UInt32
#define PADCTRLREG_TRACEDT08_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TRACEDT08_PINSEL_TRACEDT08_SHIFT 8
#define PADCTRLREG_TRACEDT08_PINSEL_TRACEDT08_MASK 0x00000700
#define PADCTRLREG_TRACEDT08_UNUSED_7_SHIFT 7
#define PADCTRLREG_TRACEDT08_UNUSED_7_MASK 0x00000080
#define PADCTRLREG_TRACEDT08_PDN_TRACEDT08_SHIFT 6
#define PADCTRLREG_TRACEDT08_PDN_TRACEDT08_MASK 0x00000040
#define PADCTRLREG_TRACEDT08_PUP_TRACEDT08_SHIFT 5
#define PADCTRLREG_TRACEDT08_PUP_TRACEDT08_MASK 0x00000020
#define PADCTRLREG_TRACEDT08_SRC_TRACEDT08_SHIFT 4
#define PADCTRLREG_TRACEDT08_SRC_TRACEDT08_MASK 0x00000010
#define PADCTRLREG_TRACEDT08_IND_TRACEDT08_SHIFT 3
#define PADCTRLREG_TRACEDT08_IND_TRACEDT08_MASK 0x00000008
#define PADCTRLREG_TRACEDT08_UNUSED_2_SHIFT 2
#define PADCTRLREG_TRACEDT08_UNUSED_2_MASK 0x00000004
#define PADCTRLREG_TRACEDT08_SEL_1_TRACEDT08_SHIFT 1
#define PADCTRLREG_TRACEDT08_SEL_1_TRACEDT08_MASK 0x00000002
#define PADCTRLREG_TRACEDT08_SEL_0_TRACEDT08_SHIFT 0
#define PADCTRLREG_TRACEDT08_SEL_0_TRACEDT08_MASK 0x00000001
#define PADCTRLREG_TRACEDT09_OFFSET 0x00000250
#define PADCTRLREG_TRACEDT09_TYPE UInt32
#define PADCTRLREG_TRACEDT09_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TRACEDT09_PINSEL_TRACEDT09_SHIFT 8
#define PADCTRLREG_TRACEDT09_PINSEL_TRACEDT09_MASK 0x00000700
#define PADCTRLREG_TRACEDT09_UNUSED_7_SHIFT 7
#define PADCTRLREG_TRACEDT09_UNUSED_7_MASK 0x00000080
#define PADCTRLREG_TRACEDT09_PDN_TRACEDT09_SHIFT 6
#define PADCTRLREG_TRACEDT09_PDN_TRACEDT09_MASK 0x00000040
#define PADCTRLREG_TRACEDT09_PUP_TRACEDT09_SHIFT 5
#define PADCTRLREG_TRACEDT09_PUP_TRACEDT09_MASK 0x00000020
#define PADCTRLREG_TRACEDT09_SRC_TRACEDT09_SHIFT 4
#define PADCTRLREG_TRACEDT09_SRC_TRACEDT09_MASK 0x00000010
#define PADCTRLREG_TRACEDT09_IND_TRACEDT09_SHIFT 3
#define PADCTRLREG_TRACEDT09_IND_TRACEDT09_MASK 0x00000008
#define PADCTRLREG_TRACEDT09_UNUSED_2_SHIFT 2
#define PADCTRLREG_TRACEDT09_UNUSED_2_MASK 0x00000004
#define PADCTRLREG_TRACEDT09_SEL_1_TRACEDT09_SHIFT 1
#define PADCTRLREG_TRACEDT09_SEL_1_TRACEDT09_MASK 0x00000002
#define PADCTRLREG_TRACEDT09_SEL_0_TRACEDT09_SHIFT 0
#define PADCTRLREG_TRACEDT09_SEL_0_TRACEDT09_MASK 0x00000001
#define PADCTRLREG_TRACEDT10_OFFSET 0x00000254
#define PADCTRLREG_TRACEDT10_TYPE UInt32
#define PADCTRLREG_TRACEDT10_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TRACEDT10_PINSEL_TRACEDT10_SHIFT 8
#define PADCTRLREG_TRACEDT10_PINSEL_TRACEDT10_MASK 0x00000700
#define PADCTRLREG_TRACEDT10_UNUSED_7_SHIFT 7
#define PADCTRLREG_TRACEDT10_UNUSED_7_MASK 0x00000080
#define PADCTRLREG_TRACEDT10_PDN_TRACEDT10_SHIFT 6
#define PADCTRLREG_TRACEDT10_PDN_TRACEDT10_MASK 0x00000040
#define PADCTRLREG_TRACEDT10_PUP_TRACEDT10_SHIFT 5
#define PADCTRLREG_TRACEDT10_PUP_TRACEDT10_MASK 0x00000020
#define PADCTRLREG_TRACEDT10_SRC_TRACEDT10_SHIFT 4
#define PADCTRLREG_TRACEDT10_SRC_TRACEDT10_MASK 0x00000010
#define PADCTRLREG_TRACEDT10_IND_TRACEDT10_SHIFT 3
#define PADCTRLREG_TRACEDT10_IND_TRACEDT10_MASK 0x00000008
#define PADCTRLREG_TRACEDT10_UNUSED_2_SHIFT 2
#define PADCTRLREG_TRACEDT10_UNUSED_2_MASK 0x00000004
#define PADCTRLREG_TRACEDT10_SEL_1_TRACEDT10_SHIFT 1
#define PADCTRLREG_TRACEDT10_SEL_1_TRACEDT10_MASK 0x00000002
#define PADCTRLREG_TRACEDT10_SEL_0_TRACEDT10_SHIFT 0
#define PADCTRLREG_TRACEDT10_SEL_0_TRACEDT10_MASK 0x00000001
#define PADCTRLREG_TRACEDT11_OFFSET 0x00000258
#define PADCTRLREG_TRACEDT11_TYPE UInt32
#define PADCTRLREG_TRACEDT11_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TRACEDT11_PINSEL_TRACEDT11_SHIFT 8
#define PADCTRLREG_TRACEDT11_PINSEL_TRACEDT11_MASK 0x00000700
#define PADCTRLREG_TRACEDT11_UNUSED_7_SHIFT 7
#define PADCTRLREG_TRACEDT11_UNUSED_7_MASK 0x00000080
#define PADCTRLREG_TRACEDT11_PDN_TRACEDT11_SHIFT 6
#define PADCTRLREG_TRACEDT11_PDN_TRACEDT11_MASK 0x00000040
#define PADCTRLREG_TRACEDT11_PUP_TRACEDT11_SHIFT 5
#define PADCTRLREG_TRACEDT11_PUP_TRACEDT11_MASK 0x00000020
#define PADCTRLREG_TRACEDT11_SRC_TRACEDT11_SHIFT 4
#define PADCTRLREG_TRACEDT11_SRC_TRACEDT11_MASK 0x00000010
#define PADCTRLREG_TRACEDT11_IND_TRACEDT11_SHIFT 3
#define PADCTRLREG_TRACEDT11_IND_TRACEDT11_MASK 0x00000008
#define PADCTRLREG_TRACEDT11_UNUSED_2_SHIFT 2
#define PADCTRLREG_TRACEDT11_UNUSED_2_MASK 0x00000004
#define PADCTRLREG_TRACEDT11_SEL_1_TRACEDT11_SHIFT 1
#define PADCTRLREG_TRACEDT11_SEL_1_TRACEDT11_MASK 0x00000002
#define PADCTRLREG_TRACEDT11_SEL_0_TRACEDT11_SHIFT 0
#define PADCTRLREG_TRACEDT11_SEL_0_TRACEDT11_MASK 0x00000001
#define PADCTRLREG_TRACEDT12_OFFSET 0x0000025C
#define PADCTRLREG_TRACEDT12_TYPE UInt32
#define PADCTRLREG_TRACEDT12_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TRACEDT12_PINSEL_TRACEDT12_SHIFT 8
#define PADCTRLREG_TRACEDT12_PINSEL_TRACEDT12_MASK 0x00000700
#define PADCTRLREG_TRACEDT12_UNUSED_7_SHIFT 7
#define PADCTRLREG_TRACEDT12_UNUSED_7_MASK 0x00000080
#define PADCTRLREG_TRACEDT12_PDN_TRACEDT12_SHIFT 6
#define PADCTRLREG_TRACEDT12_PDN_TRACEDT12_MASK 0x00000040
#define PADCTRLREG_TRACEDT12_PUP_TRACEDT12_SHIFT 5
#define PADCTRLREG_TRACEDT12_PUP_TRACEDT12_MASK 0x00000020
#define PADCTRLREG_TRACEDT12_SRC_TRACEDT12_SHIFT 4
#define PADCTRLREG_TRACEDT12_SRC_TRACEDT12_MASK 0x00000010
#define PADCTRLREG_TRACEDT12_IND_TRACEDT12_SHIFT 3
#define PADCTRLREG_TRACEDT12_IND_TRACEDT12_MASK 0x00000008
#define PADCTRLREG_TRACEDT12_UNUSED_2_SHIFT 2
#define PADCTRLREG_TRACEDT12_UNUSED_2_MASK 0x00000004
#define PADCTRLREG_TRACEDT12_SEL_1_TRACEDT12_SHIFT 1
#define PADCTRLREG_TRACEDT12_SEL_1_TRACEDT12_MASK 0x00000002
#define PADCTRLREG_TRACEDT12_SEL_0_TRACEDT12_SHIFT 0
#define PADCTRLREG_TRACEDT12_SEL_0_TRACEDT12_MASK 0x00000001
#define PADCTRLREG_TRACEDT13_OFFSET 0x00000260
#define PADCTRLREG_TRACEDT13_TYPE UInt32
#define PADCTRLREG_TRACEDT13_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TRACEDT13_PINSEL_TRACEDT13_SHIFT 8
#define PADCTRLREG_TRACEDT13_PINSEL_TRACEDT13_MASK 0x00000700
#define PADCTRLREG_TRACEDT13_UNUSED_7_SHIFT 7
#define PADCTRLREG_TRACEDT13_UNUSED_7_MASK 0x00000080
#define PADCTRLREG_TRACEDT13_PDN_TRACEDT13_SHIFT 6
#define PADCTRLREG_TRACEDT13_PDN_TRACEDT13_MASK 0x00000040
#define PADCTRLREG_TRACEDT13_PUP_TRACEDT13_SHIFT 5
#define PADCTRLREG_TRACEDT13_PUP_TRACEDT13_MASK 0x00000020
#define PADCTRLREG_TRACEDT13_SRC_TRACEDT13_SHIFT 4
#define PADCTRLREG_TRACEDT13_SRC_TRACEDT13_MASK 0x00000010
#define PADCTRLREG_TRACEDT13_IND_TRACEDT13_SHIFT 3
#define PADCTRLREG_TRACEDT13_IND_TRACEDT13_MASK 0x00000008
#define PADCTRLREG_TRACEDT13_UNUSED_2_SHIFT 2
#define PADCTRLREG_TRACEDT13_UNUSED_2_MASK 0x00000004
#define PADCTRLREG_TRACEDT13_SEL_1_TRACEDT13_SHIFT 1
#define PADCTRLREG_TRACEDT13_SEL_1_TRACEDT13_MASK 0x00000002
#define PADCTRLREG_TRACEDT13_SEL_0_TRACEDT13_SHIFT 0
#define PADCTRLREG_TRACEDT13_SEL_0_TRACEDT13_MASK 0x00000001
#define PADCTRLREG_TRACEDT14_OFFSET 0x00000264
#define PADCTRLREG_TRACEDT14_TYPE UInt32
#define PADCTRLREG_TRACEDT14_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TRACEDT14_PINSEL_TRACEDT14_SHIFT 8
#define PADCTRLREG_TRACEDT14_PINSEL_TRACEDT14_MASK 0x00000700
#define PADCTRLREG_TRACEDT14_UNUSED_7_SHIFT 7
#define PADCTRLREG_TRACEDT14_UNUSED_7_MASK 0x00000080
#define PADCTRLREG_TRACEDT14_PDN_TRACEDT14_SHIFT 6
#define PADCTRLREG_TRACEDT14_PDN_TRACEDT14_MASK 0x00000040
#define PADCTRLREG_TRACEDT14_PUP_TRACEDT14_SHIFT 5
#define PADCTRLREG_TRACEDT14_PUP_TRACEDT14_MASK 0x00000020
#define PADCTRLREG_TRACEDT14_SRC_TRACEDT14_SHIFT 4
#define PADCTRLREG_TRACEDT14_SRC_TRACEDT14_MASK 0x00000010
#define PADCTRLREG_TRACEDT14_IND_TRACEDT14_SHIFT 3
#define PADCTRLREG_TRACEDT14_IND_TRACEDT14_MASK 0x00000008
#define PADCTRLREG_TRACEDT14_UNUSED_2_SHIFT 2
#define PADCTRLREG_TRACEDT14_UNUSED_2_MASK 0x00000004
#define PADCTRLREG_TRACEDT14_SEL_1_TRACEDT14_SHIFT 1
#define PADCTRLREG_TRACEDT14_SEL_1_TRACEDT14_MASK 0x00000002
#define PADCTRLREG_TRACEDT14_SEL_0_TRACEDT14_SHIFT 0
#define PADCTRLREG_TRACEDT14_SEL_0_TRACEDT14_MASK 0x00000001
#define PADCTRLREG_TRACEDT15_OFFSET 0x00000268
#define PADCTRLREG_TRACEDT15_TYPE UInt32
#define PADCTRLREG_TRACEDT15_RESERVED_MASK 0xFFFFF800
#define PADCTRLREG_TRACEDT15_PINSEL_TRACEDT15_SHIFT 8
#define PADCTRLREG_TRACEDT15_PINSEL_TRACEDT15_MASK 0x00000700
#define PADCTRLREG_TRACEDT15_UNUSED_7_SHIFT 7
#define PADCTRLREG_TRACEDT15_UNUSED_7_MASK 0x00000080
#define PADCTRLREG_TRACEDT15_PDN_TRACEDT15_SHIFT 6
#define PADCTRLREG_TRACEDT15_PDN_TRACEDT15_MASK 0x00000040
#define PADCTRLREG_TRACEDT15_PUP_TRACEDT15_SHIFT 5
#define PADCTRLREG_TRACEDT15_PUP_TRACEDT15_MASK 0x00000020
#define PADCTRLREG_TRACEDT15_SRC_TRACEDT15_SHIFT 4
#define PADCTRLREG_TRACEDT15_SRC_TRACEDT15_MASK 0x00000010
#define PADCTRLREG_TRACEDT15_IND_TRACEDT15_SHIFT 3
#define PADCTRLREG_TRACEDT15_IND_TRACEDT15_MASK 0x00000008
#define PADCTRLREG_TRACEDT15_UNUSED_2_SHIFT 2
#define PADCTRLREG_TRACEDT15_UNUSED_2_MASK 0x00000004
#define PADCTRLREG_TRACEDT15_SEL_1_TRACEDT15_SHIFT 1
#define PADCTRLREG_TRACEDT15_SEL_1_TRACEDT15_MASK 0x00000002
#define PADCTRLREG_TRACEDT15_SEL_0_TRACEDT15_SHIFT 0
#define PADCTRLREG_TRACEDT15_SEL_0_TRACEDT15_MASK 0x00000001
#define PADCTRLREG_ACCESS_LOCK0_OFFSET 0x00000780
#define PADCTRLREG_ACCESS_LOCK0_TYPE UInt32
#define PADCTRLREG_ACCESS_LOCK0_RESERVED_MASK 0x00000000
#define PADCTRLREG_ACCESS_LOCK0_ACCESS_LOCK_SHIFT 0
#define PADCTRLREG_ACCESS_LOCK0_ACCESS_LOCK_MASK 0xFFFFFFFF
#define PADCTRLREG_ACCESS_LOCK1_OFFSET 0x00000784
#define PADCTRLREG_ACCESS_LOCK1_TYPE UInt32
#define PADCTRLREG_ACCESS_LOCK1_RESERVED_MASK 0x00000000
#define PADCTRLREG_ACCESS_LOCK1_ACCESS_LOCK_SHIFT 0
#define PADCTRLREG_ACCESS_LOCK1_ACCESS_LOCK_MASK 0xFFFFFFFF
#define PADCTRLREG_ACCESS_LOCK2_OFFSET 0x00000788
#define PADCTRLREG_ACCESS_LOCK2_TYPE UInt32
#define PADCTRLREG_ACCESS_LOCK2_RESERVED_MASK 0x00000000
#define PADCTRLREG_ACCESS_LOCK2_ACCESS_LOCK_SHIFT 0
#define PADCTRLREG_ACCESS_LOCK2_ACCESS_LOCK_MASK 0xFFFFFFFF
#define PADCTRLREG_ACCESS_LOCK3_OFFSET 0x0000078C
#define PADCTRLREG_ACCESS_LOCK3_TYPE UInt32
#define PADCTRLREG_ACCESS_LOCK3_RESERVED_MASK 0x00000000
#define PADCTRLREG_ACCESS_LOCK3_ACCESS_LOCK_SHIFT 0
#define PADCTRLREG_ACCESS_LOCK3_ACCESS_LOCK_MASK 0xFFFFFFFF
#define PADCTRLREG_ACCESS_LOCK4_OFFSET 0x00000790
#define PADCTRLREG_ACCESS_LOCK4_TYPE UInt32
#define PADCTRLREG_ACCESS_LOCK4_RESERVED_MASK 0xF8000000
#define PADCTRLREG_ACCESS_LOCK4_ACCESS_LOCK_SHIFT 0
#define PADCTRLREG_ACCESS_LOCK4_ACCESS_LOCK_MASK 0x07FFFFFF
#define PADCTRLREG_WR_ACCESS_OFFSET 0x000007F0
#define PADCTRLREG_WR_ACCESS_TYPE UInt32
#define PADCTRLREG_WR_ACCESS_RESERVED_MASK 0xFF0000FE
#define PADCTRLREG_WR_ACCESS_PASSWORD_SHIFT 8
#define PADCTRLREG_WR_ACCESS_PASSWORD_MASK 0x00FFFF00
#define PADCTRLREG_WR_ACCESS_PADCTRL_ACCESS_SHIFT 0
#define PADCTRLREG_WR_ACCESS_PADCTRL_ACCESS_MASK 0x00000001
#endif /* __BRCM_RDB_PADCTRLREG_H__ */
| baran0119/kernel_samsung_baffinlitexx | arch/arm/mach-java/include/mach/rdb_A0/brcm_rdb_padctrlreg.h | C | gpl-2.0 | 264,469 |
<?php
/**
* Zend Framework (http://framework.zend.com/)
*
* @link http://github.com/zendframework/zf2 for the canonical source repository
* @copyright Copyright (c) 2005-2014 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
* @package Zend_Service
*/
namespace ZendService\Amazon\Authentication;
use Zend\Crypt\Hmac;
/**
* @category Zend
* @package Zend_Service_Amazon
* @subpackage Authentication
*/
class V1 extends AbstractAuthentication
{
    /**
     * Signature Version sent to AWS as the SignatureVersion request parameter.
     */
    protected $_signatureVersion = '1';
    /**
     * Signature Encoding Method.
     *
     * NOTE(review): declared here as 'HmacSHA256', but _signParameters()
     * below computes an HMAC-SHA1 digest. AWS Signature Version 1
     * historically uses HMAC-SHA1, so the property value looks inconsistent
     * with the implementation — confirm against the consumer of this field
     * (in AbstractAuthentication or the request builder) before changing
     * either side.
     */
    protected $_signatureMethod = 'HmacSHA256';
    /**
     * Generate the required attributes for the signature
     *
     * Adds AWSAccessKeyId, SignatureVersion and Version to $parameters,
     * defaults Timestamp to 10 seconds in the future (presumably to absorb
     * clock skew — TODO confirm) when the caller did not supply one, then
     * delegates to _signParameters(), which also stores the base64-encoded
     * signature into $parameters['Signature'] as a side effect.
     *
     * @param string $url
     * @param array $parameters request parameters; modified in place
     * @return string the canonical (unsigned) data string that was signed
     */
    public function generateSignature($url, array &$parameters)
    {
        $parameters['AWSAccessKeyId'] = $this->_accessKey;
        $parameters['SignatureVersion'] = $this->_signatureVersion;
        $parameters['Version'] = $this->_apiVersion;
        if (!isset($parameters['Timestamp'])) {
            $parameters['Timestamp'] = gmdate('Y-m-d\TH:i:s\Z', time()+10);
        }
        $data = $this->_signParameters($url, $parameters);
        return $data;
    }
    /**
     * Computes the RFC 2104-compliant HMAC signature for request parameters
     *
     * This implements the Amazon Web Services signature, as per the following
     * specification:
     *
     * 1. Sort all request parameters (including <tt>SignatureVersion</tt> and
     * excluding <tt>Signature</tt>, the value of which is being created),
     * ignoring case.
     *
     * 2. Iterate over the sorted list and append the parameter name (in its
     * original case) and then its value. Do not URL-encode the parameter
     * values before constructing this string. Do not use any separator
     * characters when appending strings.
     *
     * Side effect: writes the base64-encoded binary HMAC-SHA1 digest into
     * $parameters['Signature'] (any pre-existing 'Signature' entry is
     * removed before signing, as required by the spec above).
     *
     * @param string $url Queue URL
     * @param array $parameters the parameters for which to get the signature.
     *
     * @return string the signed data.
     */
    protected function _signParameters($url, array &$parameters)
    {
        $data = '';
        // Case-insensitive sort by key; parameter NAMES keep their original
        // case in the concatenated string, only the ordering ignores case.
        uksort($parameters, 'strcasecmp');
        unset($parameters['Signature']);
        foreach ($parameters as $key => $value) {
            $data .= $key . $value;
        }
        // HMAC-SHA1 over the concatenated key/value string, raw binary output.
        $hmac = Hmac::compute($this->_secretKey, 'SHA1', $data, Hmac::OUTPUT_BINARY);
        $parameters['Signature'] = base64_encode($hmac);
        return $data;
    }
}
| IISH/vufind | vendor/zendframework/zendservice-amazon/library/ZendService/Amazon/Authentication/V1.php | PHP | gpl-2.0 | 2,650 |
/* VIDIS is a simulation and visualisation framework for distributed systems.
Copyright (C) 2009 Dominik Psenner, Christoph Caks
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, see <http://www.gnu.org/licenses/>. */
package vidis.ui.model.structure;
import java.util.Set;
import javax.media.opengl.GL;
import javax.vecmath.Point2d;
import vidis.ui.events.IEventHandler;
/**
 * A node in the GUI container hierarchy: a renderable, event-handling
 * rectangular region that can hold child containers and lay them out.
 */
public interface IGuiContainer extends IVisObject, IEventHandler {
	/** Small z offset, presumably used to stack nested containers in front of their parent — confirm at render sites. */
	public final double Z_OFFSET = 0.0002;
	/** Renders this container using the given GL context. */
	public void render( GL gl );
	/** Renders this container's bounding box using the given GL context; semantics of {@code d} depend on the implementation. */
	public void renderBox( GL gl, double d );
	/** Returns the height this container would like to have (may differ from its current height). */
	public double getWantedHeight();
	/** Returns the current height of this container. */
	public double getHeight();
	/** Returns the current width of this container. */
	public double getWidth();
	/** Returns the x coordinate, relative to the parent container. */
	public double getX();
	/** Returns the y coordinate, relative to the parent container. */
	public double getY();
	/**
	 * returns x in the roots coordinate system
	 * @return x coordinate relative to the root container
	 */
	public double getAbsoluteX();
	/**
	 * returns y in the roots coordinate system
	 * @return y coordinate relative to the root container
	 */
	public double getAbsoluteY();
	/** Sets the height of this container. */
	public void setHeight( double height );
	/** Sets the width of this container. */
	public void setWidth( double width );
	/** Sets the x coordinate of this container. */
	public void setX( double x );
	/** Sets the y coordinate of this container. */
	public void setY( double y );
	/** Sets position and size in one call. */
	public void setBounds( double x, double y, double height, double width );
	/** Assigns the layout strategy used to arrange this container's children. */
	public void setLayout( ILayout layout );
	/** Returns the layout strategy used to arrange this container's children. */
	public ILayout getLayout();
	// parent child ..
	/** Returns the parent container, or null if this is a root (confirm null-contract in implementations). */
	public IGuiContainer getParent();
	/** Returns the set of child containers. */
	public Set<IGuiContainer> getChilds();
	/** Adds a child container. */
	public void addChild( IGuiContainer c );
	/** Removes a child container. */
	public void removeChild( IGuiContainer c );
	/** Sets the parent container of this container. */
	public void setParent( IGuiContainer container );
	// ----
	/** Tests whether the given point lies inside this container's bounds. */
	public boolean isPointInContainer( Point2d p );
	/** Shows or hides this container. */
	public void setVisible( boolean v );
	/** Returns whether this container is currently visible. */
	public boolean isVisible();
}
| josepadilla930105/vidis | src/vidis/ui/model/structure/IGuiContainer.java | Java | gpl-3.0 | 2,144 |
// Boilerplate support routines for -*- C++ -*- dynamic memory management.
// Copyright (C) 1997-2016 Free Software Foundation, Inc.
//
// This file is part of GCC.
//
// GCC is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3, or (at your option)
// any later version.
//
// GCC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
#include <bits/c++config.h>
#include "new"
// The sized deletes are defined in other files.
#pragma GCC diagnostic ignored "-Wsized-deallocation"
// Default array deallocation: simply forwards to the scalar operator delete.
// Declared with _GLIBCXX_WEAK_DEFINITION so a program-supplied replacement
// of operator delete[] takes precedence over this definition.
_GLIBCXX_WEAK_DEFINITION void
operator delete[] (void *ptr) _GLIBCXX_USE_NOEXCEPT
{
::operator delete (ptr);
}
| selmentdev/selment-toolchain | source/gcc-latest/libstdc++-v3/libsupc++/del_opv.cc | C++ | gpl-3.0 | 1,345 |
.path-mod-assign div.gradingnavigation div {
float: left;
margin-left: 2em;
}
.path-mod-assign div.submissionstatustable,
.path-mod-assign div.submissionfull,
.path-mod-assign div.submissionlinks,
.path-mod-assign div.usersummary,
.path-mod-assign div.feedback,
.path-mod-assign div.gradingsummary {
margin-bottom: 5em;
}
.path-mod-assign div.submissionstatus .generaltable,
.path-mod-assign div.submissionlinks .generaltable,
.path-mod-assign div.feedback .generaltable,
.path-mod-assign div.submissionsummarytable .generaltable,
.path-mod-assign div.attempthistory table,
.path-mod-assign div.gradingsummary .generaltable {
width: 100%;
}
.path-mod-assign table.generaltable table td {
border: 0px none;
}
.path-mod-assign .gradingsummarytable,
.path-mod-assign .feedbacktable,
.path-mod-assign .lockedsubmission,
.path-mod-assign .submissionsummarytable {
margin-top: 1em;
}
.path-mod-assign div.submissionsummarytable table tbody tr td.c0 {
width: 30%;
}
.path-mod-assign .submittedlate {
color: red;
font-weight: 900;
}
.path-mod-assign.jsenabled .gradingoptionsform .fsubmit {
display: none;
}
.path-mod-assign.jsenabled .gradingtable .c1 select {
display: none;
}
.path-mod-assign .quickgradingform .mform fieldset {
margin: 0px;
padding: 0px;
}
.path-mod-assign .gradingbatchoperationsform .mform fieldset {
margin: 0px;
padding: 0px;
}
.path-mod-assign td.submissionstatus,
.path-mod-assign div.submissionstatus,
.path-mod-assign a:link.submissionstatus {
color: black;
background-color: #efefef;
}
.path-mod-assign td.submissionstatusdraft,
.path-mod-assign div.submissionstatusdraft,
.path-mod-assign a:link.submissionstatusdraft {
color: black;
background-color: #efefcf;
}
.path-mod-assign td.submissionstatussubmitted,
.path-mod-assign div.submissionstatussubmitted,
.path-mod-assign a:link.submissionstatussubmitted {
color: black;
background-color: #cfefcf;
}
.path-mod-assign td.submissionlocked,
.path-mod-assign div.submissionlocked {
color: black;
background-color: #efefcf;
}
.path-mod-assign td.submissionreopened,
.path-mod-assign div.submissionreopened {
color: black;
background-color: #efefef;
}
.path-mod-assign td.submissiongraded,
.path-mod-assign div.submissiongraded {
color: black;
background-color: #cfefcf;
}
.path-mod-assign td.submissionnotgraded,
.path-mod-assign div.submissionnotgraded {
color: black;
background-color: #efefef;
}
.path-mod-assign td.latesubmission,
.path-mod-assign a:link.latesubmission,
.path-mod-assign div.latesubmission {
color: black;
background-color: #efcfcf;
}
.path-mod-assign td.earlysubmission,
.path-mod-assign div.earlysubmission {
color: black;
background-color: #cfefcf;
}
.path-mod-assign .gradingtable .c0 {
display: none;
}
.path-mod-assign.jsenabled .gradingtable .c0 {
display: table-cell;
}
.path-mod-assign .gradingbatchoperationsform {
display: none;
}
.path-mod-assign.jsenabled .gradingbatchoperationsform {
display: block;
}
.path-mod-assign .gradingtable tr.selectedrow td {
background-color: #ffeecc;
}
.path-mod-assign .gradingtable tr.unselectedrow td {
background-color: white;
}
.path-mod-assign .gradingtable .c0 div.selectall {
margin-left: 7px;
}
.path-mod-assign .gradingtable .yui3-menu ul {
margin: 0px;
}
.path-mod-assign .gradingtable .yui3-menu-label {
padding-left: 0px;
line-height: 12px;
}
.path-mod-assign .gradingtable .yui3-menu-label img {
padding: 0 3px;
}
.path-mod-assign .gradingtable .yui3-menu li {
list-style-type: none;
}
.path-mod-assign.jsenabled .gradingtable .yui3-loading {
display: none;
}
.path-mod-assign .gradingtable .yui3-menu .yui3-menu-content {
border: 0px;
padding-top: 0;
}
.path-mod-assign div.gradingtable tr .quickgrademodified {
background-color: #FFCC99;
}
.path-mod-assign td.submissioneditable {
color: red;
}
.path-mod-assign .expandsummaryicon {
cursor: pointer;
display: none;
}
.path-mod-assign.jsenabled .expandsummaryicon {
display: inline;
}
.path-mod-assign .hidefull {
display: none;
}
.path-mod-assign .quickgradingform form .commentscontainer input,
.path-mod-assign .quickgradingform form .commentscontainer textarea {
display: none;
}
.path-mod-assign.jsenabled .quickgradingform form .commentscontainer input,
.path-mod-assign.jsenabled .quickgradingform form .commentscontainer textarea {
display: inline;
}
.path-mod-assign .previousfeedbackwarning {
font-size: 140%;
font-weight: bold;
text-align: center;
color: #500;
}
.path-mod-assign .submissionhistory {
background-color: #b0b0b0;
}
.path-mod-assign .submissionhistory .cell.historytitle {
background-color: #808080;
}
.path-mod-assign .submissionhistory .cell {
background-color: #d0d0d0;
}
.path-mod-assign.jsenabled .mod-assign-history-link {
display: block;
cursor: pointer;
margin-bottom: 7px;
}
.path-mod-assign.jsenabled .mod-assign-history-link h4 {
display: inline;
}
.path-mod-assign.jsenabled .attempthistory h4 {
margin-bottom: 7px;
text-align: left;
}
.path-mod-assign.jsenabled.dir_rtl .attempthistory h4 {
text-align: right;
}
.path-mod-assign.dir-rtl.jsenabled .mod-assign-history-link h4 {
text-align: right;
}
.path-mod-assign.jsenabled .mod-assign-history-link-open {
padding: 0 5px 0 20px; background: url([[pix:t/expanded]]) 2px center no-repeat;
}
.path-mod-assign.jsenabled .mod-assign-history-link-closed {
padding: 0 5px 0 20px; background: url([[pix:t/collapsed]]) 2px center no-repeat;
}
.path-mod-assign.dir-rtl.jsenabled .mod-assign-history-link-closed {
padding: 0 20px 0 5px; background: url([[pix:t/collapsed_rtl]]) 2px center no-repeat;
}
.path-mod-assign .submithelp {
padding: 1em;
}
.path-mod-assign .feedbacktitle {
font-weight: bold;
}
.path-mod-assign .submitconfirm,
.path-mod-assign .submissionlinks,
.path-mod-assign .submissionaction {
text-align: center;
}
.path-mod-assign .submissionsummarytable .c0,
.path-mod-assign .mod-assign-history-panel .c0 {
width: 150px;
}
.path-mod-assign .gradingtable .moodle-actionmenu {
white-space: nowrap;
}
.path-mod-assign .gradingtable .moodle-actionmenu[data-enhanced].show .menu a {
padding-left: 12px;
padding-right: 12px;
}
.path-mod-assign .gradingtable .menu-action img {
display: none;
}
.path-mod-assign .editsubmissionform input[name="submissionstatement"] {
vertical-align: top;
}
.path-mod-assign .editsubmissionform label[for="id_submissionstatement"] {
display: inline-block;
}
.path-mod-assign.layout-option-nonavbar {
padding-top: 0px;
}
.path-mod-assign [data-region="user-selector"] select {
margin-bottom: 0px;
}
.path-mod-assign [data-region="user-selector"] .alignment {
float: right;
width: 320px;
text-align: center;
margin-top: 7px;
}
.path-mod-assign [data-region="user-selector"] [data-action="previous-user"],
.path-mod-assign [data-region="user-selector"] [data-action="next-user"] {
font-size: 26px;
}
.path-mod-assign [data-region="user-selector"] [data-action="next-user"] {
margin-left: -10px;
}
.dir-rtl.path-mod-assign [data-region="user-selector"] [data-action="next-user"] {
margin-right: -10px;
}
.dir-rtl.path-mod-assign [data-region="user-selector"] .alignment {
float: left;
}
.path-mod-assign [data-region="user-selector"] .alignment input {
margin-bottom: 5px;
}
.path-mod-assign [data-region="user-selector"] .alignment .form-autocomplete-downarrow {
top: 0;
}
.path-mod-assign [data-region="user-selector"] .form-autocomplete-selection {
display: none;
}
.path-mod-assign [data-region="user-selector"] .form-autocomplete-suggestions {
text-align: left;
}
.path-mod-assign [data-region="user-selector"] .form-autocomplete-suggestions {
margin-left: 48px;
}
.dir-rtl.path-mod-assign [data-region="user-selector"] .form-autocomplete-suggestions {
margin-right: 64px;
}
.path-mod-assign [data-region="user-filters"] {
font-size: small;
}
.path-mod-assign [data-region="configure-filters"] {
display: none;
text-align: left;
width: auto;
background-color: #fff;
background-clip: padding-box;
box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);
border-radius: 6px;
position: absolute;
margin-top: 28px;
margin-left: -140px;
padding: 10px 0;
z-index: 1;
}
.path-mod-assign [data-region="configure-filters"]::before,
.path-mod-assign [data-region="configure-filters"]::after {
position: absolute;
left: auto;
display: inline-block;
content: '';
border-style: solid;
border-color: transparent;
border-top: none;
}
.path-mod-assign [data-region="configure-filters"]::before {
top: -7px;
right: 12px;
border-width: 7px;
border-bottom-color: rgba(0, 0, 0, 0.2);
}
.path-mod-assign [data-region="configure-filters"]::after {
top: -6px;
right: 13px;
border-width: 6px;
border-bottom-color: #fff;
}
.path-mod-assign.dir-rtl [data-region="configure-filters"] {
text-align: right;
margin-left: 0;
margin-right: -140px;
}
.path-mod-assign [data-region="configure-filters"] label {
padding: 3px 20px;
}
.path-mod-assign .alignment [data-region="configure-filters"] input {
margin-bottom: 0;
}
.path-mod-assign [data-region="grading-navigation-panel"] {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 6em;
margin: 0;
border-bottom: 1px solid #ddd;
}
.path-mod-assign [data-region="grading-navigation"] {
padding: 1em;
}
.path-mod-assign [data-region="user-info"] {
height: 60px;
}
.path-mod-assign [data-region="user-info"] a {
text-decoration: none;
}
.path-mod-assign [data-region="user-info"] .img-rounded {
display: block;
float: left;
margin-top: -3px;
margin-right: 10px;
}
.dir-rtl.path-mod-assign [data-region="user-info"] .img-rounded {
float: right;
margin-left: 10px;
}
.path-mod-assign [data-region="user-info"] em {
display: block;
font-style: normal;
}
.path-mod-assign [data-region="grading-actions-form"] label {
display: inline-block;
}
.path-mod-assign.pagelayout-embedded {
overflow: hidden;
}
.path-mod-assign [data-region="review-panel"] {
position: absolute;
top: 85px;
bottom: 60px;
left: 0;
width: 70%;
box-sizing: border-box;
}
.path-mod-assign [data-region="review-panel"] .pageheader {
border-right: 1px solid #ddd;
}
.path-mod-assign [data-region="review-panel"] .drawingregion {
left: 0;
right: 0;
border-color: #ddd;
}
.path-mod-assign [data-region="grade-panel"].fullwidth {
position: absolute;
top: 7em;
left: 0;
right: 0;
width: 99%;
overflow: auto;
bottom: 7em;
}
.path-mod-assign [data-region="grade-panel"].fullwidth [data-region="grade"] {
max-width: 800px;
margin-left: auto;
margin-right: auto;
}
.path-mod-assign [data-region="grade-panel"] {
position: absolute;
top: 85px;
bottom: 60px;
right: 0;
width: 30%;
overflow: auto;
box-sizing: border-box;
background-color: #f5f5f5;
padding: 15px;
padding-top: 0px;
}
.path-mod-assign [data-region="grade-panel"] h3 {
font-size: 18px;
font-weight: 500;
}
/***** Start submission status *****/
.path-mod-assign [data-region="grade-panel"] div.submissionstatustable {
margin-bottom: 2em;
}
.path-mod-assign [data-region="grade-panel"] .submissionsummarytable {
margin-left: 5px;
margin-right: 5px;
}
.path-mod-assign [data-region="grade-panel"] .submissionsummarytable table.generaltable td {
padding: 8px 0;
background-color: transparent;
}
.path-mod-assign [data-region="grade-panel"] .submissionsummarytable .generaltable tbody > tr:nth-child(2n+1) > td,
.path-mod-assign [data-region="grade-panel"] .submissionsummarytable .generaltable tbody tr:hover > td {
background-color: transparent;
}
.path-mod-assign [data-region="grade-panel"] div.submissionsummarytable table tbody tr td.c0 {
width: auto;
}
.path-mod-assign [data-region="grade-panel"] div.submissionsummarytable table tbody tr.lastrow td.c0,
.path-mod-assign [data-region="grade-panel"] div.submissionsummarytable table tbody tr.lastrow td.c1 {
border-bottom: 1px solid #ddd;
}
.path-mod-assign [data-region="grade-panel"] td.submissionnotgraded,
.path-mod-assign [data-region="grade-panel"] div.submissionnotgraded {
color: red;
background-color: transparent;
}
/***** End submission status *****/
.path-mod-assign [data-region="grade-panel"] #id_gradeheader {
display: table-cell;
min-width: 0;
}
.path-mod-assign [data-region="grade-panel"] #id_gradeheader > legend {
visibility: hidden;
height: 0;
margin-bottom: 0;
}
.path-mod-assign [data-region="grade-panel"] .comment-area textarea[cols] {
width: 100%;
box-sizing: border-box;
}
.path-mod-assign [data-region="grade-panel"] .mform .fitem.fitem_ftext,
.path-mod-assign [data-region="grade-panel"] .mform .fitem.fitem_f,
.path-mod-assign [data-region="grade-panel"] .mform .fitem.fitem_feditor,
.path-mod-assign [data-region="grade-panel"] .mform .fitem.fitem_ffilemanager {
background-color: #fff;
border: 1px solid #ddd;
margin-bottom: 20px;
}
.path-mod-assign [data-region="grade-panel"] .mform .fitem.fitem_ftext .fitemtitle,
.path-mod-assign [data-region="grade-panel"] .mform .fitem.fitem_f .fitemtitle,
.path-mod-assign [data-region="grade-panel"] .mform .fitem.fitem_feditor .fitemtitle,
.path-mod-assign [data-region="grade-panel"] .mform .fitem.fitem_ffilemanager .fitemtitle {
padding-left: 5px;
padding-right: 5px;
}
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) .fcontainer .fitem.fitem_ftext .felement,
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) .fcontainer .fitem.fitem_f .felement,
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) .fcontainer .fitem.fitem_feditor .felement,
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) .fcontainer .fitem.fitem_ffilemanager .felement {
padding: 6px 10px 10px;
box-sizing: border-box;
}
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) .fitem.fitem_ftext .fitemtitle,
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) .fitem.fitem_f .fitemtitle,
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) .fitem.fitem_feditor .fitemtitle,
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) .fitem.fitem_ffilemanager .fitemtitle {
border-bottom: 1px solid #ddd;
box-shadow: 0 1px 1px rgba(0,0,0,0.05);
padding: 6px 10px 3px;
box-sizing: border-box;
}
.path-mod-assign #page-content [data-region="grade-panel"] [data-region="popout-button"] img {
margin-left: 2px;
margin-right: 2px;
margin-top: -2px;
}
.path-mod-assign #page-content [data-region="grade-panel"] .popout [data-region="popout-button"] img {
margin-left: -6px;
margin-right: -6px;
margin-top: 4px;
}
.path-mod-assign [data-region="grade-panel"] .fitem .fstaticlabel,
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) .fitem .fitemtitle label {
font-weight: 500;
}
/***** Start grade *****/
.path-mod-assign [data-region="grade-panel"] .mform #fitem_id_grade.fitem {
padding-top: 5px;
}
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) #fitem_id_grade.fitem .fitemtitle {
display: inline-block;
width: auto;
border-bottom: none;
box-shadow: none;
}
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) #fitem_id_grade.fitem .felement {
width: auto;
float: right;
}
.path-mod-assign #page-content .mform:not(.unresponsive) #fitem_id_grade.fitem .felement input {
width: 80px;
margin-bottom: 0;
}
/***** End grade *****/
/***** Start rubric *****/
.path-mod-assign [data-region="grade-panel"] .gradingform_rubric {
padding-bottom: 0;
max-width: none;
}
.path-mod-assign [data-region="grade-panel"] .gradingform_rubric .criterion .description {
font-weight: 500;
min-width: 150px;
}
.path-mod-assign [data-region="grade-panel"] .gradingform_rubric .criterion .levels {
background-color: #fff;
}
.path-mod-assign [data-region="grade-panel"] .gradingform_rubric .criterion,
.path-mod-assign [data-region="grade-panel"] .gradingform_rubric .criterion.even {
background-color: transparent;
}
.path-mod-assign [data-region="grade-panel"] .gradingform_rubric.evaluate .criterion .levels .level:hover {
background-color: #dff0d8;
}
.path-mod-assign [data-region="grade-panel"] .gradingform_rubric .criterion .levels .level.checked {
background-color: #dff0d8;
border: none;
border-left: 1px solid #ddd;
}
.path-mod-assign [data-region="grade-panel"] .gradingform_rubric .criterion .levels .level .score {
color: #468847;
font-weight: 500;
font-style: normal;
margin-top: 20px;
}
.path-mod-assign [data-region="grade-panel"] .gradingform_rubric .criterion .remark textarea {
margin-bottom: 0;
}
/***** End rubric *****/
/***** Start marking guide *****/
.path-mod-assign [data-region="grade-panel"] .gradingform_guide {
margin-bottom: 10px;
}
.path-mod-assign [data-region="grade-panel"] .gradingform_guide .descriptionreadonly,
.path-mod-assign [data-region="grade-panel"] .gradingform_guide .remark,
.path-mod-assign [data-region="grade-panel"] .gradingform_guide .score {
display: block;
}
.path-mod-assign [data-region="grade-panel"] .gradingform_guide .descriptionreadonly {
padding-top: 10px;
}
.path-mod-assign [data-region="grade-panel"] .gradingform_guide .criteriondescription {
margin-top: 5px;
}
.path-mod-assign [data-region="grade-panel"] .gradingform_guide .criteriondescriptionmarkers {
width: auto;
margin-top: 5px;
}
.path-mod-assign [data-region="grade-panel"] .gradingform_guide .markingguideremark {
margin-bottom: 10px;
}
.path-mod-assign [data-region="grade-panel"] .gradingform_guide .remark .commentchooser {
float: right;
margin-top: 2px;
margin-left: 0;
}
.path-mod-assign [data-region="grade-panel"] .gradingform_guide .score {
float: left;
padding-bottom: 8px;
}
.path-mod-assign [data-region="grade-panel"] .gradingform_guide .score input,
.path-mod-assign [data-region="grade-panel"] .gradingform_guide .score div {
display: inline-block;
}
.path-mod-assign [data-region="grade-panel"] .gradingform_guide .criterion,
.path-mod-assign [data-region="grade-panel"] .gradingform_guide .criterion.even {
background-color: transparent;
border-width: 0 0 1px 0;
padding: 8px 0;
}
.path-mod-assign [data-region="grade-panel"] .showmarkerdesc,
.path-mod-assign [data-region="grade-panel"] .showstudentdesc {
background-color: #f5f5f5;
padding: 10px;
}
/***** End marking guide *****/
.path-mod-assign [data-region="grade-panel"] .fitem.fitem_ffilemanager {
margin-bottom: 0;
}
/***** Start popout dialogue *****/
.path-mod-assign [data-region="grade-panel"] .fitem.popout {
position: fixed;
left: 20%;
right: 20%;
top: 20%;
bottom: 20%;
z-index: 1000;
border: 1px solid rgba(0, 0, 0, 0.3);
border-radius: 6px;
box-shadow: 0 3px 7px rgba(0, 0, 0, 0.3);
}
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) .fcontainer .fitem.popout .fitemtitle {
text-align: center;
padding-left: 15px;
padding-right: 15px;
height: 45px;
}
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) .fcontainer .fitem.popout .fitemtitle label {
font-size: 16px;
line-height: 30px;
}
.path-mod-assign #page-content [data-region="grade-panel"] [data-region="popout-button"] {
float: right;
}
.dir-rtl.path-mod-assign #page-content [data-region="grade-panel"] [data-region="popout-button"] {
float: left;
}
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) .fitem.popout .fitemtitle [data-region="popout-button"] img {
margin-top: -10px;
margin-right: -7px;
}
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) .fcontainer .fitem.popout .felement {
padding: 10px 15px 15px;
height: calc(100% - 54px);
overflow: auto;
}
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) .fcontainer .fitem.popout .felement .gradingform_rubric {
overflow: visible;
}
/***** End popout dialogue *****/
/***** Start attempt settings *****/
.path-mod-assign [data-region="grade-panel"] #id_attemptsettings > legend {
font-size: 18px;
font-weight: 500;
line-height: 40px;
border-bottom: 0;
margin-bottom: 10px;
}
.path-mod-assign [data-region="grade-panel"] #id_attemptsettings .fcontainer {
display: table;
width: 100%;
padding-left: 5px;
padding-right: 5px;
margin-bottom: 10px;
box-sizing: border-box;
}
.path-mod-assign [data-region="grade-panel"] .mform #id_attemptsettings .fitem {
display: table-row;
}
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) #id_attemptsettings .fitem .fitemtitle,
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) #id_attemptsettings .fitem .felement {
display: table-cell;
float: none;
border-top: 1px solid #ddd;
padding: 8px 0;
}
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) #id_attemptsettings .fitem:last-of-type .fitemtitle,
.path-mod-assign #page-content [data-region="grade-panel"] .mform:not(.unresponsive) #id_attemptsettings .fitem:last-of-type .felement {
border-bottom: 1px solid #ddd;
}
.path-mod-assign [data-region="grade-panel"] #id_attemptsettings .fitem .fstaticlabel,
.path-mod-assign [data-region="grade-panel"] .mform:not(.unresponsive) #id_attemptsettings .fitem .fitemtitle label {
font-weight: 400;
}
.path-mod-assign [data-region="grade-panel"] .mform:not(.unresponsive) #id_attemptsettings .fitem .felement select {
margin-bottom: 0;
}
.path-mod-assign [data-region="grade-panel"] [data-region="attempt-chooser"] {
margin-bottom: 10px;
vertical-align: text-bottom;
}
/***** End attempt settings *****/
.path-mod-assign [data-region="grade-actions-panel"] {
border-top: 1px solid #ddd;
position: absolute;
bottom: 0;
left: 0;
width: 100%;
height: 60px;
}
.path-mod-assign [data-region="grade-actions"] {
padding: 1em;
text-align: center;
}
.path-mod-assign [data-region="submissions-list"] {
text-align: inherit;
}
.path-mod-assign [data-region="submissions-list"] label.radio input {
margin-top: 4px;
min-width: inherit;
}
.path-mod-assign [data-region="overlay"] {
display: none;
z-index: 100;
position: absolute;
top: 0em;
left: 0;
width: 100%;
overflow: auto;
bottom: 0em;
background-color: #ddd;
opacity: 0.4;
padding-top: 4em;
text-align: center;
}
@media (max-width: 767px) {
.path-mod-assign.pagelayout-embedded {
overflow: auto;
}
.path-mod-assign [data-region="assignment-info"] {
border-bottom: 1px solid #ddd;
padding-bottom: 5px;
}
.path-mod-assign .page-context-header .page-header-headings {
margin-top: 13px;
}
.path-mod-assign [data-region="grading-navigation-panel"],
.path-mod-assign [data-region="review-panel"],
.path-mod-assign [data-region="grade-panel"],
.path-mod-assign [data-region="grade-actions-panel"] {
position: inherit;
width: 100%;
top: 0;
left: 0;
overflow: auto;
height: auto;
margin-bottom: 1em;
}
.path-mod-assign [data-region="grading-navigation"] {
padding: 0;
text-align: center;
}
.path-mod-assign [data-region="grade-panel"] {
margin-bottom: 2em;
}
.path-mod-assign [data-region="grade-panel"] [data-region="popout-button"] {
display: none;
}
.path-mod-assign [data-region="review-panel"] .pageheader {
border-right: none;
}
.path-mod-assign.pagelayout-popup {
overflow: inherit;
}
.path-mod-assign [data-region="grading-navigation"] [data-region="user-info"] {
text-align: left;
width: auto;
display: inline-block;
margin: 0 auto;
}
.path-mod-assign [data-region="user-selector"] .alignment {
float: none;
margin: 0 auto 10px;
}
}
/** Start of CSS to make forms vertical in the grading panel (taken from theme/bootstrapbase/less/moodle/forms.less). */
.path-mod-assign [data-region="grade-panel"] .mform .fitem .fitemtitle {
display: block;
margin-top: 4px;
margin-bottom: 4px;
text-align: left;
width: 100%;
}
.path-mod-assign [data-region="grade-panel"] .mform .fitem .felement {
margin-left: 0;
width: 100%;
float: left;
padding-left: 0;
padding-right: 0;
}
.path-mod-assign [data-region="grade-panel"] .mform .fitem .fstatic:empty {
display: none;
}
.path-mod-assign [data-region="grade-panel"] .mform .fitem .fcheckbox > span,
.path-mod-assign [data-region="grade-panel"] .mform .fitem .fradio > span,
.path-mod-assign [data-region="grade-panel"] .mform .fitem .fgroup > span {
margin-top: 4px;
}
.path-mod-assign [data-region="grade-panel"] .mform .femptylabel .fitemtitle {
display: inline-block;
width: auto;
margin-right: 8px;
}
.path-mod-assign [data-region="grade-panel"] .mform .femptylabel .felement {
display: inline-block;
margin-top: 4px;
padding-top: 5px;
width: auto;
}
.path-mod-assign [data-region="grade-panel"] .mform .fitem_fcheckbox .fitemtitle,
.path-mod-assign [data-region="grade-panel"] .mform .fitem_fcheckbox .felement {
display: inline-block;
width: auto;
}
.path-mod-assign [data-region="grade-panel"] .mform .fitem_fcheckbox .felement {
padding: 6px;
}
.dir-rtl.path-mod-assign [data-region="grade-panel"] .mform .femptylabel .fitemtitle {
margin-right: 0px;
margin-left: 8px;
}
.dir-rtl.path-mod-assign [data-region="grade-panel"] .mform .fitem .fitemtitle {
text-align: right;
}
.dir-rtl.path-mod-assign [data-region="grade-panel"] .mform .fitem .felement {
margin-right: 0;
float: right;
padding-right: 0;
padding-left: 0;
}
.dir-rtl.path-mod-assign [data-region="grade-panel"] .mform .fitem_checkbox .felement {
float: right;
}
/** End of CSS to make forms vertical in the grading panel (taken from theme/bootstrapbase/less/moodle/forms.less). */
/** Styles to fix base theme **/
.path-mod-assign #page,
.path-mod-assign #page-content {
position: inherit;
}
/** End of base fixes **/
| rafaelperazzo/ufca-web | moodle/mod/assign/styles.css | CSS | gpl-3.0 | 27,084 |
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package treemap
import (
"github.com/emirpasic/gods/containers"
rbt "github.com/emirpasic/gods/trees/redblacktree"
)
// assertIteratorImplementation is a compile-time check that *Iterator
// satisfies the containers.ReverseIteratorWithKey interface.
func assertIteratorImplementation() {
var _ containers.ReverseIteratorWithKey = (*Iterator)(nil)
}
// Iterator holding the iterator's state.
// It is a thin wrapper: every method delegates to the underlying
// red-black tree iterator.
type Iterator struct {
iterator rbt.Iterator // wrapped tree iterator carrying all state
}
// Iterator returns a stateful iterator whose elements are key/value pairs.
func (m *Map) Iterator() Iterator {
return Iterator{iterator: m.tree.Iterator()} // backed by the map's red-black tree
}
// Next moves the iterator to the next element and returns true if there was a next element in the container.
// If Next() returns true, then next element's key and value can be retrieved by Key() and Value().
// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
// Modifies the state of the iterator.
func (iterator *Iterator) Next() bool {
return iterator.iterator.Next() // delegate to the underlying tree iterator
}
// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
// If Prev() returns true, then previous element's key and value can be retrieved by Key() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) Prev() bool {
return iterator.iterator.Prev() // delegate to the underlying tree iterator
}
// Value returns the current element's value.
// Does not modify the state of the iterator.
func (iterator *Iterator) Value() interface{} {
return iterator.iterator.Value() // delegate to the underlying tree iterator
}
// Key returns the current element's key.
// Does not modify the state of the iterator.
func (iterator *Iterator) Key() interface{} {
return iterator.iterator.Key() // delegate to the underlying tree iterator
}
// Begin resets the iterator to its initial state (one-before-first).
// Call Next() to fetch the first element if any.
func (iterator *Iterator) Begin() {
iterator.iterator.Begin() // delegate to the underlying tree iterator
}
// End moves the iterator past the last element (one-past-the-end).
// Call Prev() to fetch the last element if any.
func (iterator *Iterator) End() {
iterator.iterator.End() // delegate to the underlying tree iterator
}
// First moves the iterator to the first element and returns true if there was a first element in the container.
// If First() returns true, then first element's key and value can be retrieved by Key() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) First() bool {
return iterator.iterator.First() // delegate to the underlying tree iterator
}
// Last moves the iterator to the last element and returns true if there was a last element in the container.
// If Last() returns true, then last element's key and value can be retrieved by Key() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) Last() bool {
return iterator.iterator.Last() // delegate to the underlying tree iterator
}
| wupeaking/redgo | vendor/github.com/wupeaking/gods/maps/treemap/iterator.go | GO | gpl-3.0 | 2,772 |
/*
* Copyright (c) 2009, Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the Institute nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
package org.contikios.cooja.mspmote.plugins;
import java.awt.Color;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import org.apache.log4j.Logger;
import org.jdom.Element;
import org.contikios.cooja.Watchpoint;
import org.contikios.cooja.mspmote.MspMote;
import org.contikios.cooja.util.StringUtils;
import se.sics.mspsim.core.Memory;
import se.sics.mspsim.core.MemoryMonitor;
/**
* Mspsim watchpoint.
*
* @author Fredrik Osterlind
*/
public class MspBreakpoint implements Watchpoint {
private static Logger logger = Logger.getLogger(MspBreakpoint.class);
private MspMote mspMote;
private int address = -1; /* Binary address */
private File codeFile = null; /* Source code, may be null*/
private int lineNr = -1; /* Source code line number, may be null */
private MemoryMonitor memoryMonitor = null;
private boolean stopsSimulation = true;
private String msg = null;
private Color color = Color.BLACK;
private String contikiCode = null;
/**
 * Creates an unconfigured breakpoint; the caller is expected to configure it
 * afterwards via setConfigXML(..).
 *
 * @param mote mote this breakpoint belongs to
 */
public MspBreakpoint(MspMote mote) {
this.mspMote = mote;
/* expects setConfigXML(..) */
}
public MspBreakpoint(MspMote mote, Integer address, File codeFile, Integer lineNr) {
this(mote);
this.address = address;
this.codeFile = codeFile;
this.lineNr = lineNr;
createMonitor();
}
public MspMote getMote() {
return mspMote;
}
public Color getColor() {
return color;
}
public void setColor(Color color) {
this.color = color;
}
public String getDescription() {
String desc = "";
if (codeFile != null) {
desc += codeFile.getPath() + ":" + lineNr + " (0x" + Integer.toHexString(address) + ")";
} else if (address >= 0) {
desc += "0x" + Integer.toHexString(address);
}
if (msg != null) {
desc += "\n\n" + msg;
}
return desc;
}
public void setUserMessage(String msg) {
this.msg = msg;
}
public String getUserMessage() {
return msg;
}
public File getCodeFile() {
return codeFile;
}
public int getLineNumber() {
return lineNr;
}
public int getExecutableAddress() {
return address;
}
public void setStopsSimulation(boolean stops) {
stopsSimulation = stops;
}
public boolean stopsSimulation() {
return stopsSimulation;
}
private void createMonitor() {
memoryMonitor = new MemoryMonitor.Adapter() {
@Override
public void notifyReadBefore(int addr, Memory.AccessMode mode, Memory.AccessType type) {
if (type != Memory.AccessType.EXECUTE) {
return;
}
mspMote.signalBreakpointTrigger(MspBreakpoint.this);
}
};
mspMote.getCPU().addWatchPoint(address, memoryMonitor);
/* Remember Contiki code, to verify it when reloaded */
if (contikiCode == null) {
final String code = StringUtils.loadFromFile(codeFile);
if (code != null) {
String[] lines = code.split("\n");
if (lineNr-1 < lines.length) {
contikiCode = lines[lineNr-1].trim();
}
}
}
}
public void unregisterBreakpoint() {
mspMote.getCPU().removeWatchPoint(address, memoryMonitor);
}
public Collection<Element> getConfigXML() {
ArrayList<Element> config = new ArrayList<Element>();
Element element;
element = new Element("stops");
element.setText("" + stopsSimulation);
config.add(element);
element = new Element("codefile");
File file = mspMote.getSimulation().getCooja().createPortablePath(codeFile);
element.setText(file.getPath().replaceAll("\\\\", "/"));
config.add(element);
element = new Element("line");
element.setText("" + lineNr);
config.add(element);
if (contikiCode != null) {
element = new Element("contikicode");
element.setText(contikiCode);
config.add(element);
}
if (msg != null) {
element = new Element("msg");
element.setText(msg);
config.add(element);
}
if (color != null) {
element = new Element("color");
element.setText("" + color.getRGB());
config.add(element);
}
return config;
}
public boolean setConfigXML(Collection<Element> configXML, boolean visAvailable) {
/* Already knows mote and breakpoints */
for (Element element : configXML) {
if (element.getName().equals("codefile")) {
File file = new File(element.getText());
file = mspMote.getSimulation().getCooja().restorePortablePath(file);
try {
codeFile = file.getCanonicalFile();
} catch (IOException e) {
}
if (codeFile == null || !codeFile.exists()) {
return false;
}
} else if (element.getName().equals("line")) {
lineNr = Integer.parseInt(element.getText());
} else if (element.getName().equals("contikicode")) {
String lastContikiCode = element.getText().trim();
/* Verify that Contiki code did not change */
final String code = StringUtils.loadFromFile(codeFile);
if (code != null) {
String[] lines = code.split("\n");
if (lineNr-1 < lines.length) {
contikiCode = lines[lineNr-1].trim();
}
}
if (!lastContikiCode.equals(contikiCode)) {
logger.warn("Detected modified Contiki code at breakpoint: " + codeFile.getPath() + ":" + lineNr + ".");
logger.warn("From: '" + lastContikiCode + "'");
logger.warn(" To: '" + contikiCode + "'");
}
} else if (element.getName().equals("msg")) {
msg = element.getText();
} else if (element.getName().equals("color")) {
color = new Color(Integer.parseInt(element.getText()));
} else if (element.getName().equals("stops")) {
stopsSimulation = Boolean.parseBoolean(element.getText());
}
}
/* Update executable address */
address = mspMote.getExecutableAddressOf(codeFile, lineNr);
if (address < 0) {
logger.fatal("Could not restore breakpoint, did source code change?");
return false;
}
createMonitor();
return true;
}
public String toString() {
return getMote() + ": " + getDescription();
}
}
| vijaysrao/dipa | tools/mspsim/src/org/contikios/cooja/mspmote/plugins/MspBreakpoint.java | Java | gpl-3.0 | 7,777 |
--[[
-- @brief Fetches an array of systems from min to max jumps away from the given
-- system sys.
--
-- The following example gets a random Sirius M class planet between 1 to 6 jumps away.
--
-- @code
-- local planets = {}
-- getsysatdistance( system.cur(), 1, 6,
--     function(s)
--         for i, v in ipairs(s:planets()) do
--             if v:faction() == faction.get("Sirius") and v:class() == "M" then
--                 planets[#planets + 1] = { v, s } -- remember planet and its system
--                 return true
--             end
--         end
--         return false
--     end )
--
-- if #planets == 0 then abort() end -- Sanity in case no suitable planets are in range.
--
-- local index = rnd.rnd(1, #planets)
-- destplanet = planets[index][1]
-- destsys = planets[index][2]
-- @endcode
--
-- @param sys System to calculate distance from or nil to use current system
-- @param min Min distance to check for.
-- @param max Maximum distance to check for.
-- @param filter Optional filter function to use for more details.
-- @param data Data to pass to filter
-- @param hidden Whether or not to consider hidden jumps (off by default)
-- @return The table of systems n jumps away from sys
--]]
function getsysatdistance( sys, min, max, filter, data, hidden )
   -- Get default parameters
   if sys == nil then
      sys = system.cur()
   end
   if max == nil then
      max = min -- a single distance means "exactly min jumps"
   end

   -- Breadth-first search over jump connections.  All working tables are
   -- declared local: the original leaked open/close/dist/finalset (and the
   -- per-iteration temporaries) into the global environment, clobbering any
   -- globals of the same name held by other mission scripts.
   local open  = { sys }                  -- current frontier
   local close = { [sys:name()]=sys }     -- visited systems, keyed by name
   local dist  = { [sys:name()]=0 }       -- jump distance from sys

   -- Expand the frontier max times; dist records the first (shortest) level
   -- at which each system was reached.
   for i=1,max do
      local nopen = {}
      -- Get all the adjacent systems of the current set
      for _,s in ipairs(open) do
         local adjsys = s:adjacentSystems( hidden ) -- Get them all
         for _,a in ipairs(adjsys) do
            -- Must not have been explored previously
            if close[ a:name() ] == nil then
               nopen[ #nopen+1 ] = a
               close[ a:name() ] = a
               dist[ a:name() ] = i
            end
         end
      end
      open = nopen -- New table becomes the old
   end

   -- Keep only systems within [min, max] jumps that pass the optional filter.
   local finalset = {}
   for i,s in pairs(close) do
      if dist[i] >= min and dist[i] <= max and
            (filter == nil or filter(s,data)) then
         finalset[ #finalset+1 ] = s
      end
   end
   return finalset
end
| pydsigner/naev | dat/scripts/jumpdist.lua | Lua | gpl-3.0 | 2,247 |
/* Linux-specific atomic operations for ARM EABI.
Copyright (C) 2008-2016 Free Software Foundation, Inc.
Contributed by CodeSourcery.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* Kernel helper for compare-and-exchange. */
typedef int (__kernel_cmpxchg_t) (int oldval, int newval, int *ptr);
/* The ARM Linux kernel exposes "kuser" helper entry points at fixed
   addresses in the vector page; 0xffff0fc0 is the cmpxchg helper, which
   returns zero on success and nonzero when the store was not performed.  */
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)

/* Kernel helper for memory barrier. */
typedef void (__kernel_dmb_t) (void);
#define __kernel_dmb (*(__kernel_dmb_t *) 0xffff0fa0)

/* Note: we implement byte, short and int versions of atomic operations using
   the above kernel helpers; see linux-atomic-64bit.c for "long long" (64-bit)
   operations. */

#define HIDDEN __attribute__ ((visibility ("hidden")))

/* Shift corrections so sub-word accesses address the right byte lane on
   both little-endian (__ARMEL__) and big-endian builds.  */
#ifdef __ARMEL__
#define INVERT_MASK_1 0
#define INVERT_MASK_2 0
#else
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16
#endif

#define MASK_1 0xffu
#define MASK_2 0xffffu
/* Word (32-bit) __sync_fetch_and_<op>: read the current value, compute the
   updated one, and retry via the kernel cmpxchg helper until no other
   writer intervened.  Returns the value observed BEFORE the operation, as
   the fetch_and_<op> builtins require.  */
#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP)				\
  int HIDDEN								\
  __sync_fetch_and_##OP##_4 (int *ptr, int val)				\
  {									\
    int failure, tmp;							\
									\
    do {								\
      tmp = *ptr;							\
      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);	\
    } while (failure != 0);						\
									\
    return tmp;								\
  }

FETCH_AND_OP_WORD (add, , +)
FETCH_AND_OP_WORD (sub, , -)
FETCH_AND_OP_WORD (or, , |)
FETCH_AND_OP_WORD (and, , &)
FETCH_AND_OP_WORD (xor, , ^)
FETCH_AND_OP_WORD (nand, ~, &)
/* NAME_oldval/NAME_newval expand (via the NAME##_##RETURN paste below) to
   the builtin entry-point name: fetch_and_<op> returns the OLD value,
   <op>_and_fetch returns the NEW one.  */
#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH

/* Implement both __sync_<op>_and_fetch and __sync_fetch_and_<op> for
   subword-sized quantities.  The 1- or 2-byte operand is updated by doing
   a word-sized cmpxchg on the aligned word containing it, masking and
   shifting the affected lane (shift corrected for endianness through
   INVERT_MASK_*); the surrounding bytes are carried over unchanged.  */
#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN)	\
  TYPE HIDDEN								\
  NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE val)			\
  {									\
    int *wordptr = (int *) ((unsigned int) ptr & ~3);			\
    unsigned int mask, shift, oldval, newval;				\
    int failure;							\
									\
    shift = (((unsigned int) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;	\
    mask = MASK_##WIDTH << shift;					\
									\
    do {								\
      oldval = *wordptr;						\
      newval = ((PFX_OP (((oldval & mask) >> shift)			\
			 INF_OP (unsigned int) val)) << shift) & mask;	\
      newval |= oldval & ~mask;						\
      failure = __kernel_cmpxchg (oldval, newval, wordptr);		\
    } while (failure != 0);						\
									\
    return (RETURN & mask) >> shift;					\
  }

/* fetch_and_<op> (RETURN == oldval) for 2-byte and 1-byte operands.  */
SUBWORD_SYNC_OP (add, , +, short, 2, oldval)
SUBWORD_SYNC_OP (sub, , -, short, 2, oldval)
SUBWORD_SYNC_OP (or, , |, short, 2, oldval)
SUBWORD_SYNC_OP (and, , &, short, 2, oldval)
SUBWORD_SYNC_OP (xor, , ^, short, 2, oldval)
SUBWORD_SYNC_OP (nand, ~, &, short, 2, oldval)

SUBWORD_SYNC_OP (add, , +, signed char, 1, oldval)
SUBWORD_SYNC_OP (sub, , -, signed char, 1, oldval)
SUBWORD_SYNC_OP (or, , |, signed char, 1, oldval)
SUBWORD_SYNC_OP (and, , &, signed char, 1, oldval)
SUBWORD_SYNC_OP (xor, , ^, signed char, 1, oldval)
SUBWORD_SYNC_OP (nand, ~, &, signed char, 1, oldval)
/* Word (32-bit) __sync_<op>_and_fetch: identical retry loop to
   FETCH_AND_OP_WORD, but returns the value AFTER applying the operation
   (recomputed from the successfully-exchanged old value).  */
#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP)				\
  int HIDDEN								\
  __sync_##OP##_and_fetch_4 (int *ptr, int val)				\
  {									\
    int tmp, failure;							\
									\
    do {								\
      tmp = *ptr;							\
      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);	\
    } while (failure != 0);						\
									\
    return PFX_OP (tmp INF_OP val);					\
  }

OP_AND_FETCH_WORD (add, , +)
OP_AND_FETCH_WORD (sub, , -)
OP_AND_FETCH_WORD (or, , |)
OP_AND_FETCH_WORD (and, , &)
OP_AND_FETCH_WORD (xor, , ^)
OP_AND_FETCH_WORD (nand, ~, &)

/* <op>_and_fetch flavours (RETURN == newval) for sub-word operands.  */
SUBWORD_SYNC_OP (add, , +, short, 2, newval)
SUBWORD_SYNC_OP (sub, , -, short, 2, newval)
SUBWORD_SYNC_OP (or, , |, short, 2, newval)
SUBWORD_SYNC_OP (and, , &, short, 2, newval)
SUBWORD_SYNC_OP (xor, , ^, short, 2, newval)
SUBWORD_SYNC_OP (nand, ~, &, short, 2, newval)

SUBWORD_SYNC_OP (add, , +, signed char, 1, newval)
SUBWORD_SYNC_OP (sub, , -, signed char, 1, newval)
SUBWORD_SYNC_OP (or, , |, signed char, 1, newval)
SUBWORD_SYNC_OP (and, , &, signed char, 1, newval)
SUBWORD_SYNC_OP (xor, , ^, signed char, 1, newval)
SUBWORD_SYNC_OP (nand, ~, &, signed char, 1, newval)
/* Word-sized compare-and-swap: atomically store NEWVAL into *PTR iff it
   currently equals OLDVAL.  Always returns the value of *PTR observed
   before the operation, per the __sync_val_compare_and_swap contract.  */
int HIDDEN
__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  int actual_oldval, fail;

  while (1)
    {
      actual_oldval = *ptr;

      /* Mismatch: report the value we saw; no store is attempted.  */
      if (__builtin_expect (oldval != actual_oldval, 0))
	return actual_oldval;

      fail = __kernel_cmpxchg (actual_oldval, newval, ptr);

      /* Success: *ptr held oldval and now holds newval.  On failure the
	 helper was preempted by another writer, so re-read and retry.  */
      if (__builtin_expect (!fail, 1))
	return oldval;
    }
}
/* Sub-word compare-and-swap returning the previously-stored value.  Uses
   the same containing-word cmpxchg technique as SUBWORD_SYNC_OP: only the
   addressed byte/halfword lane is compared and replaced, other bytes of
   the word are preserved.  */
#define SUBWORD_VAL_CAS(TYPE, WIDTH)					\
  TYPE HIDDEN								\
  __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,		\
				       TYPE newval)			\
  {									\
    int *wordptr = (int *)((unsigned int) ptr & ~3), fail;		\
    unsigned int mask, shift, actual_oldval, actual_newval;		\
									\
    shift = (((unsigned int) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;	\
    mask = MASK_##WIDTH << shift;					\
									\
    while (1)								\
      {									\
	actual_oldval = *wordptr;					\
									\
	if (__builtin_expect (((actual_oldval & mask) >> shift) !=	\
			      ((unsigned int) oldval & MASK_##WIDTH), 0)) \
	  return (actual_oldval & mask) >> shift;			\
									\
	actual_newval = (actual_oldval & ~mask)				\
	  | (((unsigned int) newval << shift) & mask);			\
									\
	fail = __kernel_cmpxchg (actual_oldval, actual_newval,		\
				 wordptr);				\
									\
	if (__builtin_expect (!fail, 1))				\
	  return oldval;						\
      }									\
  }

SUBWORD_VAL_CAS (short, 2)
SUBWORD_VAL_CAS (signed char, 1)
typedef unsigned char bool;

/* Word-sized boolean compare-and-swap: true iff *PTR held OLDVAL and was
   replaced by NEWVAL.  The kernel helper signals success by returning
   zero, so the result is just that comparison.  */
bool HIDDEN
__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  return __kernel_cmpxchg (oldval, newval, ptr) == 0;
}
/* Sub-word boolean compare-and-swap, layered on the val flavour: succeed
   iff the value previously stored equalled OLDVAL.  */
#define SUBWORD_BOOL_CAS(TYPE, WIDTH)					\
  bool HIDDEN								\
  __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,		\
					TYPE newval)			\
  {									\
    TYPE actual_oldval							\
      = __sync_val_compare_and_swap_##WIDTH (ptr, oldval, newval);	\
    return (oldval == actual_oldval);					\
  }

SUBWORD_BOOL_CAS (short, 2)
SUBWORD_BOOL_CAS (signed char, 1)
/* Full memory barrier, delegated to the kernel's dmb helper so the same
   binary works on cores with and without native barrier instructions.  */
void HIDDEN
__sync_synchronize (void)
{
  __kernel_dmb ();
}
/* Word-sized atomic exchange: store VAL into *PTR and return the value
   that was there before.  Retries the kernel cmpxchg until it completes
   without interference from another writer.  */
int HIDDEN
__sync_lock_test_and_set_4 (int *ptr, int val)
{
  int oldval;

  for (;;)
    {
      oldval = *ptr;
      /* Zero from the helper means the store went through.  */
      if (__kernel_cmpxchg (oldval, val, ptr) == 0)
	return oldval;
    }
}
/* Sub-word atomic exchange: replace only the addressed byte/halfword lane
   of the containing word, returning the lane's previous contents.  */
#define SUBWORD_TEST_AND_SET(TYPE, WIDTH)				\
  TYPE HIDDEN								\
  __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val)		\
  {									\
    int failure;							\
    unsigned int oldval, newval, shift, mask;				\
    int *wordptr = (int *) ((unsigned int) ptr & ~3);			\
									\
    shift = (((unsigned int) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;	\
    mask = MASK_##WIDTH << shift;					\
									\
    do {								\
      oldval = *wordptr;						\
      newval = (oldval & ~mask)						\
	| (((unsigned int) val << shift) & mask);			\
      failure = __kernel_cmpxchg (oldval, newval, wordptr);		\
    } while (failure != 0);						\
									\
    return (oldval & mask) >> shift;					\
  }

SUBWORD_TEST_AND_SET (short, 2)
SUBWORD_TEST_AND_SET (signed char, 1)

/* Lock release: a full barrier followed by a plain store of zero -- prior
   writes become visible before the lock is observed free.
   NOTE(review): the 8-byte instantiation stores a long long zero with a
   plain "*ptr = 0", which on 32-bit ARM is presumably not a single atomic
   store -- confirm against linux-atomic-64bit.c, which handles the other
   64-bit operations.  */
#define SYNC_LOCK_RELEASE(TYPE, WIDTH)					\
  void HIDDEN								\
  __sync_lock_release_##WIDTH (TYPE *ptr)				\
  {									\
    /* All writes before this point must be seen before we release	\
      the lock itself.  */						\
    __kernel_dmb ();							\
    *ptr = 0;								\
  }

SYNC_LOCK_RELEASE (long long, 8)
SYNC_LOCK_RELEASE (int, 4)
SYNC_LOCK_RELEASE (short, 2)
SYNC_LOCK_RELEASE (char, 1)
| selmentdev/selment-toolchain | source/gcc-latest/libgcc/config/arm/linux-atomic.c | C | gpl-3.0 | 8,645 |
// { dg-require-namedlocale "de_DE.ISO8859-15" }
// 2004-02-05 Paolo Carlini <pcarlini@suse.de>
// Copyright (C) 2004-2016 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
// 22.2.6.1.1 money_get members
#include <locale>
#include <sstream>
#include <testsuite_hooks.h>
// No thousands-sep allowed after the decimal-point.
// Checks that money_get rejects a thousands separator appearing after the
// decimal point.  In de_DE (ISO8859-15) monetary formatting the decimal
// point is ',' and '.' is the thousands separator, so "500,1.0" places a
// separator inside the fractional digits and extraction must fail.
void test01()
{
  using namespace std;
  bool test __attribute__((unused)) = true;
  typedef istreambuf_iterator<wchar_t> iterator_type;

  // basic construction
  locale loc_c = locale::classic();
  locale loc_de = locale(ISO_8859(15,de_DE));
  VERIFY( loc_c != loc_de );

  iterator_type end01, end02;
  wistringstream iss;
  iss.imbue(loc_de);
  // cache the money_get facet
  const money_get<wchar_t>& mon_get = use_facet<money_get<wchar_t> >(iss.getloc());

  // International format (intl == true): extraction stops on the stray
  // '.' after the decimal point, sets failbit, and the returned iterator
  // points at the offending character.
  iss.str(L"500,1.0 ");
  iterator_type is_it01(iss);
  long double result1;
  ios_base::iostate err01 = ios_base::goodbit;
  end01 = mon_get.get(is_it01, end01, true, iss, err01, result1);
  VERIFY( err01 == ios_base::failbit );
  VERIFY( *end01 == '.' );

  // Same input with intl == false: identical failure expected.
  iss.str(L"500,1.0 ");
  iterator_type is_it02(iss);
  long double result2;
  ios_base::iostate err02 = ios_base::goodbit;
  end02 = mon_get.get(is_it02, end02, false, iss, err02, result2);
  VERIFY( err02 == ios_base::failbit );
  VERIFY( *end02 == '.' );
}
// Entry point: VERIFY aborts on failure, so reaching the return means the
// testcase passed.
int main()
{
  test01();
  return 0;
}
| selmentdev/selment-toolchain | source/gcc-latest/libstdc++-v3/testsuite/22_locale/money_get/get/wchar_t/13.cc | C++ | gpl-3.0 | 2,040 |
/* JIT regression test -- the odd constructs are intentional, do not
   "simplify" them.  `undefined ? 1 : 4294967295` constant-folds to
   4294967295 (UINT32_MAX, exercising the int/uint32 boundary) and
   `false || x` must still print that value.  NOTE(review): run under the
   SpiderMonkey shell (`print` is a shell builtin); the specific bug
   exercised is presumably the one named by the test's location in the
   tree -- confirm against the tracker. */
function f() {
  var x = undefined ? 1 : 4294967295;
  print(false || x);
}
f();
<!DOCTYPE HTML>
<html>
<head>
<title>Cue Points VAST Ads test</title>
<script type="text/javascript" src="../../../mwEmbedLoader.php"></script>
<script type="text/javascript" src="../../../docs/js/doc-bootstrap.js"></script>
<script type="text/javascript">
</script>
<!-- qunit-kaltura must come after qunit-bootstrap.js and after mwEmbedLoader.php and after any jsCallbackReady stuff-->
<script type="text/javascript" src="resources/qunit-kaltura-bootstrap.js"></script>
</head>
<body>
<h2>Cue Points VAST Ads test</h2>
<p>This entry has 3 cue points:</p>
<ul>
<li>00:00 - Preroll</li>
<li>00:20 - Midroll</li>
<li>02:38 - Postroll</li>
</ul>
<br /><br />
<div id="myVideoTarget" style="width:400px;height:330px;"></div>
<script>
// Embed the Kaltura player into the target div; the entry's cue points
// trigger the pre/mid/post-roll VAST ads listed above.
// Fix: removed the trailing comma inside the flashvars object literal --
// legacy IE JScript (the runtime this Flash-era test page also targets)
// treats a trailing comma in an object literal as a syntax error.
kWidget.embed({
	'targetId': 'myVideoTarget',
	'wid': '_423851',
	'uiconf_id' : '6178231',
	'entry_id' : '1_9t28c1xt',
	'flashvars': {
		// Keep the external JS API enabled so the page can script the player.
		'externalInterfaceDisabled' : false
	}
});
</script>
</body>
</html> | FlixMaster/mwEmbed | modules/KalturaSupport/tests/CuePointsAds.html | HTML | agpl-3.0 | 982 |
//* This file is part of the MOOSE framework
//* https://www.mooseframework.org
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html
#include "CSVTimeSequenceStepper.h"
registerMooseObject("MooseApp", CSVTimeSequenceStepper);

defineLegacyParams(CSVTimeSequenceStepper);

/* Declares the input-file parameters: the required CSV file, optional
   parsing controls (header flag, delimiter), and the time column selected
   either by name (default "time") or -- when given -- by numeric index. */
InputParameters
CSVTimeSequenceStepper::validParams()
{
  InputParameters params = TimeSequenceStepperBase::validParams();
  params.addRequiredParam<FileName>("file_name",
                                    "name of the file in which the time sequence is read");
  params.addParam<bool>("header",
                        "indicates whether the file contains a header with the column names");
  params.addParam<std::string>("delimiter", ",", "delimiter used to parse the file");
  params.addParam<std::string>(
      "column_name", "time", "name of the column which contains the time sequence");
  params.addParam<unsigned int>("column_index",
                                "index of the column which contains the time sequence");
  params.addClassDescription(
      "Solves the Transient problem at a sequence of given time points read in a file.");
  return params;
}
/* Constructor: caches the parameters.  The header flag is tri-state --
   when "header" is absent the reader AUTO-detects a header row, otherwise
   it is forced ON or OFF.  A provided "column_index" switches lookup from
   by-name to by-index (see init()). */
CSVTimeSequenceStepper::CSVTimeSequenceStepper(const InputParameters & parameters)
  : TimeSequenceStepperBase(parameters),
    _file_name(getParam<FileName>("file_name")),
    _header(isParamValid("header")
                ? (getParam<bool>("header") ? MooseUtils::DelimitedFileReader::HeaderFlag::ON
                                            : MooseUtils::DelimitedFileReader::HeaderFlag::OFF)
                : MooseUtils::DelimitedFileReader::HeaderFlag::AUTO),
    _delimiter(getParam<std::string>("delimiter")),
    _column_name(getParam<std::string>("column_name")),
    _search_by_index(isParamValid("column_index")),
    _column_index(_search_by_index ? getParam<unsigned int>("column_index") : 0)
{
}
/* Reads the CSV file and hands the extracted time column to
   setupSequence().  Errors out when the requested column index is out of
   range or the resulting sequence is empty. */
void
CSVTimeSequenceStepper::init()
{
  MooseUtils::DelimitedFileReader file(_file_name);
  file.setHeaderFlag(_header);
  file.setDelimiter(_delimiter);
  file.read();

  std::vector<Real> instants;

  if (_search_by_index)
  {
    std::vector<std::vector<double>> data = file.getData();
    // Bounds-check the user-supplied column index against the parsed data.
    if (_column_index >= data.size())
      mooseError("cannot find column ", _column_index, " in file ", _file_name);
    instants = data[_column_index];
  }
  else
    instants = file.getData(_column_name);

  if (instants.size() == 0)
    mooseError("empty sequence in file ", _file_name);

  setupSequence(instants);
}
| harterj/moose | framework/src/timesteppers/CSVTimeSequenceStepper.C | C++ | lgpl-2.1 | 2,657 |
/*
* Copyright 2014 JBoss Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.apiman.manager.api.beans.search;
import java.io.Serializable;
/**
* Represents a single filter or search criteria. This is used when searching
* for beans.
*
* @author eric.wittmann@redhat.com
*/
public class SearchCriteriaFilterBean implements Serializable {

    private static final long serialVersionUID = -1199180207971619165L;

    private String name;
    private String value;
    private SearchCriteriaFilterOperator operator;

    /**
     * Creates an empty filter; callers populate it through the setters.
     */
    public SearchCriteriaFilterBean() {
    }

    /**
     * Returns the name of the property being filtered on.
     */
    public String getName() {
        return name;
    }

    /**
     * Sets the name of the property being filtered on.
     */
    public void setName(String name) {
        this.name = name;
    }

    /**
     * Returns the value this filter matches against.
     */
    public String getValue() {
        return value;
    }

    /**
     * Sets the value this filter matches against.
     */
    public void setValue(String value) {
        this.value = value;
    }

    /**
     * Returns the comparison operator relating name and value.
     */
    public SearchCriteriaFilterOperator getOperator() {
        return operator;
    }

    /**
     * Sets the comparison operator relating name and value.
     */
    public void setOperator(SearchCriteriaFilterOperator operator) {
        this.operator = operator;
    }

    /**
     * Hash over (name, operator, value), null-safe and consistent with
     * {@link #equals(Object)}.
     */
    @Override
    public int hashCode() {
        int result = 31 + nullSafeHash(name);
        result = 31 * result + nullSafeHash(operator);
        result = 31 * result + nullSafeHash(value);
        return result;
    }

    /**
     * Two filters are equal when they are the same concrete class and all
     * three fields match, treating null as equal only to null.
     */
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        SearchCriteriaFilterBean that = (SearchCriteriaFilterBean) obj;
        return nullSafeEquals(name, that.name)
                && nullSafeEquals(operator, that.operator)
                && nullSafeEquals(value, that.value);
    }

    /** Null-safe hashCode helper (null hashes to 0, as before). */
    private static int nullSafeHash(Object o) {
        return o == null ? 0 : o.hashCode();
    }

    /** Null-safe equality helper. */
    private static boolean nullSafeEquals(Object a, Object b) {
        return a == null ? b == null : a.equals(b);
    }
}
| jasonchaffee/apiman | manager/api/beans/src/main/java/io/apiman/manager/api/beans/search/SearchCriteriaFilterBean.java | Java | apache-2.0 | 3,210 |
/**
* Copyright 2005-2015 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ecl2.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.rice.core.api.util.xml;
import org.kuali.rice.core.api.CoreConstants;
import javax.xml.ws.WebFault;
/**
 * Runtime exception raised on XML parsing/processing failures; exposed
 * over JAX-WS as an "XmlFault" via the {@code @WebFault} annotation.
 */
@WebFault(name = "XmlFault", targetNamespace = CoreConstants.Namespaces.CORE_NAMESPACE_2_0)
public class XmlException extends RuntimeException {

    private static final long serialVersionUID = 5859837720372502809L;

    /** Constructs an exception carrying a detail message only. */
    public XmlException(String message) {
        super(message);
    }

    /** Constructs an exception wrapping the given cause. */
    public XmlException(Throwable t) {
        super(t);
    }

    /** Constructs an exception with both a detail message and a cause. */
    public XmlException(String message, Throwable t) {
        super(message, t);
    }
}
| bhutchinson/rice | rice-middleware/core/api/src/main/java/org/kuali/rice/core/api/util/xml/XmlException.java | Java | apache-2.0 | 1,230 |
package org.zstack.header.storage.backup;
import org.zstack.header.message.MessageReply;
/**
 * Reply message for backup storage deletion requests.  Carries no payload
 * of its own; success/error state is inherited from {@code MessageReply}.
 */
public class BackupStorageDeletionReply extends MessageReply {
}
| zstackorg/zstack | header/src/main/java/org/zstack/header/storage/backup/BackupStorageDeletionReply.java | Java | apache-2.0 | 164 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.cache;
import org.apache.ignite.transactions.Transaction;
import org.jetbrains.annotations.Nullable;
/**
* Mode indicating how Ignite should wait for write replies from other nodes. Default
* value is {@link #FULL_ASYNC}}, which means that Ignite will not wait for responses from
* participating nodes. This means that by default remote nodes may get their state updated slightly after
* any of the cache write methods complete, or after {@link Transaction#commit()} method completes.
* <p>
* Note that regardless of write synchronization mode, cache data will always remain fully
* consistent across all participating nodes.
* <p>
* Write synchronization mode may be configured via {@link org.apache.ignite.configuration.CacheConfiguration#getWriteSynchronizationMode()}
* configuration property.
*/
public enum CacheWriteSynchronizationMode {
/**
* Flag indicating that Ignite should wait for write or commit replies from all nodes.
* This behavior guarantees that whenever any of the atomic or transactional writes
* complete, all other participating nodes which cache the written data have been updated.
*/
FULL_SYNC,
/**
* Flag indicating that Ignite will not wait for write or commit responses from participating nodes,
* which means that remote nodes may get their state updated a bit after any of the cache write methods
* complete, or after {@link Transaction#commit()} method completes.
*/
FULL_ASYNC,
/**
* This flag only makes sense for {@link CacheMode#PARTITIONED} mode. When enabled, Ignite
* will wait for write or commit to complete on {@code primary} node, but will not wait for
* backups to be updated.
*/
PRIMARY_SYNC;
/** Enumerated values. */
private static final CacheWriteSynchronizationMode[] VALS = values();
/**
* Efficiently gets enumerated value from its ordinal.
*
* @param ord Ordinal value.
* @return Enumerated value or {@code null} if ordinal out of range.
*/
@Nullable public static CacheWriteSynchronizationMode fromOrdinal(int ord) {
return ord >= 0 && ord < VALS.length ? VALS[ord] : null;
}
} | dlnufox/ignite | modules/core/src/main/java/org/apache/ignite/cache/CacheWriteSynchronizationMode.java | Java | apache-2.0 | 3,015 |
/**
* Copyright 2005-2015 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ecl2.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.rice.kew.xml.xstream;
import java.util.List;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFunction;
import javax.xml.xpath.XPathFunctionException;
import org.w3c.dom.Node;
/**
* An XPathFunction which will run XStream safe XPath queries.
*
* @see XStreamSafeEvaluator
*
* @author Kuali Rice Team (rice.collab@kuali.org)
*/
public class XStreamSafeSearchFunction implements XPathFunction {

	/* Root node against which the XStream-safe expression is evaluated. */
	private final Node rootNode;
	private XPath xpath;
	/* NOTE(review): this is a shared static instance but evaluate() mutates
	 * it per call via setXpath(..), so concurrent use from multiple threads
	 * is presumably unsafe -- confirm how callers use this class. */
	private static XStreamSafeEvaluator evaluator = new XStreamSafeEvaluator();

	public XStreamSafeSearchFunction(Node rootNode, XPath xpath) {
		this.rootNode = rootNode;
		this.xpath = xpath;
	}

	/* Evaluates the XPath expression passed as the single (String) function
	 * argument against the root node, wrapping any XPathExpressionException
	 * in an XPathFunctionException as the XPathFunction contract requires. */
	public Object evaluate(List parameters) throws XPathFunctionException {
		String xPathExpression = getXPathExpressionParameter(parameters);
		evaluator.setXpath(xpath);
		//Node rootSearchNode = getRootSearchNodeParameter(parameters);
		try {
			return evaluator.evaluate(xPathExpression, rootNode);
		} catch (XPathExpressionException e) {
			throw new XPathFunctionException(e);
		}
	}

	/* Validates and returns the first parameter, which must be a String
	 * XPath expression. */
	private String getXPathExpressionParameter(List parameters) throws XPathFunctionException {
		if (parameters.size() < 1) {
			throw new XPathFunctionException("First parameter must be an XPath expression.");
		}
		if (!(parameters.get(0) instanceof String)) {
			throw new XPathFunctionException("First parameter must be an XPath expression String");
		}
		return (String)parameters.get(0);
	}

	public XPath getXpath() {
		return xpath;
	}

	public void setXpath(XPath xpath) {
		this.xpath = xpath;
	}

	/*private Node getRootSearchNodeParameter(List parameters) throws XPathFunctionException {
		if (parameters.size() < 2) {
			throw new XPathFunctionException("Second parameter should be root node and is required");
		}
		System.out.println(parameters.get(1));
		if (!(parameters.get(1) instanceof Node)) {
			throw new XPathFunctionException("Second parameter should be an instance of Node (try using the root() XPath function).");
		}
		return (Node)parameters.get(1);
	}*/
}
| bhutchinson/rice | rice-middleware/impl/src/main/java/org/kuali/rice/kew/xml/xstream/XStreamSafeSearchFunction.java | Java | apache-2.0 | 2,731 |
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect.testing.testers;
import static com.google.common.collect.testing.features.CollectionFeature.ALLOWS_NULL_VALUES;
import static com.google.common.collect.testing.features.CollectionSize.ONE;
import static com.google.common.collect.testing.features.CollectionSize.ZERO;
import static com.google.common.collect.testing.features.ListFeature.SUPPORTS_ADD_WITH_INDEX;
import static java.util.Collections.singletonList;
import com.google.common.annotations.GwtCompatible;
import com.google.common.collect.testing.MinimalCollection;
import com.google.common.collect.testing.features.CollectionFeature;
import com.google.common.collect.testing.features.CollectionSize;
import com.google.common.collect.testing.features.ListFeature;
import java.util.List;
import org.junit.Ignore;
/**
* A generic JUnit test which tests {@code addAll(int, Collection)} operations on a list. Can't be
* invoked directly; please see {@link com.google.common.collect.testing.ListTestSuiteBuilder}.
*
* @author Chris Povirk
*/
@SuppressWarnings("unchecked") // too many "unchecked generic array creations"
@GwtCompatible
@Ignore // Affects only Android test runner, which respects JUnit 4 annotations on JUnit 3 tests.
public class ListAddAllAtIndexTester<E> extends AbstractListTester<E> {
  /* addAll(0, c) with an already-present element: List.addAll must still
   * insert (lists keep duplicates) and return true. */
  @ListFeature.Require(SUPPORTS_ADD_WITH_INDEX)
  @CollectionSize.Require(absent = ZERO)
  public void testAddAllAtIndex_supportedAllPresent() {
    assertTrue(
        "addAll(n, allPresent) should return true",
        getList().addAll(0, MinimalCollection.of(e0())));
    expectAdded(0, e0());
  }

  /* Lists without indexed addAll support must throw
   * UnsupportedOperationException and leave the list unchanged. */
  @ListFeature.Require(absent = SUPPORTS_ADD_WITH_INDEX)
  @CollectionSize.Require(absent = ZERO)
  public void testAddAllAtIndex_unsupportedAllPresent() {
    try {
      getList().addAll(0, MinimalCollection.of(e0()));
      fail("addAll(n, allPresent) should throw");
    } catch (UnsupportedOperationException expected) {
    }
    expectUnchanged();
  }

  /* Mixed collection (one present element, one new): both are inserted at
   * the front, in the source collection's iteration order. */
  @ListFeature.Require(SUPPORTS_ADD_WITH_INDEX)
  @CollectionSize.Require(absent = ZERO)
  public void testAddAllAtIndex_supportedSomePresent() {
    assertTrue(
        "addAll(n, allPresent) should return true",
        getList().addAll(0, MinimalCollection.of(e0(), e3())));
    expectAdded(0, e0(), e3());
  }
@ListFeature.Require(absent = SUPPORTS_ADD_WITH_INDEX)
@CollectionSize.Require(absent = ZERO)
public void testAddAllAtIndex_unsupportedSomePresent() {
try {
getList().addAll(0, MinimalCollection.of(e0(), e3()));
fail("addAll(n, allPresent) should throw");
} catch (UnsupportedOperationException expected) {
}
expectUnchanged();
expectMissing(e3());
}
@ListFeature.Require(SUPPORTS_ADD_WITH_INDEX)
public void testAddAllAtIndex_supportedNothing() {
assertFalse("addAll(n, nothing) should return false", getList().addAll(0, emptyCollection()));
expectUnchanged();
}
@ListFeature.Require(absent = SUPPORTS_ADD_WITH_INDEX)
public void testAddAllAtIndex_unsupportedNothing() {
try {
assertFalse(
"addAll(n, nothing) should return false or throw",
getList().addAll(0, emptyCollection()));
} catch (UnsupportedOperationException tolerated) {
}
expectUnchanged();
}
@ListFeature.Require(SUPPORTS_ADD_WITH_INDEX)
public void testAddAllAtIndex_withDuplicates() {
MinimalCollection<E> elementsToAdd = MinimalCollection.of(e0(), e1(), e0(), e1());
assertTrue("addAll(n, hasDuplicates) should return true", getList().addAll(0, elementsToAdd));
expectAdded(0, e0(), e1(), e0(), e1());
}
@ListFeature.Require(SUPPORTS_ADD_WITH_INDEX)
@CollectionFeature.Require(ALLOWS_NULL_VALUES)
public void testAddAllAtIndex_nullSupported() {
List<E> containsNull = singletonList(null);
assertTrue("addAll(n, containsNull) should return true", getList().addAll(0, containsNull));
/*
* We need (E) to force interpretation of null as the single element of a
* varargs array, not the array itself
*/
expectAdded(0, (E) null);
}
@ListFeature.Require(SUPPORTS_ADD_WITH_INDEX)
@CollectionFeature.Require(absent = ALLOWS_NULL_VALUES)
public void testAddAllAtIndex_nullUnsupported() {
List<E> containsNull = singletonList(null);
try {
getList().addAll(0, containsNull);
fail("addAll(n, containsNull) should throw");
} catch (NullPointerException expected) {
}
expectUnchanged();
expectNullMissingWhenNullUnsupported(
"Should not contain null after unsupported addAll(n, containsNull)");
}
@ListFeature.Require(SUPPORTS_ADD_WITH_INDEX)
@CollectionSize.Require(absent = {ZERO, ONE})
public void testAddAllAtIndex_middle() {
assertTrue(
"addAll(middle, disjoint) should return true",
getList().addAll(getNumElements() / 2, createDisjointCollection()));
expectAdded(getNumElements() / 2, createDisjointCollection());
}
@ListFeature.Require(SUPPORTS_ADD_WITH_INDEX)
@CollectionSize.Require(absent = ZERO)
public void testAddAllAtIndex_end() {
assertTrue(
"addAll(end, disjoint) should return true",
getList().addAll(getNumElements(), createDisjointCollection()));
expectAdded(getNumElements(), createDisjointCollection());
}
@ListFeature.Require(SUPPORTS_ADD_WITH_INDEX)
public void testAddAllAtIndex_nullCollectionReference() {
try {
getList().addAll(0, null);
fail("addAll(n, null) should throw");
} catch (NullPointerException expected) {
}
expectUnchanged();
}
@ListFeature.Require(SUPPORTS_ADD_WITH_INDEX)
public void testAddAllAtIndex_negative() {
try {
getList().addAll(-1, MinimalCollection.of(e3()));
fail("addAll(-1, e) should throw");
} catch (IndexOutOfBoundsException expected) {
}
expectUnchanged();
expectMissing(e3());
}
@ListFeature.Require(SUPPORTS_ADD_WITH_INDEX)
public void testAddAllAtIndex_tooLarge() {
try {
getList().addAll(getNumElements() + 1, MinimalCollection.of(e3()));
fail("addAll(size + 1, e) should throw");
} catch (IndexOutOfBoundsException expected) {
}
expectUnchanged();
expectMissing(e3());
}
}
| rgoldberg/guava | guava-testlib/src/com/google/common/collect/testing/testers/ListAddAllAtIndexTester.java | Java | apache-2.0 | 6,768 |
<!---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-->
# Apache Hadoop 0.20.205.0 Release Notes
These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
---
* [HADOOP-7724](https://issues.apache.org/jira/browse/HADOOP-7724) | *Major* | **hadoop-setup-conf.sh should put proxy user info into the core-site.xml**
Fixed hadoop-setup-conf.sh to put proxy user in core-site.xml. (Arpit Gupta via Eric Yang)
---
* [HADOOP-7720](https://issues.apache.org/jira/browse/HADOOP-7720) | *Major* | **improve the hadoop-setup-conf.sh to read in the hbase user and setup the configs**
Added parameter for HBase user to setup config script. (Arpit Gupta via Eric Yang)
---
* [HADOOP-7715](https://issues.apache.org/jira/browse/HADOOP-7715) | *Major* | **see log4j Error when running mr jobs and certain dfs calls**
Removed unnecessary security logger configuration. (Eric Yang)
---
* [HADOOP-7711](https://issues.apache.org/jira/browse/HADOOP-7711) | *Major* | **hadoop-env.sh generated from templates has duplicate info**
Fixed recursive sourcing of HADOOP\_OPTS environment variables (Arpit Gupta via Eric Yang)
---
* [HADOOP-7708](https://issues.apache.org/jira/browse/HADOOP-7708) | *Critical* | **config generator does not update the properties file if on exists already**
Fixed hadoop-setup-conf.sh to handle config file consistently. (Eric Yang)
---
* [HADOOP-7707](https://issues.apache.org/jira/browse/HADOOP-7707) | *Major* | **improve config generator to allow users to specify proxy user, turn append on or off, turn webhdfs on or off**
Added toggle for dfs.support.append, webhdfs and hadoop proxy user to setup config script. (Arpit Gupta via Eric Yang)
---
* [HADOOP-7691](https://issues.apache.org/jira/browse/HADOOP-7691) | *Major* | **hadoop deb pkg should take a diff group id**
Fixed conflict uid for install packages. (Eric Yang)
---
* [HADOOP-7684](https://issues.apache.org/jira/browse/HADOOP-7684) | *Major* | **jobhistory server and secondarynamenode should have init.d script**
Added init.d script for jobhistory server and secondary namenode. (Eric Yang)
---
* [HADOOP-7681](https://issues.apache.org/jira/browse/HADOOP-7681) | *Minor* | **log4j.properties is missing properties for security audit and hdfs audit should be changed to info**
HADOOP-7681. Fixed security and hdfs audit log4j properties
(Arpit Gupta via Eric Yang)
---
* [HADOOP-7655](https://issues.apache.org/jira/browse/HADOOP-7655) | *Major* | **provide a small validation script that smoke tests the installed cluster**
Added a small validation script that smoke-tests the installed cluster. (Committed to trunk and v23 after code review.)
---
* [HADOOP-7603](https://issues.apache.org/jira/browse/HADOOP-7603) | *Major* | **Set default hdfs, mapred uid, and hadoop group gid for RPM packages**
Set hdfs uid, mapred uid, and hadoop gid to fixed numbers (201, 202, and 123, respectively).
---
* [HADOOP-7119](https://issues.apache.org/jira/browse/HADOOP-7119) | *Major* | **add Kerberos HTTP SPNEGO authentication support to Hadoop JT/NN/DN/TT web-consoles**
Adding support for Kerberos HTTP SPNEGO authentication to the Hadoop web-consoles
---
* [HDFS-2358](https://issues.apache.org/jira/browse/HDFS-2358) | *Major* | **NPE when the default filesystem's uri has no authority**
Give meaningful error message instead of NPE.
---
* [HDFS-2338](https://issues.apache.org/jira/browse/HDFS-2338) | *Major* | **Configuration option to enable/disable webhdfs.**
Added a conf property dfs.webhdfs.enabled for enabling/disabling webhdfs.
---
* [HDFS-2318](https://issues.apache.org/jira/browse/HDFS-2318) | *Major* | **Provide authentication to webhdfs using SPNEGO**
Added two new conf properties dfs.web.authentication.kerberos.principal and dfs.web.authentication.kerberos.keytab for the SPNEGO servlet filter.
---
* [HDFS-2202](https://issues.apache.org/jira/browse/HDFS-2202) | *Major* | **Changes to balancer bandwidth should not require datanode restart.**
New dfsadmin command added: [-setBalancerBandwidth \<bandwidth\>] where bandwidth is max network bandwidth in bytes per second that the balancer is allowed to use on each datanode during balancing.
This is an incompatible change in 0.23. The versions of ClientProtocol and DatanodeProtocol are changed.
---
* [HDFS-1554](https://issues.apache.org/jira/browse/HDFS-1554) | *Major* | **Append 0.20: New semantics for recoverLease**
Change the recoverLease API to return whether the file is closed or not. It also changes the semantics of recoverLease to start lease recovery immediately.
---
* [HDFS-630](https://issues.apache.org/jira/browse/HDFS-630) | *Major* | **In DFSOutputStream.nextBlockOutputStream(), the client can exclude specific datanodes when locating the next block.**
**WARNING: No release note provided for this incompatible change.**
---
* [MAPREDUCE-3112](https://issues.apache.org/jira/browse/MAPREDUCE-3112) | *Major* | **Calling hadoop cli inside mapreduce job leads to errors**
Removed inheritance of certain server environment variables (HADOOP\_OPTS and HADOOP\_ROOT\_LOGGER) in task attempt process.
---
* [MAPREDUCE-3081](https://issues.apache.org/jira/browse/MAPREDUCE-3081) | *Major* | **Change the name format for hadoop core and vaidya jar to be hadoop-{core/vaidya}-{version}.jar in vaidya.sh**
contrib/vaidya/bin/vaidya.sh script fixed to use appropriate jars and classpath
---
* [MAPREDUCE-2777](https://issues.apache.org/jira/browse/MAPREDUCE-2777) | *Major* | **Backport MAPREDUCE-220 to Hadoop 20 security branch**
Adds cumulative cpu usage and total heap usage to task counters. This is a backport of MAPREDUCE-220 and MAPREDUCE-2469.
---
* [MAPREDUCE-2764](https://issues.apache.org/jira/browse/MAPREDUCE-2764) | *Major* | **Fix renewal of dfs delegation tokens**
Generalizes token renewal and canceling to a common interface and provides a plugin interface for adding renewers for new kinds of tokens. Hftp changed to store the tokens as HFTP and renew them over http.
---
* [MAPREDUCE-2494](https://issues.apache.org/jira/browse/MAPREDUCE-2494) | *Major* | **Make the distributed cache delete entries using LRU priority**
Added config option mapreduce.tasktracker.cache.local.keep.pct to the TaskTracker. It is the target percentage of the local distributed cache that should be kept in between garbage collection runs. In practice it will delete unused distributed cache entries in LRU order until the size of the cache is less than mapreduce.tasktracker.cache.local.keep.pct of the maximum cache size. This is a floating point value between 0.0 and 1.0. The default is 0.95.
---
* [MAPREDUCE-2187](https://issues.apache.org/jira/browse/MAPREDUCE-2187) | *Major* | **map tasks timeout during sorting**
Fixed map tasks timing out during sorting. (Thanks Anupam!)
| robzor92/hops | hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.205.0/RELEASENOTES.0.20.205.0.md | Markdown | apache-2.0 | 7,561 |
/*
* Copyright 2010 JBoss Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.drools.core.common;
/**
 * Callback interface for observers of a rule-flow group's lifecycle.
 */
public interface RuleFlowGroupListener {

    /** Invoked when the observed rule-flow group is deactivated. */
    void ruleFlowGroupDeactivated();
}
| amckee23/drools | drools-core/src/main/java/org/drools/core/common/RuleFlowGroupListener.java | Java | apache-2.0 | 714 |
#region license
// Copyright (c) 2007-2010 Mauricio Scheffer
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#endregion
using System;
using System.Data;
using System.Linq.Expressions;
using NHibernate.Engine;
using NHibernate.Stat;
using NHibernate.Type;
namespace NHibernate.SolrNet.Impl {
/// <summary>
/// <see cref="ISession"/> decorator that forwards every member to a wrapped
/// NHibernate session. Subclasses can override individual members to add
/// behaviour without re-implementing the whole interface.
/// </summary>
public class DelegatingSession : ISession {
    // The wrapped session all calls are forwarded to; never null.
    private readonly ISession session;

    /// <param name="session">Session to delegate to; must not be null.</param>
    public DelegatingSession(ISession session) {
        if (session == null)
            throw new ArgumentNullException("session");
        this.session = session;
    }

    // ----- lifecycle / connection management -----

    public void Dispose() {
        session.Dispose();
    }

    public void Flush() {
        session.Flush();
    }

    public IDbConnection Disconnect() {
        return session.Disconnect();
    }

    public void Reconnect() {
        session.Reconnect();
    }

    public void Reconnect(IDbConnection connection) {
        session.Reconnect(connection);
    }

    public IDbConnection Close() {
        return session.Close();
    }

    public void CancelQuery() {
        session.CancelQuery();
    }

    // ----- session state queries -----

    public bool IsDirty() {
        return session.IsDirty();
    }

    public bool IsReadOnly(object entityOrProxy) {
        return session.IsReadOnly(entityOrProxy);
    }

    public void SetReadOnly(object entityOrProxy, bool readOnly) {
        session.SetReadOnly(entityOrProxy, readOnly);
    }

    public object GetIdentifier(object obj) {
        return session.GetIdentifier(obj);
    }

    public bool Contains(object obj) {
        return session.Contains(obj);
    }

    public void Evict(object obj) {
        session.Evict(obj);
    }

    // ----- entity loading -----

    public object Load(System.Type theType, object id, LockMode lockMode) {
        return session.Load(theType, id, lockMode);
    }

    public object Load(string entityName, object id, LockMode lockMode) {
        return session.Load(entityName, id, lockMode);
    }

    public object Load(System.Type theType, object id) {
        return session.Load(theType, id);
    }

    public T Load<T>(object id, LockMode lockMode) {
        return session.Load<T>(id, lockMode);
    }

    public T Load<T>(object id) {
        return session.Load<T>(id);
    }

    public object Load(string entityName, object id) {
        return session.Load(entityName, id);
    }

    public void Load(object obj, object id) {
        session.Load(obj, id);
    }

    public void Replicate(object obj, ReplicationMode replicationMode) {
        session.Replicate(obj, replicationMode);
    }

    public void Replicate(string entityName, object obj, ReplicationMode replicationMode) {
        session.Replicate(entityName, obj, replicationMode);
    }

    // ----- persistence operations -----

    public object Save(object obj) {
        return session.Save(obj);
    }

    public void Save(object obj, object id) {
        session.Save(obj, id);
    }

    public object Save(string entityName, object obj) {
        return session.Save(entityName, obj);
    }

    public void SaveOrUpdate(object obj) {
        session.SaveOrUpdate(obj);
    }

    public void SaveOrUpdate(string entityName, object obj) {
        session.SaveOrUpdate(entityName, obj);
    }

    public void Update(object obj) {
        session.Update(obj);
    }

    public void Update(object obj, object id) {
        session.Update(obj, id);
    }

    public void Update(string entityName, object obj) {
        session.Update(entityName, obj);
    }

    public object Merge(object obj) {
        return session.Merge(obj);
    }

    public object Merge(string entityName, object obj) {
        return session.Merge(entityName, obj);
    }

    public T Merge<T>(T entity) where T : class {
        return session.Merge(entity);
    }

    public T Merge<T>(string entityName, T entity) where T : class {
        return session.Merge(entityName, entity);
    }

    public void Persist(object obj) {
        session.Persist(obj);
    }

    public void Persist(string entityName, object obj) {
        session.Persist(entityName, obj);
    }

    public object SaveOrUpdateCopy(object obj) {
        return session.SaveOrUpdateCopy(obj);
    }

    public object SaveOrUpdateCopy(object obj, object id) {
        return session.SaveOrUpdateCopy(obj, id);
    }

    // ----- deletion -----

    public void Delete(object obj) {
        session.Delete(obj);
    }

    public void Delete(string entityName, object obj) {
        session.Delete(entityName, obj);
    }

    public int Delete(string query) {
        return session.Delete(query);
    }

    public int Delete(string query, object value, IType type) {
        return session.Delete(query, value, type);
    }

    public int Delete(string query, object[] values, IType[] types) {
        return session.Delete(query, values, types);
    }

    // ----- locking / refreshing -----

    public void Lock(object obj, LockMode lockMode) {
        session.Lock(obj, lockMode);
    }

    public void Lock(string entityName, object obj, LockMode lockMode) {
        session.Lock(entityName, obj, lockMode);
    }

    public void Refresh(object obj) {
        session.Refresh(obj);
    }

    public void Refresh(object obj, LockMode lockMode) {
        session.Refresh(obj, lockMode);
    }

    public LockMode GetCurrentLockMode(object obj) {
        return session.GetCurrentLockMode(obj);
    }

    // ----- transactions -----

    public ITransaction BeginTransaction() {
        return session.BeginTransaction();
    }

    public ITransaction BeginTransaction(IsolationLevel isolationLevel) {
        return session.BeginTransaction(isolationLevel);
    }

    // ----- query construction -----

    public ICriteria CreateCriteria<T>() where T : class {
        return session.CreateCriteria<T>();
    }

    public ICriteria CreateCriteria<T>(string alias) where T : class {
        return session.CreateCriteria<T>(alias);
    }

    public ICriteria CreateCriteria(System.Type persistentClass) {
        return session.CreateCriteria(persistentClass);
    }

    public ICriteria CreateCriteria(System.Type persistentClass, string alias) {
        return session.CreateCriteria(persistentClass, alias);
    }

    public ICriteria CreateCriteria(string entityName) {
        return session.CreateCriteria(entityName);
    }

    public ICriteria CreateCriteria(string entityName, string alias) {
        return session.CreateCriteria(entityName, alias);
    }

    public IQueryOver<T, T> QueryOver<T>() where T : class {
        return session.QueryOver<T>();
    }

    public IQueryOver<T, T> QueryOver<T>(Expression<Func<T>> alias) where T : class {
        return session.QueryOver<T>(alias);
    }

    public IQueryOver<T, T> QueryOver<T>(string entityName) where T : class {
        return session.QueryOver<T>(entityName);
    }

    public IQueryOver<T, T> QueryOver<T>(string entityName, Expression<Func<T>> alias) where T : class {
        return session.QueryOver<T>(entityName, alias);
    }

    public IQuery CreateQuery(string queryString) {
        return session.CreateQuery(queryString);
    }

    public IQuery CreateFilter(object collection, string queryString) {
        return session.CreateFilter(collection, queryString);
    }

    public IQuery GetNamedQuery(string queryName) {
        return session.GetNamedQuery(queryName);
    }

    public ISQLQuery CreateSQLQuery(string queryString) {
        return session.CreateSQLQuery(queryString);
    }

    public void Clear() {
        session.Clear();
    }

    // ----- entity retrieval (Get returns null when not found) -----

    public object Get(System.Type clazz, object id) {
        return session.Get(clazz, id);
    }

    public object Get(System.Type clazz, object id, LockMode lockMode) {
        return session.Get(clazz, id, lockMode);
    }

    public object Get(string entityName, object id) {
        return session.Get(entityName, id);
    }

    public T Get<T>(object id) {
        return session.Get<T>(id);
    }

    public T Get<T>(object id, LockMode lockMode) {
        return session.Get<T>(id, lockMode);
    }

    public string GetEntityName(object obj) {
        return session.GetEntityName(obj);
    }

    // ----- filters / multi-query helpers -----

    public IFilter EnableFilter(string filterName) {
        return session.EnableFilter(filterName);
    }

    public IFilter GetEnabledFilter(string filterName) {
        return session.GetEnabledFilter(filterName);
    }

    public void DisableFilter(string filterName) {
        session.DisableFilter(filterName);
    }

    public IMultiQuery CreateMultiQuery() {
        return session.CreateMultiQuery();
    }

    public ISession SetBatchSize(int batchSize) {
        return session.SetBatchSize(batchSize);
    }

    public ISessionImplementor GetSessionImplementation() {
        return session.GetSessionImplementation();
    }

    public IMultiCriteria CreateMultiCriteria() {
        return session.CreateMultiCriteria();
    }

    public ISession GetSession(EntityMode entityMode) {
        return session.GetSession(entityMode);
    }

    // ----- pass-through properties -----

    public EntityMode ActiveEntityMode {
        get { return session.ActiveEntityMode; }
    }

    public FlushMode FlushMode {
        get { return session.FlushMode; }
        set { session.FlushMode = value; }
    }

    public CacheMode CacheMode {
        get { return session.CacheMode; }
        set { session.CacheMode = value; }
    }

    public ISessionFactory SessionFactory {
        get { return session.SessionFactory; }
    }

    public IDbConnection Connection {
        get { return session.Connection; }
    }

    public bool IsOpen {
        get { return session.IsOpen; }
    }

    public bool IsConnected {
        get { return session.IsConnected; }
    }

    public bool DefaultReadOnly {
        get { return session.DefaultReadOnly; }
        set { session.DefaultReadOnly = value; }
    }

    public ITransaction Transaction {
        get { return session.Transaction; }
    }

    public ISessionStatistics Statistics {
        get { return session.Statistics; }
    }
}
} | ManpowerNordic/SolrNet | NHibernate.SolrNet/Impl/DelegatingSession.cs | C# | apache-2.0 | 11,418 |
package org.zstack.utils;
import java.util.HashMap;
import java.util.Map;
import static org.zstack.utils.StringDSL.ln;
/**
* Created by xing5 on 2016/6/16.
*/
public class StringBind {

    /** Template string containing the placeholders to substitute. */
    private final String template;

    /** Placeholder name mapped to the value substituted on render. */
    private final Map<String, Object> bindings = new HashMap<>();

    public StringBind(String template) {
        this.template = template;
    }

    /**
     * Associates {@code value} with the placeholder named {@code key}.
     *
     * @return this instance, allowing chained {@code bind} calls
     */
    public StringBind bind(String key, Object value) {
        bindings.put(key, value);
        return this;
    }

    /** Renders the template with every bound value substituted. */
    @Override
    public String toString() {
        return ln(template).formatByMap(bindings);
    }
}
| zstackorg/zstack | utils/src/main/java/org/zstack/utils/StringBind.java | Java | apache-2.0 | 578 |
/*
* Copyright 2005 JBoss Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.drools.core.audit;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.ObjectOutputStream;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.List;
import org.drools.core.WorkingMemory;
import org.drools.core.audit.event.LogEvent;
import org.kie.internal.event.KnowledgeRuntimeEventManager;
import com.thoughtworks.xstream.XStream;
/**
* A logger of events generated by a working memory.
* It stores its information in memory, so it can be retrieved later.
*/
public class WorkingMemoryInMemoryLogger extends WorkingMemoryLogger {

    /** Events recorded so far, in arrival order. */
    private List<LogEvent> events = new ArrayList<LogEvent>();

    public WorkingMemoryInMemoryLogger() {
    }

    public WorkingMemoryInMemoryLogger(final WorkingMemory workingMemory) {
        super( workingMemory );
    }

    public WorkingMemoryInMemoryLogger(final KnowledgeRuntimeEventManager session) {
        super( session );
    }

    @SuppressWarnings("unchecked")
    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
        super.readExternal(in);
        events = (List<LogEvent>) in.readObject();
    }

    public void writeExternal(ObjectOutput out) throws IOException {
        super.writeExternal(out);
        out.writeObject(events);
    }

    /**
     * Serializes the recorded events to an XML string via XStream.
     *
     * @return the XML representation of all events logged so far
     * @throws RuntimeException if serialization fails; the original failure is
     *         attached as the cause
     */
    public String getEvents() {
        final XStream xstream = new XStream();
        StringWriter writer = new StringWriter();
        try {
            final ObjectOutputStream out = xstream.createObjectOutputStream(writer);
            out.writeObject( this.events );
            out.close();
        } catch (Throwable t) {
            // FIX: chain the cause; previously only t's message survived,
            // losing the original stack trace.
            throw new RuntimeException("Unable to create event output: " + t.getMessage(), t);
        }
        return writer.toString();
    }

    /**
     * Clears all the events in the log.
     */
    public void clear() {
        this.events.clear();
    }

    /**
     * @see org.kie.audit.WorkingMemoryLogger
     */
    public void logEventCreated(final LogEvent logEvent) {
        this.events.add( logEvent );
    }

    public List<LogEvent> getLogEvents() {
        return this.events;
    }
}
| ThiagoGarciaAlves/drools | drools-core/src/main/java/org/drools/core/audit/WorkingMemoryInMemoryLogger.java | Java | apache-2.0 | 2,779 |
package org.zstack.test.network;
import junit.framework.Assert;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.zstack.core.cloudbus.CloudBus;
import org.zstack.core.componentloader.ComponentLoader;
import org.zstack.core.db.DatabaseFacade;
import org.zstack.header.network.l2.L2NetworkInventory;
import org.zstack.header.network.l3.*;
import org.zstack.header.zone.ZoneInventory;
import org.zstack.test.*;
import java.util.concurrent.TimeUnit;
/**
 * Integration test for the "first available" IP allocation strategy:
 * allocates an IP, verifies the lowest address of the range is handed out,
 * returns it, and verifies the used-IP record is removed.
 */
public class TestFirstAvailableIpAllocatorStrategyReturnIp {
    Api api;
    ComponentLoader loader;
    DatabaseFacade dbf;
    CloudBus bus;

    @Before
    public void setUp() throws Exception {
        DBUtil.reDeployDB();
        BeanConstructor con = new WebBeanConstructor();
        /* This loads spring application context */
        loader = con.addXml("PortalForUnitTest.xml").addXml("ZoneManager.xml").addXml("NetworkManager.xml").addXml("AccountManager.xml").build();
        dbf = loader.getComponent(DatabaseFacade.class);
        bus = loader.getComponent(CloudBus.class);
        api = new Api();
        api.startServer();
    }

    @After
    public void tearDown() throws Exception {
        api.stopServer();
    }

    @Test
    public void test() throws ApiSenderException, InterruptedException {
        // Build zone -> L2 -> L3 with an IP range of 10.223.110.10-20.
        ZoneInventory zone = api.createZones(1).get(0);
        L2NetworkInventory linv = api.createNoVlanL2Network(zone.getUuid(), "eth0");
        L3NetworkInventory l3inv = api.createL3BasicNetwork(linv.getUuid());
        L3NetworkVO vo = dbf.findByUuid(l3inv.getUuid(), L3NetworkVO.class);
        Assert.assertNotNull(vo);
        IpRangeInventory ipInv = api.addIpRange(l3inv.getUuid(), "10.223.110.10", "10.223.110.20", "10.223.110.1", "255.255.255.0");
        IpRangeVO ipvo = dbf.findByUuid(ipInv.getUuid(), IpRangeVO.class);
        Assert.assertNotNull(ipvo);

        // Allocate with the first-available strategy; it must hand out the
        // lowest free address of the range.
        AllocateIpMsg msg = new AllocateIpMsg();
        msg.setL3NetworkUuid(l3inv.getUuid());
        msg.setServiceId(bus.makeLocalServiceId(L3NetworkConstant.SERVICE_ID));
        msg.setAllocateStrategy(L3NetworkConstant.FIRST_AVAILABLE_IP_ALLOCATOR_STRATEGY);
        AllocateIpReply reply = (AllocateIpReply) bus.call(msg);
        UsedIpInventory uinv = reply.getIpInventory();
        Assert.assertEquals("10.223.110.10", uinv.getIp());

        // Return the IP; the message is asynchronous, so wait briefly before
        // checking the database.
        ReturnIpMsg rmsg = new ReturnIpMsg();
        rmsg.setL3NetworkUuid(l3inv.getUuid());
        rmsg.setUsedIpUuid(uinv.getUuid());
        rmsg.setServiceId(bus.makeLocalServiceId(L3NetworkConstant.SERVICE_ID));
        bus.send(rmsg);
        TimeUnit.SECONDS.sleep(1);

        // The used-IP record must be gone after the return.
        UsedIpVO uvo = dbf.findByUuid(uinv.getUuid(), UsedIpVO.class);
        Assert.assertNull(uvo); // was: assertEquals(null, uvo)
    }
}
| zstackorg/zstack | test/src/test/java/org/zstack/test/network/TestFirstAvailableIpAllocatorStrategyReturnIp.java | Java | apache-2.0 | 2,723 |
/*
* (C) 2003-15 - ntop
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/socket.h>
#include <sys/signal.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <pcap/pcap.h>
/* pcap savefile magic numbers: microsecond vs nanosecond timestamp files. */
#define PCAP_MAGIC 0xa1b2c3d4
#define PCAP_NSEC_MAGIC 0xa1b23c4d
/* Fixed-width (32-bit) timestamp as stored on disk; the in-memory
 * struct timeval may use wider fields on 64-bit platforms. */
struct pcap_disk_timeval {
u_int32_t tv_sec;/* seconds */
u_int32_t tv_usec;/* microseconds */
};
/* Per-packet record header as stored on disk, preceding each packet. */
struct pcap_disk_pkthdr {
struct pcap_disk_timeval ts;/* time stamp */
u_int32_t caplen;/* length of portion present */
u_int32_t len;/* length this packet (off wire) */
};
/* Globals shared between the capture loop and the signal handlers. */
FILE *out_fd = NULL; /* writer end of the named pipe */
char *out_pipe_filename = NULL; /* path of the named pipe (-o) */
int out_fd_id, verbose = 0, nsec_ts = 0; /* fd of out_fd; -v and -n flags */
int snaplen = 1514; /* capture snapshot length */
char *in_device = NULL; /* capture device or pcap file (-i) */
pcap_t *pd = NULL; /* libpcap capture handle */
int run();
void write_pcap_header();
void processsPacket(u_char *notUsed,
const struct pcap_pkthdr *header,
const u_char *packet);
/* ************************************* */
/* Release the pipe and capture handle. On SIGPIPE (reader went away) the
 * capture is restarted via run(); any other signal terminates the process. */
void cleanup(int signal) {
  if(verbose)
    printf("Cleaning up resource\n");

  if(out_pipe_filename)
    unlink(out_pipe_filename);
  if(pd)
    pcap_close(pd);

  if(signal != SIGPIPE) {
    if(verbose)
      printf("Exiting...\n");
    exit(0);
  }

  run();
}
/* ************************************* */
/* Create the named pipe, wait for a reader to attach, open the capture
 * source (live device, falling back to a pcap file) and stream packets
 * into the pipe until the reader disappears.
 * Returns 0 on success, -1 if the capture source cannot be opened. */
int run() {
  char errbuf[PCAP_ERRBUF_SIZE];
  int rc, promisc = 1;

  /* Remove any stale pipe left over from a previous run. */
  if((rc = unlink(out_pipe_filename)) != 0) { /* Just to be safe */
    if(verbose) printf("Unlink failed: %d\n", rc);
  } else
    if(verbose) printf("Deleted named pipe %s\n", out_pipe_filename);

  if(verbose) printf("Creating named pipe %s...\n", out_pipe_filename);

  /* read/write permissions for owner, and with read permissions for group and others. */
  if(mkfifo(out_pipe_filename, S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH) != 0) {
    printf("ERROR: Unable to create named pipe %s: file already existing ?\n",
	   out_pipe_filename);
    exit(-1);
  }

  if(verbose) printf("Waiting for process to attach to named pipe %s...\n",
		     out_pipe_filename);

  /* fopen() on a FIFO blocks until a reader opens the other end. */
  if((out_fd = fopen(out_pipe_filename, "w")) == NULL) {
    printf("ERROR: Unable to write into file %s\n", out_pipe_filename);
    cleanup(-1);
  } else
    out_fd_id = fileno(out_fd);

  /* Try a live capture first; fall back to reading a pcap file. */
  pd = pcap_open_live(in_device, snaplen, promisc, 500, errbuf);
  if(pd == NULL) pd = pcap_open_offline(in_device, errbuf);

  if(pd == NULL) {
    printf("ERROR: unable to open pcap device/file %s: %s\n",
	   in_device, errbuf);
    return(-1);
  }

  /* The pcap global header must precede all packet records. */
  write_pcap_header();
  pcap_loop(pd, -1, processsPacket, NULL);

  cleanup(-1);
  return(0);
}
/* ************************************* */
/* Print the command-line usage summary on stdout and exit successfully. */
static void help() {
  fputs("pcap2nspcap [-v] [-n] -i <device> -o <named pipe>\n"
        "Usage:\n"
        "-v | Verbose\n"
        "-n | Use nsec timestamps\n"
        "-i <device> | Device name from which packets are captured\n"
        "-o <named pipe> | Output named pipe\n"
        "\n"
        "Example:\n"
        "\t# ./pcap2nspcap -i eth0 -o /tmp/mypipe\n"
        "\t# tcpdump -n -r /tmp/mypipe\n",
        stdout);

  exit(0);
}
/* ************************************* */
/*
* put data onto the end of global ring buffer "buf"
*/
/*
 * Write len bytes from ptr to the pipe, retrying on short writes until
 * everything has been flushed. A write error restarts the capture via
 * cleanup(SIGPIPE).
 */
void append(char *ptr, int len) {
  int off = 0;

  while(off < len) {
    int rc = write(out_fd_id, ptr + off, len - off);

    if(rc < 0) {
      printf("ERROR: Fatal write error: %s\n", strerror(errno));
      cleanup(SIGPIPE);
    } else
      off += rc;
  }
}
/* ************************************* */
void write_pcap_header() {
struct pcap_file_header fh;
/* Add dummy header */
if(nsec_ts)
fh.magic = PCAP_NSEC_MAGIC; /* nsec */
else
fh.magic = PCAP_MAGIC; /* usec */
fh.version_major = 2;
fh.version_minor = 4;
fh.thiszone = 0;
fh.sigfigs = 0;
fh.snaplen = snaplen;
fh.linktype = 1;
append((char *)&fh, sizeof(fh));
}
/* ************************************* */
/* pcap_loop() callback: convert the in-memory libpcap packet header into
 * the fixed-width on-disk record header and stream header + payload into
 * the pipe. */
void processsPacket(u_char *notUsed,
		    const struct pcap_pkthdr *header,
		    const u_char *packet) {
  struct pcap_disk_pkthdr hdr;

  /* Build the on-disk header field by field: on 64-bit platforms
   * struct pcap_pkthdr's timeval fields are wider than the 32-bit on-disk
   * fields, so copying raw header bytes corrupts the stream. */
  hdr.ts.tv_sec = (u_int32_t) header->ts.tv_sec;

  if(nsec_ts) {
    /* NOTE(review): upstream reads the nanosecond part through a private
     * 'struct ns_pcaphdr' overlay — confirm the capture handle really
     * delivers nanosecond-resolution timestamps. */
    struct ns_pcaphdr *myhdr = (struct ns_pcaphdr*)header;
    hdr.ts.tv_usec = myhdr->ns;
  } else
    hdr.ts.tv_usec = (u_int32_t) header->ts.tv_usec;

  hdr.caplen = header->caplen;
  hdr.len = header->len;

  /* BUG FIX: the nsec branch previously wrote 'header' (the unmodified
   * in-memory struct) instead of the patched 'hdr', discarding the
   * nanosecond fixup entirely. */
  append((char *)&hdr, sizeof(hdr));
  append((char *)packet, header->caplen);
}
/* ***************************************** */
/*
 * Entry point: parse command-line options, install signal handlers for
 * pipe cleanup, then start the capture/forward loop via run().
 */
int main(int argc, char* argv[]) {
  int c; /* BUG FIX: getopt() returns int; with 'char c' the -1 sentinel is
          * never matched on platforms where char is unsigned (ARM/PPC),
          * producing an infinite loop. */

  while((c = getopt(argc, argv, "hvi:o:n")) != -1) {
    switch(c) {
    case 'i':
      in_device = strdup(optarg);
      break;
    case 'n':
      nsec_ts = 1;
      break;
    case 'o':
      out_pipe_filename = strdup(optarg);
      break;
    case 'v':
      verbose = 1;
      break;
    default:
      help();
      break;
    }
  }

  /* Both the capture source and the output pipe are mandatory. */
  if((in_device == NULL) || (out_pipe_filename == NULL))
    help();

  /* Remove the named pipe on termination or when the reader goes away. */
  signal(SIGQUIT, cleanup);
  signal(SIGTERM, cleanup);
  signal(SIGPIPE, cleanup);

  if(nsec_ts)
    printf("Using nsec timestamps\n");

  run();
  return(0);
}
| mpeuster/son-examples | vnfs/sonata-vtc-vnf-docker/pfring_web_api/vtc/PF_RING/userland/examples/pcap2nspcap.c | C | apache-2.0 | 6,269 |
/*
* Zed Attack Proxy (ZAP) and its related class files.
*
* ZAP is an HTTP/HTTPS proxy for assessing web application security.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.zaproxy.zap.spider.filters;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.httpclient.URIException;
import org.parosproxy.paros.network.HttpMessage;
import org.parosproxy.paros.network.HttpStatusCode;
/**
* The DefaultParseFilter is an implementation of a {@link ParseFilter} that is default for
* spidering process. Its filter rules are the following:<br/>
* <ul>
* <li>the resource body should be under MAX_RESPONSE_BODY_SIZE bytes, otherwise it's probably a
* binary resource.</li>
* <li>the resource must be of parsable type (text, html, xml, javascript). Actually, the content
* type should be text/...</li>
* </ul>
*/
public class DefaultParseFilter extends ParseFilter {

    /**
     * The Constant MAX_RESPONSE_BODY_SIZE defining the size of response body that is considered too
     * big for a parsable file.
     */
    public static final int MAX_RESPONSE_BODY_SIZE = 512000;

    /**
     * A pattern to match the SQLite based ".svn/wc.db" file name.
     * The '.' in "wc.db" is escaped so only the literal file name matches
     * (the original unescaped dot matched any character, e.g. "wcXdb").
     */
    private static final Pattern svnSQLiteFilenamePattern = Pattern.compile (".*/\\.svn/wc\\.db$");

    /**
     * a pattern to match the XML based ".svn/entries" file name.
     */
    private static final Pattern svnXMLFilenamePattern = Pattern.compile (".*/\\.svn/entries$");

    /**
     * a pattern to match the Git index file.
     */
    private static final Pattern gitFilenamePattern = Pattern.compile (".*/\\.git/index$");

    /**
     * Decides whether the given response should be excluded from parsing.
     *
     * @param responseMessage the message whose response is examined
     * @return {@code true} when the resource should NOT be parsed
     */
    @Override
    public boolean isFiltered(HttpMessage responseMessage) {

        //if it's a file ending in "/.svn/entries", or "/.svn/wc.db", the SVN Entries or Git parsers will process it
        //regardless of type, and regardless of whether it exceeds the file size restriction below.
        Matcher svnXMLFilenameMatcher, svnSQLiteFilenameMatcher, gitFilenameMatcher;
        try {
            String fullfilename = responseMessage.getRequestHeader().getURI().getPath();
            //handle null paths
            if (fullfilename == null) fullfilename = "";
            svnSQLiteFilenameMatcher = svnSQLiteFilenamePattern.matcher(fullfilename);
            svnXMLFilenameMatcher = svnXMLFilenamePattern.matcher(fullfilename);
            gitFilenameMatcher = gitFilenamePattern.matcher(fullfilename);
            if ( svnSQLiteFilenameMatcher.find() || svnXMLFilenameMatcher.find() || gitFilenameMatcher.find())
                return false;
        } catch (URIException e) {
            //give other parsers a chance to parse it.
            log.error(e);
        }

        // Check response body size
        if (responseMessage.getResponseBody().length() > MAX_RESPONSE_BODY_SIZE) {
            if (log.isDebugEnabled()) {
                log.debug("Resource too large: " + responseMessage.getRequestHeader().getURI());
            }
            return true;
        }

        // If it's a redirection, accept it, as the SpiderRedirectParser will process it
        if (HttpStatusCode.isRedirection(responseMessage.getResponseHeader().getStatusCode()))
            return false;

        // Check response type.
        if (!responseMessage.getResponseHeader().isText()) {
            if (log.isDebugEnabled()) {
                log.debug("Resource is not text: " + responseMessage.getRequestHeader().getURI());
            }
            return true;
        }

        return false;
    }
}
| profjrr/zaproxy | src/org/zaproxy/zap/spider/filters/DefaultParseFilter.java | Java | apache-2.0 | 3,774 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.jackrabbit.oak.jcr;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.io.File;
import java.util.Collection;
import java.util.List;
import java.util.Random;
import javax.jcr.Binary;
import javax.jcr.Repository;
import javax.jcr.RepositoryException;
import javax.jcr.Session;
import javax.jcr.SimpleCredentials;
import org.apache.jackrabbit.api.JackrabbitRepository;
import org.apache.jackrabbit.api.ReferenceBinary;
import org.apache.jackrabbit.commons.jackrabbit.SimpleReferenceBinary;
import org.apache.jackrabbit.core.data.RandomInputStream;
import org.apache.jackrabbit.oak.fixture.DocumentMongoFixture;
import org.apache.jackrabbit.oak.fixture.NodeStoreFixture;
import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore;
import org.apache.jackrabbit.oak.plugins.document.MongoUtils;
import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
import org.apache.jackrabbit.oak.segment.file.FileStore;
import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
import org.apache.jackrabbit.oak.segment.fixture.SegmentTarFixture;
import org.apache.jackrabbit.oak.spi.blob.BlobStore;
import org.apache.jackrabbit.oak.spi.blob.FileBlobStore;
import org.apache.jackrabbit.oak.spi.state.NodeStore;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import com.google.common.collect.Lists;
import com.google.common.io.BaseEncoding;
@RunWith(Parameterized.class)
public class ReferenceBinaryIT {

    //Taken from org.apache.jackrabbit.oak.plugins.segment.Segment
    //As SegmentStore inlines binary content with size less than MEDIUM_LIMIT
    static final int SMALL_LIMIT = 1 << 7;

    static final int MEDIUM_LIMIT = (1 << (16 - 2)) + SMALL_LIMIT;

    // Exceeds MEDIUM_LIMIT so the test binary is stored in the blob store
    // instead of being inlined in a segment.
    private static final int STREAM_LENGTH = MEDIUM_LIMIT + 1000;

    // Node store flavor this run is parameterized with.
    private final NodeStoreFixture fixture;

    private NodeStore nodeStore;

    private Repository repository;

    public ReferenceBinaryIT(NodeStoreFixture fixture) {
        this.fixture = fixture;
    }

    /** Creates a fresh repository on top of the parameterized node store. */
    @Before
    public void setup() throws RepositoryException {
        nodeStore = fixture.createNodeStore();
        repository = new Jcr(nodeStore).createRepository();
    }

    /**
     * Verifies that a binary's reference obtained in one session can be
     * resolved back to an equal binary from a second session on the same
     * repository.
     *
     * Taken from org.apache.jackrabbit.core.value.ReferenceBinaryTest
     * @throws Exception
     */
    @Test
    public void testReferenceBinaryExchangeWithSharedRepository() throws Exception {
        Session firstSession = createAdminSession();

        // create a binary
        Binary b = firstSession.getValueFactory().createBinary(new RandomInputStream(1, STREAM_LENGTH));

        ReferenceBinary referenceBinary = null;
        if (b instanceof ReferenceBinary) {
            referenceBinary = (ReferenceBinary) b;
        }

        assertNotNull(referenceBinary);

        assertNotNull(referenceBinary.getReference());

        // in the current test the message is exchanged via repository which is shared as well
        // put the reference message value in a property on a node
        String newNode = "sample_" + System.nanoTime();
        firstSession.getRootNode().addNode(newNode).setProperty("reference", referenceBinary.getReference());

        // save the first session
        firstSession.save();

        // get a second session over the same repository / ds
        Session secondSession = repository.login(new SimpleCredentials("admin", "admin".toCharArray()));

        // read the binary referenced by the referencing binary
        String reference = secondSession.getRootNode().getNode(newNode).getProperty("reference").getString();

        ReferenceBinary ref = new SimpleReferenceBinary(reference);

        assertEquals(b, secondSession.getValueFactory().createValue(ref).getBinary());

        safeLogout(firstSession);
        safeLogout(secondSession);
    }

    /** Shuts the repository down and disposes of the node store. */
    @After
    public void tearDown() {
        if (repository instanceof JackrabbitRepository) {
            ((JackrabbitRepository) repository).shutdown();
        }
        fixture.dispose(nodeStore);
    }

    /**
     * Builds the fixtures this test runs against: segment-tar backed by a
     * data-store blob store, segment-tar backed by a FileBlobStore, and
     * DocumentMK on Mongo — each included only when available.
     */
    @Parameterized.Parameters(name="{0}")
    public static Collection<Object[]> fixtures() throws Exception {
        File file = getTestDir("tar");
        FileStore fileStore = FileStoreBuilder.fileStoreBuilder(file)
                .withBlobStore(createBlobStore())
                .withMaxFileSize(256)
                .withMemoryMapping(true)
                .build();

        SegmentNodeStore sns = SegmentNodeStoreBuilders.builder(fileStore).build();
        List<Object[]> fixtures = Lists.newArrayList();
        SegmentTarFixture segmentTarFixture = new SegmentTarFixture(sns);
        if (segmentTarFixture.isAvailable()) {
            fixtures.add(new Object[] {segmentTarFixture});
        }

        FileBlobStore fbs = new FileBlobStore(getTestDir("fbs1").getAbsolutePath());
        fbs.setReferenceKeyPlainText("foobar");
        FileStore fileStoreWithFBS = FileStoreBuilder.fileStoreBuilder(getTestDir("tar2"))
                .withBlobStore(fbs)
                .withMaxFileSize(256)
                .withMemoryMapping(true)
                .build();

        SegmentNodeStore snsWithFBS = SegmentNodeStoreBuilders.builder(fileStoreWithFBS).build();

        SegmentTarFixture segmentTarFixtureFBS = new SegmentTarFixture(snsWithFBS);
        if (segmentTarFixtureFBS.isAvailable()) {
            fixtures.add(new Object[] {segmentTarFixtureFBS});
        }

        DocumentMongoFixture documentFixture = new DocumentMongoFixture(MongoUtils.URL, createBlobStore());
        if (documentFixture.isAvailable()) {
            fixtures.add(new Object[]{documentFixture});
        }

        return fixtures;
    }

    /** Creates a file-backed blob store keyed with a random reference key. */
    private static BlobStore createBlobStore(){
        File file = getTestDir("datastore");
        OakFileDataStore fds = new OakFileDataStore();
        byte[] key = new byte[256];
        new Random().nextBytes(key);
        fds.setReferenceKeyEncoded(BaseEncoding.base64().encode(key));
        fds.setMinRecordLength(4092);
        fds.init(file.getAbsolutePath());
        return new DataStoreBlobStore(fds);
    }

    /** Returns a unique directory under target/ for per-run test data. */
    private static File getTestDir(String prefix) {
        return new File(new File("target"), prefix+ "." + System.nanoTime());
    }

    /** Opens an admin session on the repository under test. */
    private Session createAdminSession() throws RepositoryException {
        return repository.login(new SimpleCredentials("admin", "admin".toCharArray()));
    }

    /** Logs out, swallowing any exception so cleanup never fails the test. */
    private static void safeLogout(Session session) {
        try {
            session.logout();
        } catch (Exception ignore) {}
    }
} | mduerig/jackrabbit-oak | oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/ReferenceBinaryIT.java | Java | apache-2.0 | 7,625
require 'fog/core/collection'
require 'fog/rackspace/models/databases/database'
module Fog
  module Rackspace
    class Databases
      # Collection of databases belonging to a single database instance.
      class Databases < Fog::Collection
        model Fog::Rackspace::Databases::Database

        # The owning instance; must be set before any request is issued.
        attr_accessor :instance

        # Loads and returns all databases of the instance.
        def all
          load(retrieve_databases)
        end

        # Returns the database with the given name, or nil when absent.
        def get(database_name)
          data = retrieve_databases.find { |database| database['name'] == database_name }
          data && new(data)
        end

        private

        # Fetches the raw database attribute hashes from the API.
        def retrieve_databases
          requires :instance
          # NOTE: the useless local assignment (`data = ...`) from the
          # original was dropped; the expression value is returned directly.
          service.list_databases(instance.id).body['databases']
        end
      end
    end
  end
end
| jreichhold/chef-repo | vendor/ruby/2.0.0/gems/fog-1.20.0/lib/fog/rackspace/models/databases/databases.rb | Ruby | apache-2.0 | 670 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extra dhcp opts support
Revision ID: 53bbd27ec841
Revises: 40dffbf4b549
Create Date: 2013-05-09 15:36:50.485036
"""
# revision identifiers, used by Alembic.
revision = '53bbd27ec841'
down_revision = '40dffbf4b549'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
    """Create the ``extradhcpopts`` table.

    Skipped unless one of the plugins listed in ``migration_for_plugins``
    is active.
    """
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.create_table(
        'extradhcpopts',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('port_id', sa.String(length=36), nullable=False),
        sa.Column('opt_name', sa.String(length=64), nullable=False),
        sa.Column('opt_value', sa.String(length=255), nullable=False),
        # Options are removed together with their port.
        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        # Each option name may appear at most once per port.
        sa.UniqueConstraint('port_id', 'opt_name', name='uidx_portid_optname'))
def downgrade(active_plugins=None, options=None):
    """Drop the ``extradhcpopts`` table, reverting :func:`upgrade`.

    Skipped unless one of the plugins listed in ``migration_for_plugins``
    is active.
    """
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('extradhcpopts')
    ### end Alembic commands ###
| ntt-sic/neutron | neutron/db/migration/alembic_migrations/versions/53bbd27ec841_extra_dhcp_opts_supp.py | Python | apache-2.0 | 2,051 |
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"fmt"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes/scheme"
storagelisters "k8s.io/client-go/listers/storage/v1"
"k8s.io/client-go/tools/reference"
storagehelpers "k8s.io/component-helpers/storage/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
const (
	// AnnBindCompleted Annotation applies to PVCs. It indicates that the lifecycle
	// of the PVC has passed through the initial setup. This information changes how
	// we interpret some observations of the state of the objects. Value of this
	// Annotation does not matter.
	AnnBindCompleted = "pv.kubernetes.io/bind-completed"

	// AnnBoundByController annotation applies to PVs and PVCs. It indicates that
	// the binding (PV->PVC or PVC->PV) was installed by the controller. The
	// absence of this annotation means the binding was done by the user (i.e.
	// pre-bound). Value of this annotation does not matter.
	// External PV binders must bind PV the same way as PV controller, otherwise PV
	// controller may not handle it correctly.
	AnnBoundByController = "pv.kubernetes.io/bound-by-controller"

	// AnnSelectedNode annotation is added to a PVC that has been triggered by scheduler to
	// be dynamically provisioned. Its value is the name of the selected node.
	AnnSelectedNode = "volume.kubernetes.io/selected-node"

	// NotSupportedProvisioner is a special provisioner name which can be set
	// in storage class to indicate dynamic provisioning is not supported by
	// the storage.
	NotSupportedProvisioner = "kubernetes.io/no-provisioner"

	// AnnDynamicallyProvisioned annotation is added to a PV that has been dynamically provisioned by
	// Kubernetes. Its value is name of volume plugin that created the volume.
	// It serves both user (to show where a PV comes from) and Kubernetes (to
	// recognize dynamically provisioned PVs in its decisions).
	AnnDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by"

	// AnnMigratedTo annotation is added to a PVC and PV that is supposed to be
	// dynamically provisioned/deleted by its corresponding CSI driver
	// through the CSIMigration feature flags. When this annotation is set the
	// Kubernetes components will "stand-down" and the external-provisioner will
	// act on the objects
	AnnMigratedTo = "pv.kubernetes.io/migrated-to"

	// AnnStorageProvisioner annotation is added to a PVC that is supposed to be dynamically
	// provisioned. Its value is name of volume plugin that is supposed to provision
	// a volume for this PVC.
	// TODO: remove beta anno once deprecation period ends
	AnnStorageProvisioner = "volume.kubernetes.io/storage-provisioner"
	AnnBetaStorageProvisioner = "volume.beta.kubernetes.io/storage-provisioner"

	// PVDeletionProtectionFinalizer is the finalizer added by the external-provisioner on the PV
	PVDeletionProtectionFinalizer = "external-provisioner.volume.kubernetes.io/finalizer"
)
// IsDelayBindingProvisioning reports whether provisioning of the claim has
// been triggered by the scheduler, i.e. the claim carries the selected-node
// annotation.
func IsDelayBindingProvisioning(claim *v1.PersistentVolumeClaim) bool {
	// With the VolumeScheduling feature, the scheduler signals the PV
	// controller to start dynamic provisioning by stamping the claim with
	// AnnSelectedNode; only the presence of the key matters, not its value.
	_, found := claim.Annotations[AnnSelectedNode]
	return found
}
// IsDelayBindingMode checks if claim is in delay binding mode, i.e. its
// storage class uses WaitForFirstConsumer volume binding.
func IsDelayBindingMode(claim *v1.PersistentVolumeClaim, classLister storagelisters.StorageClassLister) (bool, error) {
	scName := storagehelpers.GetPersistentVolumeClaimClass(claim)
	if scName == "" {
		// No storage class: immediate binding.
		return false, nil
	}

	sc, err := classLister.Get(scName)
	switch {
	case apierrors.IsNotFound(err):
		// A missing class is treated like no class at all.
		return false, nil
	case err != nil:
		return false, err
	}

	if sc.VolumeBindingMode == nil {
		return false, fmt.Errorf("VolumeBindingMode not set for StorageClass %q", scName)
	}

	return *sc.VolumeBindingMode == storage.VolumeBindingWaitForFirstConsumer, nil
}
// GetBindVolumeToClaim returns a new volume which is bound to given claim. In
// addition, it returns a bool which indicates whether we made modification on
// original volume.
func GetBindVolumeToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, bool, error) {
	dirty := false

	// Check if the volume was already bound (either by user or by controller)
	shouldSetBoundByController := false
	if !IsVolumeBoundToClaim(volume, claim) {
		shouldSetBoundByController = true
	}

	// The volume from method args can be pointing to watcher cache. We must not
	// modify these, therefore create a copy.
	volumeClone := volume.DeepCopy()

	// Bind the volume to the claim if it is not bound yet
	// (unlike IsVolumeBoundToClaim, an empty ClaimRef UID here is NOT treated
	// as a wildcard — the ref is rewritten so it carries the claim's UID).
	if volume.Spec.ClaimRef == nil ||
		volume.Spec.ClaimRef.Name != claim.Name ||
		volume.Spec.ClaimRef.Namespace != claim.Namespace ||
		volume.Spec.ClaimRef.UID != claim.UID {

		claimRef, err := reference.GetReference(scheme.Scheme, claim)
		if err != nil {
			return nil, false, fmt.Errorf("unexpected error getting claim reference: %w", err)
		}
		volumeClone.Spec.ClaimRef = claimRef
		dirty = true
	}

	// Set AnnBoundByController if it is not set yet, so external observers can
	// tell this binding apart from a user pre-bind.
	if shouldSetBoundByController && !metav1.HasAnnotation(volumeClone.ObjectMeta, AnnBoundByController) {
		metav1.SetMetaDataAnnotation(&volumeClone.ObjectMeta, AnnBoundByController, "yes")
		dirty = true
	}

	return volumeClone, dirty, nil
}
// IsVolumeBoundToClaim returns true when the volume is pre-bound or bound to
// the given claim: name and namespace must match, and a non-empty ClaimRef
// UID must match the claim's UID (an empty UID acts as a wildcard).
func IsVolumeBoundToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) bool {
	ref := volume.Spec.ClaimRef
	if ref == nil {
		return false
	}
	if ref.Name != claim.Name || ref.Namespace != claim.Namespace {
		return false
	}
	return ref.UID == "" || ref.UID == claim.UID
}
// FindMatchingVolume goes through the list of volumes to find the best matching volume
// for the claim.
//
// This function is used by both the PV controller and scheduler.
//
// delayBinding is true only in the PV controller path. When set, prebound PVs are still returned
// as a match for the claim, but unbound PVs are skipped.
//
// node is set only in the scheduler path. When set, the PV node affinity is checked against
// the node's labels.
//
// excludedVolumes is only used in the scheduler path, and is needed for evaluating multiple
// unbound PVCs for a single Pod at one time. As each PVC finds a matching PV, the chosen
// PV needs to be excluded from future matching.
func FindMatchingVolume(
	claim *v1.PersistentVolumeClaim,
	volumes []*v1.PersistentVolume,
	node *v1.Node,
	excludedVolumes map[string]*v1.PersistentVolume,
	delayBinding bool) (*v1.PersistentVolume, error) {

	var smallestVolume *v1.PersistentVolume
	var smallestVolumeQty resource.Quantity
	requestedQty := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	requestedClass := storagehelpers.GetPersistentVolumeClaimClass(claim)

	var selector labels.Selector
	if claim.Spec.Selector != nil {
		internalSelector, err := metav1.LabelSelectorAsSelector(claim.Spec.Selector)
		if err != nil {
			// should be unreachable code due to validation
			return nil, fmt.Errorf("error creating internal label selector for claim: %v: %v", claimToClaimKey(claim), err)
		}
		selector = internalSelector
	}

	// Go through all available volumes with two goals:
	// - find a volume that is either pre-bound by user or dynamically
	//   provisioned for this claim. Because of this we need to loop through
	//   all volumes.
	// - find the smallest matching one if there is no volume pre-bound to
	//   the claim.
	for _, volume := range volumes {
		if _, ok := excludedVolumes[volume.Name]; ok {
			// Skip volumes in the excluded list
			continue
		}
		if volume.Spec.ClaimRef != nil && !IsVolumeBoundToClaim(volume, claim) {
			// Bound to some other claim; never a candidate.
			continue
		}

		volumeQty := volume.Spec.Capacity[v1.ResourceStorage]
		if volumeQty.Cmp(requestedQty) < 0 {
			// Too small for the request.
			continue
		}
		// filter out mismatching volumeModes
		if CheckVolumeModeMismatches(&claim.Spec, &volume.Spec) {
			continue
		}

		// check if PV's DeletionTimeStamp is set, if so, skip this volume.
		if volume.ObjectMeta.DeletionTimestamp != nil {
			continue
		}

		nodeAffinityValid := true
		if node != nil {
			// Scheduler path, check that the PV NodeAffinity
			// is satisfied by the node
			// volumeutil.CheckNodeAffinity is the most expensive call in this loop.
			// We should check cheaper conditions first or consider optimizing this function.
			err := volumeutil.CheckNodeAffinity(volume, node.Labels)
			if err != nil {
				nodeAffinityValid = false
			}
		}

		if IsVolumeBoundToClaim(volume, claim) {
			// If PV node affinity is invalid, return no match.
			// This means the prebound PV (and therefore PVC)
			// is not suitable for this node.
			if !nodeAffinityValid {
				return nil, nil
			}

			return volume, nil
		}

		if node == nil && delayBinding {
			// PV controller does not bind this claim.
			// Scheduler will handle binding unbound volumes
			// Scheduler path will have node != nil
			continue
		}

		// filter out:
		// - volumes in non-available phase
		// - volumes whose labels don't match the claim's selector, if specified
		// - volumes in Class that is not requested
		// - volumes whose NodeAffinity does not match the node
		if volume.Status.Phase != v1.VolumeAvailable {
			// We ignore volumes in non-available phase, because volumes that
			// satisfies matching criteria will be updated to available, binding
			// them now has high chance of encountering unnecessary failures
			// due to API conflicts.
			continue
		} else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) {
			continue
		}
		if storagehelpers.GetPersistentVolumeClass(volume) != requestedClass {
			continue
		}
		if !nodeAffinityValid {
			continue
		}

		if node != nil {
			// Scheduler path
			// Check that the access modes match
			if !CheckAccessModes(claim, volume) {
				continue
			}
		}

		// Smallest-fit: remember the smallest volume seen so far that passed
		// every filter above.
		if smallestVolume == nil || smallestVolumeQty.Cmp(volumeQty) > 0 {
			smallestVolume = volume
			smallestVolumeQty = volumeQty
		}
	}

	if smallestVolume != nil {
		// Found a matching volume
		return smallestVolume, nil
	}

	return nil, nil
}
// CheckVolumeModeMismatches is a convenience method that reports whether the
// claim's and the volume's volumeModes differ. A nil volumeMode on either
// side defaults to Filesystem: in HA upgrades the apiserver may be older than
// the controller-manager, so the field can be unset.
func CheckVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) bool {
	effectiveMode := func(mode *v1.PersistentVolumeMode) v1.PersistentVolumeMode {
		if mode == nil {
			return v1.PersistentVolumeFilesystem
		}
		return *mode
	}
	return effectiveMode(pvcSpec.VolumeMode) != effectiveMode(pvSpec.VolumeMode)
}
// CheckAccessModes returns true if the PV supports every access mode the
// claim requests.
func CheckAccessModes(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) bool {
	supported := make(map[v1.PersistentVolumeAccessMode]bool, len(volume.Spec.AccessModes))
	for _, mode := range volume.Spec.AccessModes {
		supported[mode] = true
	}
	for _, wanted := range claim.Spec.AccessModes {
		if !supported[wanted] {
			return false
		}
	}
	return true
}
// claimToClaimKey renders a claim as its "namespace/name" key.
func claimToClaimKey(claim *v1.PersistentVolumeClaim) string {
	return claim.Namespace + "/" + claim.Name
}
// GetVolumeNodeAffinity returns a VolumeNodeAffinity requiring nodes whose
// label `key` has the given value.
func GetVolumeNodeAffinity(key string, value string) *v1.VolumeNodeAffinity {
	requirement := v1.NodeSelectorRequirement{
		Key:      key,
		Operator: v1.NodeSelectorOpIn,
		Values:   []string{value},
	}
	term := v1.NodeSelectorTerm{
		MatchExpressions: []v1.NodeSelectorRequirement{requirement},
	}
	return &v1.VolumeNodeAffinity{
		Required: &v1.NodeSelector{
			NodeSelectorTerms: []v1.NodeSelectorTerm{term},
		},
	}
}
| mahak/origin | vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util/util.go | GO | apache-2.0 | 12,880 |
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright (c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "ixgbe.h"
#include "kcompat.h"
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
/* From lib/vsprintf.c */
#include <asm/div64.h>
/*
 * Parse an unsigned decimal integer at **s, advancing *s past the digits.
 * Returns 0 (and leaves *s untouched) when no digit is present.
 */
static int skip_atoi(const char **s)
{
	int value = 0;

	for (; isdigit(**s); ++(*s))
		value = value * 10 + (**s - '0');

	return value;
}
#define _kc_ZEROPAD 1 /* pad with zero */
#define _kc_SIGN 2 /* unsigned/signed long */
#define _kc_PLUS 4 /* show plus */
#define _kc_SPACE 8 /* space if plus */
#define _kc_LEFT 16 /* left justified */
#define _kc_SPECIAL 32 /* 0x */
#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type)
{
char c,sign,tmp[66];
const char *digits;
const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
int i;
digits = (type & _kc_LARGE) ? large_digits : small_digits;
if (type & _kc_LEFT)
type &= ~_kc_ZEROPAD;
if (base < 2 || base > 36)
return 0;
c = (type & _kc_ZEROPAD) ? '0' : ' ';
sign = 0;
if (type & _kc_SIGN) {
if (num < 0) {
sign = '-';
num = -num;
size--;
} else if (type & _kc_PLUS) {
sign = '+';
size--;
} else if (type & _kc_SPACE) {
sign = ' ';
size--;
}
}
if (type & _kc_SPECIAL) {
if (base == 16)
size -= 2;
else if (base == 8)
size--;
}
i = 0;
if (num == 0)
tmp[i++]='0';
else while (num != 0)
tmp[i++] = digits[do_div(num,base)];
if (i > precision)
precision = i;
size -= precision;
if (!(type&(_kc_ZEROPAD+_kc_LEFT))) {
while(size-->0) {
if (buf <= end)
*buf = ' ';
++buf;
}
}
if (sign) {
if (buf <= end)
*buf = sign;
++buf;
}
if (type & _kc_SPECIAL) {
if (base==8) {
if (buf <= end)
*buf = '0';
++buf;
} else if (base==16) {
if (buf <= end)
*buf = '0';
++buf;
if (buf <= end)
*buf = digits[33];
++buf;
}
}
if (!(type & _kc_LEFT)) {
while (size-- > 0) {
if (buf <= end)
*buf = c;
++buf;
}
}
while (i < precision--) {
if (buf <= end)
*buf = '0';
++buf;
}
while (i-- > 0) {
if (buf <= end)
*buf = tmp[i];
++buf;
}
while (size-- > 0) {
if (buf <= end)
*buf = ' ';
++buf;
}
return buf;
}
/*
 * Backport of vsnprintf() for kernels < 2.4.8: format 'fmt' with 'args'
 * into 'buf' (at most 'size' bytes including the terminating NUL) and
 * return the number of characters that would have been written given
 * unlimited space.  Supports the classic %c %s %p %n %% and integer
 * conversions with h/l/L/Z qualifiers; no floating point.
 */
int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
	int len;
	unsigned long long num;
	int i, base;
	char *str, *end, c;
	const char *s;

	int flags; /* flags to number() */

	int field_width; /* width of output field */
	int precision; /* min. # of digits for integers; max
			  number of chars for from string */
	int qualifier; /* 'h', 'l', or 'L' for integer fields */
			/* 'z' support added 23/7/1999 S.H. */
			/* 'z' changed to 'Z' --davidm 1/25/99 */

	str = buf;
	end = buf + size - 1;

	/* Guard against 'end' wrapping past the top of the address space. */
	if (end < buf - 1) {
		end = ((void *) -1);
		size = end - buf + 1;
	}

	for (; *fmt ; ++fmt) {
		/* Ordinary characters are copied through (clamped to 'end'). */
		if (*fmt != '%') {
			if (str <= end)
				*str = *fmt;
			++str;
			continue;
		}

		/* process flags */
		flags = 0;
		repeat:
			++fmt; /* this also skips first '%' */
			switch (*fmt) {
				case '-': flags |= _kc_LEFT; goto repeat;
				case '+': flags |= _kc_PLUS; goto repeat;
				case ' ': flags |= _kc_SPACE; goto repeat;
				case '#': flags |= _kc_SPECIAL; goto repeat;
				case '0': flags |= _kc_ZEROPAD; goto repeat;
			}

		/* get field width */
		field_width = -1;
		if (isdigit(*fmt))
			field_width = skip_atoi(&fmt);
		else if (*fmt == '*') {
			++fmt;
			/* it's the next argument */
			field_width = va_arg(args, int);
			if (field_width < 0) {
				/* negative width means left-justified */
				field_width = -field_width;
				flags |= _kc_LEFT;
			}
		}

		/* get the precision */
		precision = -1;
		if (*fmt == '.') {
			++fmt;
			if (isdigit(*fmt))
				precision = skip_atoi(&fmt);
			else if (*fmt == '*') {
				++fmt;
				/* it's the next argument */
				precision = va_arg(args, int);
			}
			if (precision < 0)
				precision = 0;
		}

		/* get the conversion qualifier */
		qualifier = -1;
		if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
			qualifier = *fmt;
			++fmt;
		}

		/* default base */
		base = 10;

		switch (*fmt) {
			case 'c':
				/* pad-left, the character, then pad-right */
				if (!(flags & _kc_LEFT)) {
					while (--field_width > 0) {
						if (str <= end)
							*str = ' ';
						++str;
					}
				}
				c = (unsigned char) va_arg(args, int);
				if (str <= end)
					*str = c;
				++str;
				while (--field_width > 0) {
					if (str <= end)
						*str = ' ';
					++str;
				}
				continue;

			case 's':
				s = va_arg(args, char *);
				if (!s)
					s = "<NULL>";

				/* precision caps how much of the string is used */
				len = strnlen(s, precision);

				if (!(flags & _kc_LEFT)) {
					while (len < field_width--) {
						if (str <= end)
							*str = ' ';
						++str;
					}
				}
				for (i = 0; i < len; ++i) {
					if (str <= end)
						*str = *s;
					++str; ++s;
				}
				while (len < field_width--) {
					if (str <= end)
						*str = ' ';
					++str;
				}
				continue;

			case 'p':
				/* pointers default to zero-padded full-width hex */
				if (field_width == -1) {
					field_width = 2*sizeof(void *);
					flags |= _kc_ZEROPAD;
				}
				str = number(str, end,
						(unsigned long) va_arg(args, void *),
						16, field_width, precision, flags);
				continue;

			case 'n':
				/* FIXME:
				* What does C99 say about the overflow case here? */
				/* store the count of characters produced so far */
				if (qualifier == 'l') {
					long * ip = va_arg(args, long *);
					*ip = (str - buf);
				} else if (qualifier == 'Z') {
					size_t * ip = va_arg(args, size_t *);
					*ip = (str - buf);
				} else {
					int * ip = va_arg(args, int *);
					*ip = (str - buf);
				}
				continue;

			case '%':
				if (str <= end)
					*str = '%';
				++str;
				continue;

				/* integer number formats - set up the flags and "break" */
			case 'o':
				base = 8;
				break;

			case 'X':
				flags |= _kc_LARGE;
				/* fall through */
			case 'x':
				base = 16;
				break;

			case 'd':
			case 'i':
				flags |= _kc_SIGN;
				/* fall through */
			case 'u':
				break;

			default:
				/* unknown conversion: emit it literally */
				if (str <= end)
					*str = '%';
				++str;
				if (*fmt) {
					if (str <= end)
						*str = *fmt;
					++str;
				} else {
					--fmt;
				}
				continue;
		}

		/* fetch the integer argument with the right width/signedness */
		if (qualifier == 'L')
			num = va_arg(args, long long);
		else if (qualifier == 'l') {
			num = va_arg(args, unsigned long);
			if (flags & _kc_SIGN)
				num = (signed long) num;
		} else if (qualifier == 'Z') {
			num = va_arg(args, size_t);
		} else if (qualifier == 'h') {
			num = (unsigned short) va_arg(args, int);
			if (flags & _kc_SIGN)
				num = (signed short) num;
		} else {
			num = va_arg(args, unsigned int);
			if (flags & _kc_SIGN)
				num = (signed int) num;
		}

		str = number(str, end, num, base,
				field_width, precision, flags);
	}

	if (str <= end)
		*str = '\0';
	else if (size > 0)
		/* don't write out a null byte if the buf size is zero */
		*end = '\0';
	/* the trailing null byte doesn't count towards the total
	* ++str;
	*/
	return str-buf;
}
/* Compat snprintf for kernels < 2.4.8: format into @buf (at most @size
 * bytes including the NUL) and return whatever the _kc_vsnprintf
 * backend reports for the full output length. */
int _kc_snprintf(char * buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int written;

	va_start(ap, fmt);
	written = _kc_vsnprintf(buf, size, fmt, ap);
	va_end(ap);

	return written;
}
#endif /* < 2.4.8 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
/**************************************/
/* PCI DMA MAPPING */
#if defined(CONFIG_HIGHMEM)
#ifndef PCI_DRAM_OFFSET
#define PCI_DRAM_OFFSET 0
#endif
/* Compat pci_map_page for kernels < 2.4.13 with CONFIG_HIGHMEM: those
 * kernels cannot DMA-map high pages through pci_map_single(), so the
 * bus address is computed directly from the page's mem_map index. */
u64
_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
size_t size, int direction)
{
return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
PCI_DRAM_OFFSET);
}
#else /* CONFIG_HIGHMEM */
/* Without HIGHMEM every page has a permanent kernel virtual address,
 * so simply defer to pci_map_single(). */
u64
_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
size_t size, int direction)
{
return pci_map_single(dev, (void *)page_address(page) + offset, size,
direction);
}
#endif /* CONFIG_HIGHMEM */
/* Compat pci_unmap_page for kernels < 2.4.13: fall back to
 * pci_unmap_single(), which performs the same DMA-unmap bookkeeping.
 *
 * Fix: the original wrote "return pci_unmap_single(...);" — ISO C
 * forbids a return statement with an expression in a function whose
 * return type is void (GCC only tolerates it as an extension), so the
 * call and the implicit return are now separate. */
void
_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
		   int direction)
{
	pci_unmap_single(dev, dma_addr, size, direction);
}
#endif /* 2.4.13 => 2.4.3 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
/**************************************/
/* PCI DRIVER API */
/* Compat pci_set_dma_mask for kernels < 2.4.3: record @mask on the
 * device, but only after the platform confirms it can honor it. */
int
_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
{
if (!pci_dma_supported(dev, mask))
return -EIO;
dev->dma_mask = mask;
return 0;
}
/*
 * Compat pci_request_regions for kernels < 2.4.3: claim every I/O and
 * memory BAR of @dev under @res_name.  On the first BAR that cannot be
 * claimed, release everything already taken and report -EBUSY.
 */
int
_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
{
	int bar;

	for (bar = 0; bar < 6; bar++) {
		if (pci_resource_len(dev, bar) == 0)
			continue;	/* BAR not implemented */

		if (pci_resource_flags(dev, bar) & IORESOURCE_IO) {
			if (request_region(pci_resource_start(dev, bar),
					   pci_resource_len(dev, bar),
					   res_name))
				continue;
		} else if (pci_resource_flags(dev, bar) & IORESOURCE_MEM) {
			if (request_mem_region(pci_resource_start(dev, bar),
					       pci_resource_len(dev, bar),
					       res_name))
				continue;
		} else {
			continue;	/* neither I/O nor memory */
		}

		/* claim failed: hand back the BARs grabbed so far */
		pci_release_regions(dev);
		return -EBUSY;
	}
	return 0;
}
/* Compat pci_release_regions for kernels < 2.4.3: hand back every
 * claimed I/O and memory BAR of @dev. */
void
_kc_pci_release_regions(struct pci_dev *dev)
{
	int bar;

	for (bar = 0; bar < 6; bar++) {
		if (pci_resource_len(dev, bar) == 0)
			continue;	/* BAR not implemented */

		if (pci_resource_flags(dev, bar) & IORESOURCE_IO)
			release_region(pci_resource_start(dev, bar),
				       pci_resource_len(dev, bar));
		else if (pci_resource_flags(dev, bar) & IORESOURCE_MEM)
			release_mem_region(pci_resource_start(dev, bar),
					   pci_resource_len(dev, bar));
	}
}
/**************************************/
/* NETWORK DRIVER API */
/* Compat alloc_etherdev for kernels < 2.4.3: allocate a zeroed
 * net_device plus @sizeof_priv bytes of private area in one block. */
struct net_device *
_kc_alloc_etherdev(int sizeof_priv)
{
struct net_device *dev;
int alloc_size;
/* device + private area + name room + 31 bytes of alignment slack */
alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
dev = kzalloc(alloc_size, GFP_KERNEL);
if (!dev)
return NULL;
/* the +31 / &~31 rounds the private pointer up to a 32-byte boundary */
if (sizeof_priv)
dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
dev->name[0] = '\0';
ether_setup(dev);
return dev;
}
/* Compat is_valid_ether_addr for kernels < 2.4.3: a MAC is usable as a
 * source address when it is neither multicast (low bit of the first
 * octet set) nor all-zero. */
int
_kc_is_valid_ether_addr(u8 *addr)
{
	const char zero_mac[6] = { 0, 0, 0, 0, 0, 0 };

	if (addr[0] & 1)
		return 0;	/* multicast/broadcast bit set */
	return memcmp(addr, zero_mac, 6) != 0;
}
#endif /* 2.4.3 => 2.4.0 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
/* Compat no-op pci_set_power_state for kernels < 2.4.6: PCI power
 * management is unsupported there, so always report success. */
int
_kc_pci_set_power_state(struct pci_dev *dev, int state)
{
return 0;
}
/* Compat no-op pci_enable_wake: likewise always reports success. */
int
_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
{
return 0;
}
#endif /* 2.4.6 => 2.4.3 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
/* Compat skb_fill_page_desc for kernels < 2.6.0: install @page as
 * fragment @i of @skb and bump nr_frags to include it. */
void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
int off, int size)
{
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
frag->page = page;
frag->page_offset = off;
frag->size = size;
/* callers fill descriptors in order, so i is the new count minus 1 */
skb_shinfo(skb)->nr_frags = i + 1;
}
/*
* Original Copyright:
* find_next_bit.c: fallback find next bit implementation
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
/**
* find_next_bit - find the next set bit in a memory region
* @addr: The address to base the search on
* @offset: The bitnumber to start searching at
* @size: The maximum size to search
*/
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
const unsigned long *p = addr + BITOP_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
offset %= BITS_PER_LONG;
/* partial first word: mask off bits below the starting offset */
if (offset) {
tmp = *(p++);
tmp &= (~0UL << offset);
if (size < BITS_PER_LONG)
goto found_first;
if (tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
/* scan whole words */
while (size & ~(BITS_PER_LONG-1)) {
if ((tmp = *(p++)))
goto found_middle;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
/* partial last word: mask off bits at and beyond size */
tmp = *p;
found_first:
tmp &= (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found_middle:
/* NOTE(review): upstream's generic find_next_bit uses __ffs() (0-based)
 * here; if ffs() resolves to the kernel's 1-based ffs() every result is
 * off by one -- confirm which definition kcompat.h provides. */
return result + ffs(tmp);
}
/*
 * Compat strlcpy: copy @src into @dest, truncating so the result fits
 * in @size bytes including the terminating NUL.  Always returns
 * strlen(src), so a return value >= @size signals truncation.
 */
size_t _kc_strlcpy(char *dest, const char *src, size_t size)
{
	size_t src_len = strlen(src);

	if (size != 0) {
		size_t copy_len = src_len;

		if (copy_len > size - 1)
			copy_len = size - 1;
		memcpy(dest, src, copy_len);
		dest[copy_len] = '\0';
	}
	return src_len;
}
#ifndef do_div
#if BITS_PER_LONG == 32
/* 64-by-32 division backend for do_div() on 32-bit kernels: computes
 * *n / base by shift-and-subtract, stores the quotient back into *n
 * and returns the remainder. */
uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base)
{
uint64_t rem = *n;
uint64_t b = base;
uint64_t res, d = 1;
uint32_t high = rem >> 32;
/* Reduce the thing a bit first */
res = 0;
if (high >= base) {
high /= base;
res = (uint64_t) high << 32;
rem -= (uint64_t) (high*base) << 32;
}
/* scale the divisor (and its quotient weight d) up toward rem;
 * the (int64_t)b > 0 check stops before the sign bit is shifted into */
while ((int64_t)b > 0 && b < rem) {
b = b+b;
d = d+d;
}
/* classic restoring division on the scaled divisor */
do {
if (rem >= b) {
rem -= b;
res += d;
}
b >>= 1;
d >>= 1;
} while (d);
*n = res;
return rem;
}
#endif /* BITS_PER_LONG == 32 */
#endif /* do_div */
#endif /* 2.6.0 => 2.4.6 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
/*
 * Compat scnprintf for kernels < 2.6.4: like snprintf() but returns
 * the number of characters actually written into @buf (excluding the
 * NUL), never the would-have-been length.
 *
 * Fixes vs. the previous version: with @size == 0 it returned
 * (size_t)0 - 1 via unsigned underflow, and a negative vsnprintf()
 * return was promoted to a huge unsigned value by the mixed-sign
 * comparison; both cases now return 0.
 */
int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (size == 0 || i < 0)
		return 0;	/* nothing written / formatting error */
	return ((size_t)i >= size) ? (int)(size - 1) : i;
}
#endif /* < 2.6.4 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
/* Compat node map for kernels < 2.6.10: only node 0 is marked online. */
DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1};
#endif /* < 2.6.10 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
/* Compat kstrdup for kernels < 2.6.13: duplicate the NUL-terminated
 * string @s with kmalloc(@gfp).  Returns NULL for a NULL input or on
 * allocation failure. */
char *_kc_kstrdup(const char *s, unsigned int gfp)
{
	char *dup;
	size_t n;

	if (s == NULL)
		return NULL;

	n = strlen(s) + 1;	/* include the terminator */
	dup = kmalloc(n, gfp);
	if (dup != NULL)
		memcpy(dup, s, n);
	return dup;
}
#endif /* < 2.6.13 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
/* Compat kzalloc for kernels < 2.6.14: kmalloc followed by zero-fill. */
void *_kc_kzalloc(size_t size, int flags)
{
	void *mem = kmalloc(size, flags);

	if (mem != NULL)
		memset(mem, 0, size);
	return mem;
}
#endif /* <= 2.6.13 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
/*
 * Compat skb_pad for kernels < 2.6.19: append @pad zero bytes to @skb.
 * Returns 0 on success; on failure the skb is freed and -ENOMEM
 * returned (the old kernel semantics).
 *
 * Bug fix: the expand-head branch had a stray ';' after the if
 * condition ("if (pskb_expand_head(...));"), which made the following
 * "goto free_skb" unconditional -- every cloned or tail-short skb was
 * freed and the call failed even when pskb_expand_head() succeeded.
 */
int _kc_skb_pad(struct sk_buff *skb, int pad)
{
	int ntail;

	/* If the skbuff is non linear tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC))
			goto free_skb;
	}

#ifdef MAX_SKB_FRAGS
	/* linearize so the pad area is contiguous at the tail */
	if (skb_is_nonlinear(skb) &&
	    !__pskb_pull_tail(skb, skb->data_len))
		goto free_skb;
#endif

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return -ENOMEM;
}
#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
/* Compat pci_save_state for kernels < 2.6.19 (and RHEL < 5.4):
 * snapshot the device's config space into adapter->config_space so
 * _kc_pci_restore_state() can replay it later.  The larger PCIe config
 * space is saved when an express capability with a readable link
 * status register is present. */
int _kc_pci_save_state(struct pci_dev *pdev)
{
struct adapter_struct *adapter = pci_get_drvdata(pdev);
int size = PCI_CONFIG_SPACE_LEN, i;
u16 pcie_cap_offset, pcie_link_status;
#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
/* no ->dev for 2.4 kernels */
WARN_ON(pdev->dev.driver_data == NULL);
#endif
pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (pcie_cap_offset) {
if (!pci_read_config_word(pdev,
pcie_cap_offset + PCIE_LINK_STATUS,
&pcie_link_status))
size = PCIE_CONFIG_SPACE_LEN;
}
pci_config_space_ich8lan();
/* With HAVE_PCI_ERS a buffer may legitimately persist between calls,
 * so the kmalloc below is the body of the if (only allocate when
 * absent); without it, a leftover buffer is a bug (WARN_ON fires). */
#ifdef HAVE_PCI_ERS
if (adapter->config_space == NULL)
#else
WARN_ON(adapter->config_space != NULL);
#endif
adapter->config_space = kmalloc(size, GFP_KERNEL);
if (!adapter->config_space) {
printk(KERN_ERR "Out of memory in pci_save_state\n");
return -ENOMEM;
}
for (i = 0; i < (size / 4); i++)
pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
return 0;
}
/* Compat pci_restore_state: replay the config-space snapshot captured
 * by _kc_pci_save_state(), if one exists. */
void _kc_pci_restore_state(struct pci_dev *pdev)
{
struct adapter_struct *adapter = pci_get_drvdata(pdev);
int size = PCI_CONFIG_SPACE_LEN, i;
u16 pcie_cap_offset;
u16 pcie_link_status;
if (adapter->config_space != NULL) {
/* mirror the save-side sizing decision */
pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (pcie_cap_offset &&
!pci_read_config_word(pdev,
pcie_cap_offset + PCIE_LINK_STATUS,
&pcie_link_status))
size = PCIE_CONFIG_SPACE_LEN;
pci_config_space_ich8lan();
for (i = 0; i < (size / 4); i++)
pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
#ifndef HAVE_PCI_ERS
/* without PCI error recovery the snapshot is single-use */
kfree(adapter->config_space);
adapter->config_space = NULL;
#endif
}
}
#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
#ifdef HAVE_PCI_ERS
/* Compat free_netdev used when HAVE_PCI_ERS: also frees the saved
 * config-space snapshot, then releases the net_device the way the
 * kernel's free_netdev of that era did. */
void _kc_free_netdev(struct net_device *netdev)
{
struct adapter_struct *adapter = netdev_priv(netdev);
if (adapter->config_space != NULL)
kfree(adapter->config_space);
#ifdef CONFIG_SYSFS
if (netdev->reg_state == NETREG_UNINITIALIZED) {
/* never registered: free the raw allocation directly */
kfree((char *)netdev - netdev->padded);
} else {
BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
netdev->reg_state = NETREG_RELEASED;
/* sysfs owns the lifetime; drop the class-device reference */
class_device_put(&netdev->class_dev);
}
#else
kfree((char *)netdev - netdev->padded);
#endif
}
#endif
/* Compat kmemdup for kernels < 2.6.19: duplicate @len bytes from @src
 * into a fresh @gfp allocation; returns NULL on allocation failure.
 * Uses kmalloc() instead of kzalloc(): the buffer is fully overwritten
 * by the memcpy(), so pre-zeroing was pure overhead (this also matches
 * the upstream kmemdup implementation). */
void *_kc_kmemdup(const void *src, size_t len, unsigned gfp)
{
	void *p;

	p = kmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
#endif /* <= 2.6.19 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
/* Map a net_device back to its PCI device through the driver-private
 * adapter struct (kernels < 2.6.21 have no generic accessor). */
struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev)
{
return ((struct adapter_struct *)netdev_priv(netdev))->pdev;
}
#endif /* < 2.6.21 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
/* hexdump code taken from lib/hexdump.c */
/*
 * Render up to one row of @buf into @linebuf as hex (and optionally
 * ASCII), @groupsize bytes per group.  Backport of hex_dump_to_buffer()
 * for kernels < 2.6.22.
 *
 * Robustness fix: @groupsize is now validated up front (only 1, 2, 4
 * or 8 are renderable); previously a caller passing 0 hit a division
 * by zero in the "len % groupsize" check.
 */
static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
				   int groupsize, unsigned char *linebuf,
				   size_t linebuflen, bool ascii)
{
	const u8 *ptr = buf;
	u8 ch;
	int j, lx = 0;
	int ascii_column;

	if (rowsize != 16 && rowsize != 32)
		rowsize = 16;
	/* only 1/2/4/8-byte groups are handled by the switch below;
	 * anything else (including 0, which would divide by zero in the
	 * modulo check) degrades to single-byte output */
	if (groupsize != 1 && groupsize != 2 &&
	    groupsize != 4 && groupsize != 8)
		groupsize = 1;
	if (!len)
		goto nil;
	if (len > rowsize)		/* limit to one line at a time */
		len = rowsize;
	if ((len % groupsize) != 0)	/* no mixed size output */
		groupsize = 1;

	switch (groupsize) {
	case 8: {
		const u64 *ptr8 = buf;
		int ngroups = len / groupsize;

		for (j = 0; j < ngroups; j++)
			lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
					"%s%16.16llx", j ? " " : "",
					(unsigned long long)*(ptr8 + j));
		ascii_column = 17 * ngroups + 2;
		break;
	}
	case 4: {
		const u32 *ptr4 = buf;
		int ngroups = len / groupsize;

		for (j = 0; j < ngroups; j++)
			lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
					"%s%8.8x", j ? " " : "", *(ptr4 + j));
		ascii_column = 9 * ngroups + 2;
		break;
	}
	case 2: {
		const u16 *ptr2 = buf;
		int ngroups = len / groupsize;

		for (j = 0; j < ngroups; j++)
			lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
					"%s%4.4x", j ? " " : "", *(ptr2 + j));
		ascii_column = 5 * ngroups + 2;
		break;
	}
	default:
		for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
			ch = ptr[j];
			linebuf[lx++] = hex_asc(ch >> 4);
			linebuf[lx++] = hex_asc(ch & 0x0f);
			linebuf[lx++] = ' ';
		}
		if (j)
			lx--;	/* drop the trailing space */
		ascii_column = 3 * rowsize + 2;
		break;
	}

	if (!ascii)
		goto nil;

	/* pad out to the ASCII column, then append printable characters */
	while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
		linebuf[lx++] = ' ';
	for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
		linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
								     : '.';
nil:
	linebuf[lx++] = '\0';
}
/* Backport of print_hex_dump() for kernels < 2.6.22: emit @buf as rows
 * of hex (and optional ASCII) via printk at @level, prefixing each row
 * according to @prefix_type. */
void _kc_print_hex_dump(const char *level,
const char *prefix_str, int prefix_type,
int rowsize, int groupsize,
const void *buf, size_t len, bool ascii)
{
const u8 *ptr = buf;
int i, linelen, remaining = len;
unsigned char linebuf[200];
if (rowsize != 16 && rowsize != 32)
rowsize = 16;
for (i = 0; i < len; i += rowsize) {
/* the final row may be shorter than rowsize */
linelen = min(remaining, rowsize);
remaining -= rowsize;
_kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
linebuf, sizeof(linebuf), ascii);
switch (prefix_type) {
case DUMP_PREFIX_ADDRESS:
printk("%s%s%*p: %s\n", level, prefix_str,
(int)(2 * sizeof(void *)), ptr + i, linebuf);
break;
case DUMP_PREFIX_OFFSET:
printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
break;
default:
printk("%s%s%s\n", level, prefix_str, linebuf);
break;
}
}
}
#endif /* < 2.6.22 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
/* DCB netlink stubs for kernels < 2.6.23 (no DCB netlink support
 * there); all report success so callers proceed without it. */
int ixgbe_dcb_netlink_register(void)
{
return 0;
}
int ixgbe_dcb_netlink_unregister(void)
{
return 0;
}
int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
{
return 0;
}
#endif /* < 2.6.23 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
#ifdef NAPI
#if defined(DRIVER_IXGBE) || defined(DRIVER_IGB) || defined(DRIVER_I40E) || \
defined(DRIVER_IXGBEVF)
/* Map a napi_struct back to the fake polling net_device embedded in
 * its queue vector (pre-2.6.24 NAPI compat). */
struct net_device *napi_to_poll_dev(const struct napi_struct *napi)
{
struct adapter_q_vector *q_vector = container_of(napi,
struct adapter_q_vector,
napi);
return &q_vector->poll_dev;
}
#endif
/* Compat NAPI poll adapter for kernels < 2.6.24: bridge the old
 * netdev->poll interface onto the driver's napi_struct poll routine.
 * Returns 1 while more work remains (old-API convention), else 0. */
int __kc_adapter_clean(struct net_device *netdev, int *budget)
{
int work_done;
int work_to_do = min(*budget, netdev->quota);
#if defined(DRIVER_IXGBE) || defined(DRIVER_IGB) || defined(DRIVER_I40E) || \
defined(E1000E_MQ) || defined(DRIVER_IXGBEVF)
/* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
struct napi_struct *napi = netdev->priv;
#else
struct adapter_struct *adapter = netdev_priv(netdev);
struct napi_struct *napi = &adapter->rx_ring[0].napi;
#endif
work_done = napi->poll(napi, work_to_do);
/* charge the completed work against both budgets */
*budget -= work_done;
netdev->quota -= work_done;
return (work_done >= work_to_do) ? 1 : 0;
}
#endif /* NAPI */
#endif /* <= 2.6.24 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
/* Compat pci_disable_link_state for kernels < 2.6.26: clear the
 * requested ASPM @state bits in the *parent* (upstream) port's link
 * control register. */
void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
{
struct pci_dev *parent = pdev->bus->self;
u16 link_state;
int pos;
if (!parent)
return;
pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
if (pos) {
/* read-modify-write: only the requested bits are cleared */
pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
link_state &= ~state;
pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
}
}
#endif /* < 2.6.26 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
#ifdef HAVE_TX_MQ
/* Compat multiqueue helpers for kernels < 2.6.27: act on the default
 * queue first, then fan out to every subqueue when the device is
 * multiqueue. */
void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
{
struct adapter_struct *adapter = netdev_priv(netdev);
int i;
netif_stop_queue(netdev);
if (netif_is_multiqueue(netdev))
for (i = 0; i < adapter->num_tx_queues; i++)
netif_stop_subqueue(netdev, i);
}
/* Wake the default queue and every subqueue. */
void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
{
struct adapter_struct *adapter = netdev_priv(netdev);
int i;
netif_wake_queue(netdev);
if (netif_is_multiqueue(netdev))
for (i = 0; i < adapter->num_tx_queues; i++)
netif_wake_subqueue(netdev, i);
}
/* Start the default queue and every subqueue. */
void _kc_netif_tx_start_all_queues(struct net_device *netdev)
{
struct adapter_struct *adapter = netdev_priv(netdev);
int i;
netif_start_queue(netdev);
if (netif_is_multiqueue(netdev))
for (i = 0; i < adapter->num_tx_queues; i++)
netif_start_subqueue(netdev, i);
}
#endif /* HAVE_TX_MQ */
#ifndef __WARN_printf
/*
 * Compat WARN() backend for kernels < 2.6.27: print the banner, the
 * warning location, the caller-formatted message, and a stack dump.
 *
 * Bug fix: the location printk used the format "WARNING: at %s:%d %s()"
 * but supplied only two arguments (file, line) -- the trailing %s read
 * a garbage pointer (undefined behavior).  The function name is not
 * available in this signature, so the format now matches the arguments.
 */
void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...)
{
	va_list args;

	printk(KERN_WARNING "------------[ cut here ]------------\n");
	printk(KERN_WARNING "WARNING: at %s:%d\n", file, line);
	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);
	dump_stack();
}
#endif /* __WARN_printf */
#endif /* < 2.6.27 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
int
_kc_pci_prepare_to_sleep(struct pci_dev *dev)
{
pci_power_t target_state;
int error;
target_state = pci_choose_state(dev, PMSG_SUSPEND);
pci_enable_wake(dev, target_state, true);
error = pci_set_power_state(dev, target_state);
if (error)
pci_enable_wake(dev, target_state, false);
return error;
}
/* Compat pci_wake_from_d3 for kernels < 2.6.28: arm (or disarm) PME
 * from both D3cold and D3hot, stopping at the first failure. */
int
_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	int err = pci_enable_wake(dev, PCI_D3cold, enable);

	if (!err)
		err = pci_enable_wake(dev, PCI_D3hot, enable);
	return err;
}
#endif /* < 2.6.28 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
/* Set or clear PCI bus mastering on @pdev, touching the command
 * register only when the bit actually changes. */
static void __kc_pci_set_master(struct pci_dev *pdev, bool enable)
{
u16 old_cmd, cmd;
pci_read_config_word(pdev, PCI_COMMAND, &old_cmd);
if (enable)
cmd = old_cmd | PCI_COMMAND_MASTER;
else
cmd = old_cmd & ~PCI_COMMAND_MASTER;
if (cmd != old_cmd) {
dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n",
enable ? "enabling" : "disabling");
pci_write_config_word(pdev, PCI_COMMAND, cmd);
}
#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) )
/* keep the cached flag in sync where the field exists */
pdev->is_busmaster = enable;
#endif
}
/* Backport of pci_clear_master() for kernels < 2.6.29. */
void _kc_pci_clear_master(struct pci_dev *dev)
{
__kc_pci_set_master(dev, false);
}
#endif /* < 2.6.29 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
/* Compat pci_num_vf for kernels < 2.6.34 / RHEL < 6.0: count virtual
 * functions whose physfn is @dev by walking all ethernet-class PCI
 * devices.  Always 0 without CONFIG_PCI_IOV. */
int _kc_pci_num_vf(struct pci_dev *dev)
{
int num_vf = 0;
#ifdef CONFIG_PCI_IOV
struct pci_dev *vfdev;
/* loop through all ethernet devices starting at PF dev */
vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL);
while (vfdev) {
if (vfdev->is_virtfn && vfdev->physfn == dev)
num_vf++;
/* pci_get_class drops the reference on its 'from' argument */
vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev);
}
#endif
return num_vf;
}
#endif /* RHEL_RELEASE_CODE */
#endif /* < 2.6.34 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
#if defined(DRIVER_IXGBE) || defined(DRIVER_IGB) || defined(DRIVER_I40E) || \
defined(DRIVER_IXGBEVF)
#ifdef HAVE_TX_MQ
#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
#ifndef CONFIG_NETDEVICES_MULTIQUEUE
/* Compat netif_set_real_num_tx_queues for kernels < 2.6.35: adjust the
 * active Tx queue count, resetting the qdisc on every queue being
 * deactivated.  Out-of-range requests are silently ignored (this old
 * API returns void, so there is no error to report). */
void _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
unsigned int real_num = dev->real_num_tx_queues;
struct Qdisc *qdisc;
int i;
if (unlikely(txq > dev->num_tx_queues))
; /* deliberately empty: invalid request, nothing to do */
else if (txq > real_num)
dev->real_num_tx_queues = txq;
else if ( txq < real_num) {
dev->real_num_tx_queues = txq;
/* shrink: reset qdiscs on the queues that just went inactive */
for (i = txq; i < dev->num_tx_queues; i++) {
qdisc = netdev_get_tx_queue(dev, i)->qdisc;
if (qdisc) {
spin_lock_bh(qdisc_lock(qdisc));
qdisc_reset(qdisc);
spin_unlock_bh(qdisc_lock(qdisc));
}
}
}
}
#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
#endif /* HAVE_TX_MQ */
#endif /* defined(DRIVER_IXGBE) || defined(DRIVER_IGB) || defined(DRIVER_I40E) */
/* Backport of simple_write_to_buffer() for kernels < 2.6.35: copy up
 * to @count bytes from userspace @from into @to at *@ppos, clamped to
 * @available.  Returns bytes copied, 0 at/after the end, or -EINVAL /
 * -EFAULT on error. */
ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
const void __user *from, size_t count)
{
loff_t pos = *ppos;
size_t res;
if (pos < 0)
return -EINVAL;
if (pos >= available || !count)
return 0;
if (count > available - pos)
count = available - pos;
res = copy_from_user(to + pos, from, count);
/* only a total copy failure is an error; partial copies still count */
if (res == count)
return -EFAULT;
count -= res;
*ppos = pos + count;
return count;
}
#endif /* < 2.6.35 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
/* Ethtool flag ops for kernels < 2.6.36: only LRO/NTUPLE/RXHASH are
 * mirrored between ethtool flags and netdev features. */
static const u32 _kc_flags_dup_features =
(ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);
/* Report which of the mirrored flags are currently enabled. */
u32 _kc_ethtool_op_get_flags(struct net_device *dev)
{
return dev->features & _kc_flags_dup_features;
}
/* Replace the mirrored flags, rejecting any bit outside @supported. */
int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
{
if (data & ~supported)
return -EINVAL;
dev->features = ((dev->features & ~_kc_flags_dup_features) |
(data & _kc_flags_dup_features));
return 0;
}
#endif /* < 2.6.36 */
/******************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
#ifdef HAVE_NETDEV_SELECT_QUEUE
#include <net/ip.h>
#include <linux/pkt_sched.h>
/* Backport of skb_tx_hash()-style Tx queue selection for kernels
 * < 2.6.39: use the recorded Rx queue when present, otherwise hash the
 * socket/protocol into the queue range of the skb's traffic class. */
u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb,
u16 num_tx_queues)
{
u32 hash;
u16 qoffset = 0;
u16 qcount = num_tx_queues;
if (skb_rx_queue_recorded(skb)) {
hash = skb_get_rx_queue(skb);
/* fold the recorded queue into range without a modulo */
while (unlikely(hash >= num_tx_queues))
hash -= num_tx_queues;
return hash;
}
if (netdev_get_num_tc(dev)) {
struct adapter_struct *kc_adapter = netdev_priv(dev);
if (skb->priority == TC_PRIO_CONTROL) {
/* control traffic targets the last traffic class */
qoffset = kc_adapter->dcb_tc - 1;
} else {
/* class from the 802.1p priority bits of the VLAN tag */
qoffset = skb->vlan_tci;
qoffset &= IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
qoffset >>= 13;
}
qcount = kc_adapter->ring_feature[RING_F_RSS].indices;
qoffset *= qcount;
}
if (skb->sk && skb->sk->sk_hash)
hash = skb->sk->sk_hash;
else
#ifdef NETIF_F_RXHASH
hash = (__force u16) skb->protocol ^ skb->rxhash;
#else
hash = skb->protocol;
#endif
hash = jhash_1word(hash, _kc_hashrnd);
/* scale the 32-bit hash into [qoffset, qoffset + qcount) */
return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
#endif /* HAVE_NETDEV_SELECT_QUEUE */
/* Compat netdev_get_num_tc: the adapter's DCB traffic-class count, or
 * 0 when DCB is disabled. */
u8 _kc_netdev_get_num_tc(struct net_device *dev)
{
	struct adapter_struct *adapter = netdev_priv(dev);

	return (adapter->flags & IXGBE_FLAG_DCB_ENABLED) ? adapter->dcb_tc : 0;
}
/* Compat netdev_set_num_tc: record the traffic-class count on the
 * adapter, bounded by the hardware maximum. */
int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
struct adapter_struct *kc_adapter = netdev_priv(dev);
if (num_tc > IXGBE_DCB_MAX_TRAFFIC_CLASS)
return -EINVAL;
kc_adapter->dcb_tc = num_tc;
return 0;
}
/* Compat netdev_get_prio_tc_map: resolve user priority @up to a
 * traffic class via the adapter's DCB configuration. */
u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up)
{
struct adapter_struct *kc_adapter = netdev_priv(dev);
return ixgbe_dcb_get_tc_from_up(&kc_adapter->dcb_cfg, 0, up);
}
#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
#endif /* < 2.6.39 */
/******************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
/* Backport of skb_add_rx_frag() for kernels < 3.4: attach @page as
 * fragment @i and account its bytes in len/data_len/truesize. */
void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
int off, int size, unsigned int truesize)
{
skb_fill_page_desc(skb, i, page, off, size);
skb->len += size;
skb->data_len += size;
skb->truesize += truesize;
}
/* Backport of simple_open() for kernels < 3.4: expose inode->i_private
 * (when set) as the file's private_data. */
int _kc_simple_open(struct inode *inode, struct file *file)
{
	void *priv = inode->i_private;

	if (priv)
		file->private_data = priv;
	return 0;
}
#endif /* < 3.4.0 */
/******************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
/* Read the version field of @dev's PCIe capability; 0 when the device
 * has no PCIe capability.
 *
 * Bug fix: the address-of in the config-space read had been mangled to
 * the literal "(registered-sign)16" -- an HTML "&reg" entity corruption
 * of "&reg16" -- which does not compile; the intended &reg16 is
 * restored. */
static inline int __kc_pcie_cap_version(struct pci_dev *dev)
{
	int pos;
	u16 reg16;

	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!pos)
		return 0;
	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
	return reg16 & PCI_EXP_FLAGS_VERS;
}
/* The register-presence helpers below mirror the upstream
 * pcie_capability_* backports: v2 capabilities implement everything,
 * v1 only the registers relevant to the port type. */
static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev)
{
return true;
}
/* Link registers: v2 caps, or root/endpoint/legacy-endpoint ports. */
static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
return __kc_pcie_cap_version(dev) > 1 ||
type == PCI_EXP_TYPE_ROOT_PORT ||
type == PCI_EXP_TYPE_ENDPOINT ||
type == PCI_EXP_TYPE_LEG_END;
}
/* Slot registers: v2 caps, root ports, or downstream ports advertising
 * a slot; false (0) when there is no PCIe capability at all. */
static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
int pos;
u16 pcie_flags_reg;
pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
if (!pos)
return 0;
pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg);
return __kc_pcie_cap_version(dev) > 1 ||
type == PCI_EXP_TYPE_ROOT_PORT ||
(type == PCI_EXP_TYPE_DOWNSTREAM &&
pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
}
/* Root registers: v2 caps, root ports, or RC event collectors. */
static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
return __kc_pcie_cap_version(dev) > 1 ||
type == PCI_EXP_TYPE_ROOT_PORT ||
type == PCI_EXP_TYPE_RC_EC;
}
/* Whether the PCIe capability register at offset @pos is implemented
 * on @dev, per the presence helpers above; false for non-PCIe devices
 * and unknown offsets. */
static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
if (!pci_is_pcie(dev))
return false;
switch (pos) {
case PCI_EXP_FLAGS_TYPE:
return true;
case PCI_EXP_DEVCAP:
case PCI_EXP_DEVCTL:
case PCI_EXP_DEVSTA:
return __kc_pcie_cap_has_devctl(dev);
case PCI_EXP_LNKCAP:
case PCI_EXP_LNKCTL:
case PCI_EXP_LNKSTA:
return __kc_pcie_cap_has_lnkctl(dev);
case PCI_EXP_SLTCAP:
case PCI_EXP_SLTCTL:
case PCI_EXP_SLTSTA:
return __kc_pcie_cap_has_sltctl(dev);
case PCI_EXP_RTCTL:
case PCI_EXP_RTCAP:
case PCI_EXP_RTSTA:
return __kc_pcie_cap_has_rtctl(dev);
case PCI_EXP_DEVCAP2:
case PCI_EXP_DEVCTL2:
case PCI_EXP_LNKCAP2:
case PCI_EXP_LNKCTL2:
case PCI_EXP_LNKSTA2:
/* the "2" register block only exists on v2 capabilities */
return __kc_pcie_cap_version(dev) > 1;
default:
return false;
}
}
/*
* Note that these accessor functions are only for the "PCI Express
* Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
* other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
*/
/* Backport of pcie_capability_read_word() for kernels < 3.7: read the
 * PCIe capability register at word offset @pos into *@val, emulating
 * hardwired values for unimplemented registers. */
int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
int ret;
*val = 0;
if (pos & 1)
return -EINVAL;
if (__kc_pcie_capability_reg_implemented(dev, pos)) {
ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
/*
* Reset *val to 0 if pci_read_config_word() fails, it may
* have been written as 0xFFFF if hardware error happens
* during pci_read_config_word().
*/
if (ret)
*val = 0;
return ret;
}
/*
* For Functions that do not implement the Slot Capabilities,
* Slot Status, and Slot Control registers, these spaces must
* be hardwired to 0b, with the exception of the Presence Detect
* State bit in the Slot Status register of Downstream Ports,
* which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
*/
if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
*val = PCI_EXP_SLTSTA_PDS;
}
return 0;
}
/* Backport of pcie_capability_write_word(): writes to unimplemented
 * registers are silently dropped (reported as success), matching
 * upstream semantics. */
int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
if (pos & 1)
return -EINVAL;
if (!__kc_pcie_capability_reg_implemented(dev, pos))
return 0;
return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
/* Read-modify-write on the PCIe capability register at @pos: clear the
 * @clear bits, then set the @set bits.  Returns the first error hit. */
int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
					    u16 clear, u16 set)
{
	u16 current_val;
	int status;

	status = __kc_pcie_capability_read_word(dev, pos, &current_val);
	if (status)
		return status;

	current_val = (current_val & ~clear) | set;
	return __kc_pcie_capability_write_word(dev, pos, current_val);
}
/* Backport of pcie_capability_clear_word(): clear @clear bits at @pos. */
int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos,
u16 clear)
{
return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0);
}
#endif /* < 3.7.0 */
/******************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
#ifdef CONFIG_XPS
#if NR_CPUS < 64
#define _KC_MAX_XPS_CPUS NR_CPUS
#else
#define _KC_MAX_XPS_CPUS 64
#endif
/*
* netdev_queue sysfs structures and functions.
*/
/* Shadow of the kernel's private netdev_queue sysfs attribute layout,
 * used below to reach the "xps_cpus" store handler by name. */
struct _kc_netdev_queue_attribute {
struct attribute attr;
ssize_t (*show)(struct netdev_queue *queue,
struct _kc_netdev_queue_attribute *attr, char *buf);
ssize_t (*store)(struct netdev_queue *queue,
struct _kc_netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_kc_netdev_queue_attr(_attr) container_of(_attr, \
struct _kc_netdev_queue_attribute, attr)
/* Backport of netif_set_xps_queue() for kernels < 3.9: locate the Tx
 * queue's "xps_cpus" sysfs attribute and drive its store handler with
 * a text rendering of @mask.  Returns 0 or a negative errno. */
int __kc_netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
u16 index)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, index);
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
/* Redhat requires some odd extended netdev structures */
struct netdev_tx_queue_extended *txq_ext =
netdev_extended(dev)->_tx_ext + index;
struct kobj_type *ktype = txq_ext->kobj.ktype;
#else
struct kobj_type *ktype = txq->kobj.ktype;
#endif
struct _kc_netdev_queue_attribute *xps_attr;
struct attribute *attr = NULL;
int i, len, err;
#define _KC_XPS_BUFLEN (DIV_ROUND_UP(_KC_MAX_XPS_CPUS, 32) * 9)
char buf[_KC_XPS_BUFLEN];
if (!ktype)
return -ENOMEM;
/* attempt to locate the XPS attribute in the Tx queue */
for (i = 0; (attr = ktype->default_attrs[i]); i++) {
if (!strcmp("xps_cpus", attr->name))
break;
}
/* if we did not find it return an error */
if (!attr)
return -EINVAL;
/* copy the mask into a string */
len = bitmap_scnprintf(buf, _KC_XPS_BUFLEN,
cpumask_bits(mask), _KC_MAX_XPS_CPUS);
if (!len)
return -ENOMEM;
xps_attr = to_kc_netdev_queue_attr(attr);
/* Store the XPS value using the SYSFS store call */
err = xps_attr->store(txq, xps_attr, buf, len);
/* we only had an error on err < 0 */
return (err < 0) ? err : 0;
}
#endif /* CONFIG_XPS */
#ifdef HAVE_NETDEV_SELECT_QUEUE
/* Pick a Tx queue from the XPS CPU map for the current CPU (or,
 * without CONFIG_XPS, from the recorded Rx queue when flow director is
 * enabled).  Returns -1 when no preference exists. */
static inline int kc_get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
struct xps_dev_maps *dev_maps;
struct xps_map *map;
int queue_index = -1;
rcu_read_lock();
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
/* Redhat requires some odd extended netdev structures */
dev_maps = rcu_dereference(netdev_extended(dev)->xps_maps);
#else
dev_maps = rcu_dereference(dev->xps_maps);
#endif
if (dev_maps) {
map = rcu_dereference(
dev_maps->cpu_map[raw_smp_processor_id()]);
if (map) {
if (map->len == 1)
queue_index = map->queues[0];
else {
u32 hash;
if (skb->sk && skb->sk->sk_hash)
hash = skb->sk->sk_hash;
else
hash = (__force u16) skb->protocol ^
skb->rxhash;
hash = jhash_1word(hash, _kc_hashrnd);
/* scale the 32-bit hash into [0, map->len) */
queue_index = map->queues[
((u64)hash * map->len) >> 32];
}
/* stale maps can name queues beyond the active set */
if (unlikely(queue_index >= dev->real_num_tx_queues))
queue_index = -1;
}
}
rcu_read_unlock();
return queue_index;
#else
struct adapter_struct *kc_adapter = netdev_priv(dev);
int queue_index = -1;
if (kc_adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
queue_index = skb_rx_queue_recorded(skb) ?
skb_get_rx_queue(skb) :
smp_processor_id();
/* fold into the active queue range without a modulo */
while (unlikely(queue_index >= dev->real_num_tx_queues))
queue_index -= dev->real_num_tx_queues;
return queue_index;
}
return -1;
#endif
}
/* Backport of netdev_pick_tx() for kernels < 3.9: prefer the socket's
 * cached queue, then XPS, then skb_tx_hash; cache the new choice on
 * the socket when its dst still matches the skb's. */
u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
struct sock *sk = skb->sk;
int queue_index = sk_tx_queue_get(sk);
int new_index;
if (queue_index >= 0 && queue_index < dev->real_num_tx_queues) {
#ifdef CONFIG_XPS
/* ooo_okay means reordering is acceptable, so re-pick below */
if (!skb->ooo_okay)
#endif
return queue_index;
}
new_index = kc_get_xps_queue(dev, skb);
if (new_index < 0)
new_index = skb_tx_hash(dev, skb);
if (queue_index != new_index && sk) {
struct dst_entry *dst =
rcu_dereference(sk->sk_dst_cache);
/* only cache when the socket's route still matches this skb */
if (dst && skb_dst(skb) == dst)
sk_tx_queue_set(sk, new_index);
}
return new_index;
}
#endif /* HAVE_NETDEV_SELECT_QUEUE */
#endif /* 3.9.0 */
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
#ifdef CONFIG_PCI_IOV
/* Backport of pci_vfs_assigned() for kernels < 3.10: count this PF's
 * virtual functions currently flagged as assigned to a guest; 0 when
 * the assigned-flag infrastructure is unavailable. */
int __kc_pci_vfs_assigned(struct pci_dev *dev)
{
unsigned int vfs_assigned = 0;
#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
int pos;
struct pci_dev *vfdev;
unsigned short dev_id;
/* only search if we are a PF */
if (!dev->is_physfn)
return 0;
/* find SR-IOV capability */
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
if (!pos)
return 0;
/*
* determine the device ID for the VFs, the vendor ID will be the
* same as the PF so there is no need to check for that one
*/
pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id);
/* loop through all the VFs to see if we own any that are assigned */
vfdev = pci_get_device(dev->vendor, dev_id, NULL);
while (vfdev) {
/*
* It is considered assigned if it is a virtual function with
* our dev as the physical function and the assigned bit is set
*/
if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
(vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
vfs_assigned++;
vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
}
#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */
return vfs_assigned;
}
#endif /* CONFIG_PCI_IOV */
#endif /* 3.10.0 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) )
/* Backport of dma_set_mask_and_coherent() for kernels < 3.13: set the
 * streaming DMA mask and, when that succeeds, mirror the same mask onto
 * the coherent allocation mask.  Returns 0 on success or the error from
 * dma_set_mask().
 */
int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc;

	rc = dma_set_mask(dev, mask);
	if (rc)
		return rc;

	/* A coherent mask of the same size always succeeds once
	 * dma_set_mask() has, so its result can be ignored. */
	dma_set_coherent_mask(dev, mask);
	return 0;
}
#endif /* 3.13.0 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
/* Backport of pci_enable_msix_range() for kernels < 3.14.
 *
 * Tries to enable between @minvec and @maxvec MSI-X vectors described by
 * @entries.  pci_enable_msix() returns 0 on success, a negative errno on
 * hard failure, or a positive count of vectors it could actually provide;
 * in the last case we retry with that smaller count until it either
 * succeeds or drops below @minvec.
 *
 * Returns the number of vectors enabled, -ERANGE for an invalid range,
 * -ENOSPC when fewer than @minvec vectors are available, or the error
 * from pci_enable_msix().
 */
int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
			       int minvec, int maxvec)
{
	int nvec;
	int rc;

	if (maxvec < minvec)
		return -ERANGE;

	for (nvec = maxvec; ; nvec = rc) {
		rc = pci_enable_msix(dev, entries, nvec);
		if (rc == 0)
			return nvec;	/* success with the current count */
		if (rc < 0)
			return rc;	/* hard failure */
		if (rc < minvec)
			return -ENOSPC;	/* can't satisfy the minimum */
		/* rc > 0: retry with the reduced vector count */
	}
}
#endif /* 3.14.0 */
| mpeuster/son-examples | vnfs/sonata-vtc-vnf-docker/pfring_web_api/vtc/PF_RING/drivers/DNA/ixgbe-3.21.2-DNA/src/kcompat.c | C | apache-2.0 | 42,034 |
/*
* QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
* Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using NUnit.Framework;
using QuantConnect.Data;
using QuantConnect.Data.Market;
using QuantConnect.Indicators;
using QuantConnect.Orders;
using QuantConnect.Securities;
using QuantConnect.Securities.Equity;
namespace QuantConnect.Tests.Common.Securities.Equity
{
    /// <summary>
    /// Tests for <see cref="EquityTransactionModel"/> fill behavior across the
    /// supported order types (market, limit, stop-limit, stop-market) in both
    /// buy and sell directions. Each test verifies the worst-case fill price
    /// convention used by the model.
    /// </summary>
    [TestFixture]
    public class EquityTransactionModelTests
    {
        private static readonly DateTime Noon = new DateTime(2014, 6, 24, 12, 0, 0);
        private static readonly TimeKeeper TimeKeeper = new TimeKeeper(Noon.ConvertToUtc(TimeZones.NewYork), new[] {TimeZones.NewYork});

        [Test]
        public void PerformsMarketFillBuy()
        {
            var model = new EquityTransactionModel();
            var order = new MarketOrder(Symbols.SPY, 100, Noon);
            var security = CreateSecurity();
            security.SetMarketPrice(new IndicatorDataPoint(Symbols.SPY, Noon, 101.123m));

            var fill = model.MarketFill(security, order);
            Assert.AreEqual(order.Quantity, fill.FillQuantity);
            Assert.AreEqual(security.Price, fill.FillPrice);
            Assert.AreEqual(OrderStatus.Filled, fill.Status);
        }

        [Test]
        public void PerformsMarketFillSell()
        {
            var model = new EquityTransactionModel();
            var order = new MarketOrder(Symbols.SPY, -100, Noon);
            var security = CreateSecurity();
            security.SetMarketPrice(new IndicatorDataPoint(Symbols.SPY, Noon, 101.123m));

            var fill = model.MarketFill(security, order);
            Assert.AreEqual(order.Quantity, fill.FillQuantity);
            Assert.AreEqual(security.Price, fill.FillPrice);
            Assert.AreEqual(OrderStatus.Filled, fill.Status);
        }

        [Test]
        public void PerformsLimitFillBuy()
        {
            var model = new EquityTransactionModel();
            var order = new LimitOrder(Symbols.SPY, 100, 101.5m, Noon);
            var security = CreateSecurity();

            // Price above the limit: the buy must not fill yet.
            security.SetMarketPrice(new IndicatorDataPoint(Symbols.SPY, Noon, 102m));
            var fill = model.LimitFill(security, order);
            Assert.AreEqual(0, fill.FillQuantity);
            Assert.AreEqual(0, fill.FillPrice);
            Assert.AreEqual(OrderStatus.None, fill.Status);

            security.SetMarketPrice(new TradeBar(Noon, Symbols.SPY, 102m, 103m, 101m, 102.3m, 100));
            fill = model.LimitFill(security, order);

            // Worst case scenario fill: at the limit price (capped by the bar's high).
            Assert.AreEqual(order.Quantity, fill.FillQuantity);
            Assert.AreEqual(Math.Min(order.LimitPrice, security.High), fill.FillPrice);
            Assert.AreEqual(OrderStatus.Filled, fill.Status);
        }

        [Test]
        public void PerformsLimitFillSell()
        {
            var model = new EquityTransactionModel();
            var order = new LimitOrder(Symbols.SPY, -100, 101.5m, Noon);
            var security = CreateSecurity();

            // Price below the limit: the sell must not fill yet.
            security.SetMarketPrice(new IndicatorDataPoint(Symbols.SPY, Noon, 101m));
            var fill = model.LimitFill(security, order);
            Assert.AreEqual(0, fill.FillQuantity);
            Assert.AreEqual(0, fill.FillPrice);
            Assert.AreEqual(OrderStatus.None, fill.Status);

            security.SetMarketPrice(new TradeBar(Noon, Symbols.SPY, 102m, 103m, 101m, 102.3m, 100));
            fill = model.LimitFill(security, order);

            // Worst case scenario fill: at the limit price (floored by the bar's low).
            Assert.AreEqual(order.Quantity, fill.FillQuantity);
            Assert.AreEqual(Math.Max(order.LimitPrice, security.Low), fill.FillPrice);
            Assert.AreEqual(OrderStatus.Filled, fill.Status);
        }

        [Test]
        public void PerformsStopLimitFillBuy()
        {
            var model = new EquityTransactionModel();
            var order = new StopLimitOrder(Symbols.SPY, 100, 101.5m, 101.75m, Noon);
            var security = CreateSecurity();

            // Below the stop: not triggered.
            security.SetMarketPrice(new IndicatorDataPoint(Symbols.SPY, Noon, 100m));
            var fill = model.StopLimitFill(security, order);
            Assert.AreEqual(0, fill.FillQuantity);
            Assert.AreEqual(0, fill.FillPrice);
            Assert.AreEqual(OrderStatus.None, fill.Status);

            // Triggered but above the limit: still no fill.
            security.SetMarketPrice(new IndicatorDataPoint(Symbols.SPY, Noon, 102m));
            fill = model.StopLimitFill(security, order);
            Assert.AreEqual(0, fill.FillQuantity);
            Assert.AreEqual(0, fill.FillPrice);
            Assert.AreEqual(OrderStatus.None, fill.Status);

            security.SetMarketPrice(new IndicatorDataPoint(Symbols.SPY, Noon, 101.66m));
            fill = model.StopLimitFill(security, order);

            // Worst case scenario fill: at the limit price.
            Assert.AreEqual(order.Quantity, fill.FillQuantity);
            Assert.AreEqual(order.LimitPrice, fill.FillPrice);
            Assert.AreEqual(OrderStatus.Filled, fill.Status);
        }

        [Test]
        public void PerformsStopLimitFillSell()
        {
            var model = new EquityTransactionModel();
            var order = new StopLimitOrder(Symbols.SPY, -100, 101.75m, 101.50m, Noon);
            var security = CreateSecurity();

            // Above the stop: not triggered.
            security.SetMarketPrice(new IndicatorDataPoint(Symbols.SPY, Noon, 102m));
            var fill = model.StopLimitFill(security, order);
            Assert.AreEqual(0, fill.FillQuantity);
            Assert.AreEqual(0, fill.FillPrice);
            Assert.AreEqual(OrderStatus.None, fill.Status);

            // Triggered but below the limit: still no fill.
            security.SetMarketPrice(new IndicatorDataPoint(Symbols.SPY, Noon, 101m));
            fill = model.StopLimitFill(security, order);
            Assert.AreEqual(0, fill.FillQuantity);
            Assert.AreEqual(0, fill.FillPrice);
            Assert.AreEqual(OrderStatus.None, fill.Status);

            security.SetMarketPrice(new IndicatorDataPoint(Symbols.SPY, Noon, 101.66m));
            fill = model.StopLimitFill(security, order);

            // Worst case scenario fill: at the limit price.
            Assert.AreEqual(order.Quantity, fill.FillQuantity);
            Assert.AreEqual(order.LimitPrice, fill.FillPrice);
            Assert.AreEqual(OrderStatus.Filled, fill.Status);
        }

        [Test]
        public void PerformsStopMarketFillBuy()
        {
            var model = new EquityTransactionModel();
            var order = new StopMarketOrder(Symbols.SPY, 100, 101.5m, Noon);
            var security = CreateSecurity();

            // Below the stop: not triggered.
            security.SetMarketPrice(new IndicatorDataPoint(Symbols.SPY, Noon, 101m));
            var fill = model.StopMarketFill(security, order);
            Assert.AreEqual(0, fill.FillQuantity);
            Assert.AreEqual(0, fill.FillPrice);
            Assert.AreEqual(OrderStatus.None, fill.Status);

            security.SetMarketPrice(new IndicatorDataPoint(Symbols.SPY, Noon, 102.5m));
            fill = model.StopMarketFill(security, order);

            // Worst case scenario fill for a buy: the max of asset/stop price.
            Assert.AreEqual(order.Quantity, fill.FillQuantity);
            Assert.AreEqual(Math.Max(security.Price, order.StopPrice), fill.FillPrice);
            Assert.AreEqual(OrderStatus.Filled, fill.Status);
        }

        [Test]
        public void PerformsStopMarketFillSell()
        {
            var model = new EquityTransactionModel();
            var order = new StopMarketOrder(Symbols.SPY, -100, 101.5m, Noon);
            var security = CreateSecurity();

            // Above the stop: not triggered.
            security.SetMarketPrice(new IndicatorDataPoint(Symbols.SPY, Noon, 102m));
            var fill = model.StopMarketFill(security, order);
            Assert.AreEqual(0, fill.FillQuantity);
            Assert.AreEqual(0, fill.FillPrice);
            Assert.AreEqual(OrderStatus.None, fill.Status);

            security.SetMarketPrice(new IndicatorDataPoint(Symbols.SPY, Noon, 101m));
            fill = model.StopMarketFill(security, order);

            // Worst case scenario fill for a sell: the min of asset/stop price.
            Assert.AreEqual(order.Quantity, fill.FillQuantity);
            Assert.AreEqual(Math.Min(security.Price, order.StopPrice), fill.FillPrice);
            Assert.AreEqual(OrderStatus.Filled, fill.Status);
        }

        /// <summary>
        /// Builds an SPY equity security with US equity exchange hours and the
        /// shared time keeper wired up, ready to receive market data. Extracted
        /// because every test constructed the identical security inline.
        /// </summary>
        private Security CreateSecurity()
        {
            var config = CreateTradeBarConfig();
            var security = new Security(SecurityExchangeHoursTests.CreateUsEquitySecurityExchangeHours(), config, new Cash(CashBook.AccountCurrency, 0, 1m), SymbolProperties.GetDefault(CashBook.AccountCurrency));
            security.SetLocalTimeKeeper(TimeKeeper.GetLocalTimeKeeper(TimeZones.NewYork));
            return security;
        }

        /// <summary>Minute-resolution SPY trade bar subscription in New York time.</summary>
        private SubscriptionDataConfig CreateTradeBarConfig()
        {
            return new SubscriptionDataConfig(typeof(TradeBar), Symbols.SPY, Resolution.Minute, TimeZones.NewYork, TimeZones.NewYork, true, true, false);
        }
    }
}
| AnshulYADAV007/Lean | Tests/Common/Securities/Equity/EquityTransactionModelTests.cs | C# | apache-2.0 | 11,759 |
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "config.h"
#include "core/animation/animatable/AnimatableValueKeyframe.h"
#include "core/animation/LegacyStyleInterpolation.h"
namespace blink {
// Copy constructor: duplicates the timing data (offset, composite mode,
// easing) via the base Keyframe constructor, then copies each entry of
// the per-property value map into this keyframe.
AnimatableValueKeyframe::AnimatableValueKeyframe(const AnimatableValueKeyframe& copyFrom)
    : Keyframe(copyFrom.m_offset, copyFrom.m_composite, copyFrom.m_easing)
{
    for (PropertyValueMap::const_iterator iter = copyFrom.m_propertyValues.begin(); iter != copyFrom.m_propertyValues.end(); ++iter)
        setPropertyValue(iter->key, iter->value.get());
}
// Returns the set of CSS property IDs this keyframe holds values for,
// built fresh from the keys of the property-value map on each call.
PropertySet AnimatableValueKeyframe::properties() const
{
    // This is not used in time-critical code, so we probably don't need to
    // worry about caching this result.
    PropertySet properties;
    for (PropertyValueMap::const_iterator iter = m_propertyValues.begin(); iter != m_propertyValues.end(); ++iter)
        properties.add(*iter.keys());
    return properties;
}
// Creates a deep copy of this keyframe via the copy constructor above.
PassRefPtrWillBeRawPtr<Keyframe> AnimatableValueKeyframe::clone() const
{
    return adoptRefWillBeNoop(new AnimatableValueKeyframe(*this));
}
// Builds a single-property view of this keyframe: the value stored for
// |property| paired with this keyframe's offset, easing and composite mode.
PassOwnPtrWillBeRawPtr<Keyframe::PropertySpecificKeyframe> AnimatableValueKeyframe::createPropertySpecificKeyframe(CSSPropertyID property) const
{
    return adoptPtrWillBeNoop(new PropertySpecificKeyframe(offset(), &easing(), propertyValue(property), composite()));
}
// Garbage-collection tracing hook: visits the property-value map when
// Oilpan is enabled, then delegates to the base class.
void AnimatableValueKeyframe::trace(Visitor* visitor)
{
#if ENABLE(OILPAN)
    visitor->trace(m_propertyValues);
#endif
    Keyframe::trace(visitor);
}
// Constructs a property-specific keyframe with an explicit composite
// operation. The const_cast drops constness so the value can be stored in
// the (ref-counting) member; the value itself is not mutated here.
AnimatableValueKeyframe::PropertySpecificKeyframe::PropertySpecificKeyframe(double offset, PassRefPtr<TimingFunction> easing, const AnimatableValue* value, AnimationEffect::CompositeOperation op)
    : Keyframe::PropertySpecificKeyframe(offset, easing, op)
    , m_value(const_cast<AnimatableValue*>(value))
{ }
// Constructs a property-specific keyframe that always composites by
// replacement. Unlike the overload above, this one requires a concrete
// (non-null) offset.
AnimatableValueKeyframe::PropertySpecificKeyframe::PropertySpecificKeyframe(double offset, PassRefPtr<TimingFunction> easing, PassRefPtrWillBeRawPtr<AnimatableValue> value)
    : Keyframe::PropertySpecificKeyframe(offset, easing, AnimationEffect::CompositeReplace)
    , m_value(value)
{
    ASSERT(!isNull(m_offset));
}
// Copies this property-specific keyframe, substituting |offset| while
// sharing the same easing function and value.
PassOwnPtrWillBeRawPtr<Keyframe::PropertySpecificKeyframe> AnimatableValueKeyframe::PropertySpecificKeyframe::cloneWithOffset(double offset) const
{
    Keyframe::PropertySpecificKeyframe* theClone = new PropertySpecificKeyframe(offset, m_easing, m_value);
    return adoptPtrWillBeNoop(theClone);
}
// Creates a legacy-style interpolation for |property| running from this
// keyframe's value to |end|'s value. |end| must be an
// AnimatableValuePropertySpecificKeyframe (enforced by the downcast).
PassRefPtrWillBeRawPtr<Interpolation> AnimatableValueKeyframe::PropertySpecificKeyframe::createInterpolation(CSSPropertyID property, Keyframe::PropertySpecificKeyframe* end, Element*) const
{
    AnimatableValuePropertySpecificKeyframe* to = toAnimatableValuePropertySpecificKeyframe(end);
    return LegacyStyleInterpolation::create(value(), to->value(), property);
}
// Creates a neutral (additive identity) keyframe at |offset|, used to fill
// in a missing endpoint; it holds the neutral value and composites by add.
PassOwnPtrWillBeRawPtr<Keyframe::PropertySpecificKeyframe> AnimatableValueKeyframe::PropertySpecificKeyframe::neutralKeyframe(double offset, PassRefPtr<TimingFunction> easing) const
{
    return adoptPtrWillBeNoop(new AnimatableValueKeyframe::PropertySpecificKeyframe(offset, easing, AnimatableValue::neutralValue(), AnimationEffect::CompositeAdd));
}
// Garbage-collection tracing hook: visits the held value, then delegates
// to the base class.
void AnimatableValueKeyframe::PropertySpecificKeyframe::trace(Visitor* visitor)
{
    visitor->trace(m_value);
    Keyframe::PropertySpecificKeyframe::trace(visitor);
}
}
| mxOBS/deb-pkg_trusty_chromium-browser | third_party/WebKit/Source/core/animation/animatable/AnimatableValueKeyframe.cpp | C++ | bsd-3-clause | 3,461 |
#!/bin/bash
# Rebuilds the React website and pushes it to the gh-pages branch, but only
# when running on the designated website branch. Exits on the first failing
# command (set -e).

set -e

if [ "$CIRCLE_BRANCH" = "$REACT_WEBSITE_BRANCH" ]; then
  # Checkout directory for the gh-pages branch, next to the source checkout.
  # Quoted everywhere: the original left these expansions unquoted, which
  # breaks if the working directory path contains spaces.
  GH_PAGES_DIR="$(pwd)/../react-gh-pages"

  # Reuse the clone when it was restored from cache; otherwise make a
  # shallow single-branch clone.
  if [ -d "$GH_PAGES_DIR" ]; then
    pushd "$GH_PAGES_DIR"
    git pull origin gh-pages
    popd
  else
    git clone --branch gh-pages --depth=1 \
      https://reactjs-bot@github.com/facebook/react.git \
      "$GH_PAGES_DIR"
  fi

  pushd docs
  bundle exec rake release
  cd "$GH_PAGES_DIR"
  git status
  git --no-pager diff
  # Only commit and push when the rebuild actually changed something.
  if ! git diff-index --quiet HEAD --; then
    git add -A .
    git commit -m "Rebuild website"
    git push origin gh-pages
  fi
  popd
else
  echo "Not building website"
fi
| leexiaosi/react | scripts/circleci/build_gh_pages.sh | Shell | bsd-3-clause | 669 |
<?php
/**
* Zend Framework (http://framework.zend.com/)
*
* @link http://github.com/zendframework/zf2 for the canonical source repository
* @copyright Copyright (c) 2005-2014 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
namespace ZendTest\Stdlib\TestAsset;
/**
 * Test asset exercising hydrator handling of camelCase properties exposed
 * through three accessor naming styles: getX/setX, isX (with setIsX) and
 * hasX (with setHasX). Method names are the contract under test, so each
 * accessor is spelled out explicitly rather than generated magically.
 */
class ClassMethodsCamelCase
{
    /** @var string Plain value reachable via getFooBar()/setFooBar(). */
    protected $fooBar = '1';

    /** @var string Plain value reachable via getFooBarBaz()/setFooBarBaz(). */
    protected $fooBarBaz = '2';

    /** @var bool "is"-prefixed flag exposed via a get-style accessor. */
    protected $isFoo = true;

    /** @var bool "is"-prefixed flag exposed via a bare isBar() accessor. */
    protected $isBar = true;

    /** @var bool "has"-prefixed flag exposed via a get-style accessor. */
    protected $hasFoo = true;

    /** @var bool "has"-prefixed flag exposed via a bare hasBar() accessor. */
    protected $hasBar = true;

    /**
     * @return string
     */
    public function getFooBar()
    {
        return $this->fooBar;
    }

    /**
     * Fluent setter.
     *
     * @param  string $value
     * @return self
     */
    public function setFooBar($value)
    {
        $this->fooBar = $value;
        return $this;
    }

    /**
     * @return string
     */
    public function getFooBarBaz()
    {
        return $this->fooBarBaz;
    }

    /**
     * Fluent setter.
     *
     * @param  string $value
     * @return self
     */
    public function setFooBarBaz($value)
    {
        $this->fooBarBaz = $value;
        return $this;
    }

    /**
     * Get-style accessor for the "is"-prefixed flag.
     *
     * @return bool
     */
    public function getIsFoo()
    {
        return $this->isFoo;
    }

    /**
     * Fluent setter.
     *
     * @param  bool $value
     * @return self
     */
    public function setIsFoo($value)
    {
        $this->isFoo = $value;
        return $this;
    }

    /**
     * Bare is-style accessor (no "get" prefix).
     *
     * @return bool
     */
    public function isBar()
    {
        return $this->isBar;
    }

    /**
     * Fluent setter.
     *
     * @param  bool $value
     * @return self
     */
    public function setIsBar($value)
    {
        $this->isBar = $value;
        return $this;
    }

    /**
     * Get-style accessor for the "has"-prefixed flag.
     *
     * @return bool
     */
    public function getHasFoo()
    {
        return $this->hasFoo;
    }

    /**
     * Fluent setter.
     *
     * @param  bool $value
     * @return self
     */
    public function setHasFoo($value)
    {
        $this->hasFoo = $value;
        return $this;
    }

    /**
     * Bare has-style accessor (no "get" prefix).
     *
     * @return bool
     */
    public function hasBar()
    {
        return $this->hasBar;
    }

    /**
     * Fluent setter.
     *
     * @param  bool $value
     * @return self
     */
    public function setHasBar($value)
    {
        $this->hasBar = $value;
        return $this;
    }
}
| exclie/Imagenologia | vendor/zendframework/zendframework/tests/ZendTest/Stdlib/TestAsset/ClassMethodsCamelCase.php | PHP | bsd-3-clause | 1,642 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.